1 /* 2 * fs/f2fs/node.c 3 * 4 * Copyright (c) 2012 Samsung Electronics Co., Ltd. 5 * http://www.samsung.com/ 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License version 2 as 9 * published by the Free Software Foundation. 10 */ 11 #include <linux/fs.h> 12 #include <linux/f2fs_fs.h> 13 #include <linux/mpage.h> 14 #include <linux/backing-dev.h> 15 #include <linux/blkdev.h> 16 #include <linux/pagevec.h> 17 #include <linux/swap.h> 18 19 #include "f2fs.h" 20 #include "node.h" 21 #include "segment.h" 22 #include "xattr.h" 23 #include "trace.h" 24 #include <trace/events/f2fs.h> 25 26 #define on_build_free_nids(nmi) mutex_is_locked(&(nm_i)->build_lock) 27 28 static struct kmem_cache *nat_entry_slab; 29 static struct kmem_cache *free_nid_slab; 30 static struct kmem_cache *nat_entry_set_slab; 31 32 bool available_free_memory(struct f2fs_sb_info *sbi, int type) 33 { 34 struct f2fs_nm_info *nm_i = NM_I(sbi); 35 struct sysinfo val; 36 unsigned long avail_ram; 37 unsigned long mem_size = 0; 38 bool res = false; 39 40 si_meminfo(&val); 41 42 /* only uses low memory */ 43 avail_ram = val.totalram - val.totalhigh; 44 45 /* 46 * give 25%, 25%, 50%, 50%, 50% memory for each components respectively 47 */ 48 if (type == FREE_NIDS) { 49 mem_size = (nm_i->nid_cnt[FREE_NID] * 50 sizeof(struct free_nid)) >> PAGE_SHIFT; 51 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2); 52 } else if (type == NAT_ENTRIES) { 53 mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >> 54 PAGE_SHIFT; 55 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2); 56 if (excess_cached_nats(sbi)) 57 res = false; 58 } else if (type == DIRTY_DENTS) { 59 if (sbi->sb->s_bdi->wb.dirty_exceeded) 60 return false; 61 mem_size = get_pages(sbi, F2FS_DIRTY_DENTS); 62 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1); 63 } else if (type == INO_ENTRIES) { 64 int i; 65 66 for (i = 0; i < MAX_INO_ENTRY; i++) 67 mem_size += sbi->im[i].ino_num * 68 sizeof(struct ino_entry); 69 mem_size >>= PAGE_SHIFT; 70 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1); 71 } else if (type == EXTENT_CACHE) { 72 mem_size = (atomic_read(&sbi->total_ext_tree) * 73 sizeof(struct extent_tree) + 74 atomic_read(&sbi->total_ext_node) * 75 sizeof(struct extent_node)) >> PAGE_SHIFT; 76 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1); 77 } else if (type == INMEM_PAGES) { 78 /* it allows 20% / total_ram for inmemory pages */ 79 mem_size = get_pages(sbi, F2FS_INMEM_PAGES); 80 res = mem_size < (val.totalram / 5); 81 } else { 82 if (!sbi->sb->s_bdi->wb.dirty_exceeded) 83 return true; 84 } 85 return res; 86 } 87 88 static void clear_node_page_dirty(struct page *page) 89 { 90 struct address_space *mapping = page->mapping; 91 unsigned int long flags; 92 93 if (PageDirty(page)) { 94 spin_lock_irqsave(&mapping->tree_lock, flags); 95 radix_tree_tag_clear(&mapping->page_tree, 96 page_index(page), 97 PAGECACHE_TAG_DIRTY); 98 spin_unlock_irqrestore(&mapping->tree_lock, flags); 99 100 clear_page_dirty_for_io(page); 101 dec_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES); 102 } 103 ClearPageUptodate(page); 104 } 105 106 static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid) 107 { 108 pgoff_t index = current_nat_addr(sbi, nid); 109 return get_meta_page(sbi, index); 110 } 111 112 static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid) 113 { 114 struct page *src_page; 115 struct page *dst_page; 116 pgoff_t src_off; 117 pgoff_t dst_off; 118 void *src_addr; 119 void *dst_addr; 120 struct f2fs_nm_info *nm_i = NM_I(sbi); 121 122 src_off = current_nat_addr(sbi, nid); 123 dst_off = next_nat_addr(sbi, src_off); 124 125 /* get current nat block page with lock */ 126 src_page = get_meta_page(sbi, src_off); 127 dst_page = grab_meta_page(sbi, dst_off); 128 f2fs_bug_on(sbi, PageDirty(src_page)); 129 130 src_addr = page_address(src_page); 131 dst_addr = page_address(dst_page); 132 memcpy(dst_addr, src_addr, PAGE_SIZE); 133 set_page_dirty(dst_page); 134 f2fs_put_page(src_page, 1); 135 136 set_to_next_nat(nm_i, nid); 137 138 return dst_page; 139 } 140 141 static struct nat_entry *__alloc_nat_entry(nid_t nid, bool no_fail) 142 { 143 struct nat_entry *new; 144 145 if (no_fail) 146 new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO); 147 else 148 new = kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO); 149 if (new) { 150 nat_set_nid(new, nid); 151 nat_reset_flag(new); 152 } 153 return new; 154 } 155 156 static void __free_nat_entry(struct nat_entry *e) 157 { 158 kmem_cache_free(nat_entry_slab, e); 159 } 160 161 /* must be locked by nat_tree_lock */ 162 static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i, 163 struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail) 164 { 165 if (no_fail) 166 f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne); 167 else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne)) 168 return NULL; 169 170 if (raw_ne) 171 node_info_from_raw_nat(&ne->ni, raw_ne); 172 list_add_tail(&ne->list, &nm_i->nat_entries); 173 nm_i->nat_cnt++; 174 return ne; 175 } 176 177 static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n) 178 { 179 return radix_tree_lookup(&nm_i->nat_root, n); 180 } 181 182 static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i, 183 nid_t start, unsigned int nr, struct nat_entry **ep) 184 { 185 return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr); 186 } 187 188 static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e) 189 { 190 list_del(&e->list); 191 radix_tree_delete(&nm_i->nat_root, nat_get_nid(e)); 192 nm_i->nat_cnt--; 193 __free_nat_entry(e); 194 } 195 196 static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i, 197 struct nat_entry *ne) 198 { 199 nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid); 200 struct nat_entry_set *head; 201 202 head = radix_tree_lookup(&nm_i->nat_set_root, set); 203 if (!head) { 204 head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS); 205 206 INIT_LIST_HEAD(&head->entry_list); 207 INIT_LIST_HEAD(&head->set_list); 208 head->set = set; 209 head->entry_cnt = 0; 210 f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head); 211 } 212 213 if (get_nat_flag(ne, IS_DIRTY)) 214 goto refresh_list; 215 216 nm_i->dirty_nat_cnt++; 217 head->entry_cnt++; 218 set_nat_flag(ne, IS_DIRTY, true); 219 refresh_list: 220 if (nat_get_blkaddr(ne) == NEW_ADDR) 221 list_del_init(&ne->list); 222 else 223 list_move_tail(&ne->list, &head->entry_list); 224 } 225 226 static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i, 227 struct nat_entry_set *set, struct nat_entry *ne) 228 { 229 list_move_tail(&ne->list, &nm_i->nat_entries); 230 set_nat_flag(ne, IS_DIRTY, false); 231 set->entry_cnt--; 232 nm_i->dirty_nat_cnt--; 233 } 234 235 static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i, 236 nid_t start, unsigned int nr, struct nat_entry_set **ep) 237 { 238 return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep, 239 start, nr); 240 } 241 242 int need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid) 243 { 244 struct f2fs_nm_info *nm_i = NM_I(sbi); 245 struct nat_entry *e; 246 bool need = false; 247 248 down_read(&nm_i->nat_tree_lock); 249 e = __lookup_nat_cache(nm_i, nid); 250 if (e) { 251 if (!get_nat_flag(e, IS_CHECKPOINTED) && 252 !get_nat_flag(e, HAS_FSYNCED_INODE)) 253 need = true; 254 } 255 up_read(&nm_i->nat_tree_lock); 256 return need; 257 } 258 259 bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid) 260 { 261 struct f2fs_nm_info *nm_i = NM_I(sbi); 262 struct nat_entry *e; 263 bool is_cp = true; 264 265 down_read(&nm_i->nat_tree_lock); 266 e = __lookup_nat_cache(nm_i, nid); 267 if (e && !get_nat_flag(e, IS_CHECKPOINTED)) 268 is_cp = false; 269 up_read(&nm_i->nat_tree_lock); 270 return is_cp; 271 } 272 273 bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino) 274 { 275 struct f2fs_nm_info *nm_i = NM_I(sbi); 276 struct nat_entry *e; 277 bool need_update = true; 278 279 down_read(&nm_i->nat_tree_lock); 280 e = __lookup_nat_cache(nm_i, ino); 281 if (e && get_nat_flag(e, HAS_LAST_FSYNC) && 282 (get_nat_flag(e, IS_CHECKPOINTED) || 283 get_nat_flag(e, HAS_FSYNCED_INODE))) 284 need_update = false; 285 up_read(&nm_i->nat_tree_lock); 286 return need_update; 287 } 288 289 /* must be locked by nat_tree_lock */ 290 static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid, 291 struct f2fs_nat_entry *ne) 292 { 293 struct f2fs_nm_info *nm_i = NM_I(sbi); 294 struct nat_entry *new, *e; 295 296 new = __alloc_nat_entry(nid, false); 297 if (!new) 298 return; 299 300 down_write(&nm_i->nat_tree_lock); 301 e = __lookup_nat_cache(nm_i, nid); 302 if (!e) 303 e = __init_nat_entry(nm_i, new, ne, false); 304 else 305 f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) || 306 nat_get_blkaddr(e) != 307 le32_to_cpu(ne->block_addr) || 308 nat_get_version(e) != ne->version); 309 up_write(&nm_i->nat_tree_lock); 310 if (e != new) 311 __free_nat_entry(new); 312 } 313 314 static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni, 315 block_t new_blkaddr, bool fsync_done) 316 { 317 struct f2fs_nm_info *nm_i = NM_I(sbi); 318 struct nat_entry *e; 319 struct nat_entry *new = __alloc_nat_entry(ni->nid, true); 320 321 down_write(&nm_i->nat_tree_lock); 322 e = __lookup_nat_cache(nm_i, ni->nid); 323 if (!e) { 324 e = __init_nat_entry(nm_i, new, NULL, true); 325 copy_node_info(&e->ni, ni); 326 f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR); 327 } else if (new_blkaddr == NEW_ADDR) { 328 /* 329 * when nid is reallocated, 330 * previous nat entry can be remained in nat cache. 331 * So, reinitialize it with new information. 332 */ 333 copy_node_info(&e->ni, ni); 334 f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR); 335 } 336 /* let's free early to reduce memory consumption */ 337 if (e != new) 338 __free_nat_entry(new); 339 340 /* sanity check */ 341 f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr); 342 f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR && 343 new_blkaddr == NULL_ADDR); 344 f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR && 345 new_blkaddr == NEW_ADDR); 346 f2fs_bug_on(sbi, nat_get_blkaddr(e) != NEW_ADDR && 347 nat_get_blkaddr(e) != NULL_ADDR && 348 new_blkaddr == NEW_ADDR); 349 350 /* increment version no as node is removed */ 351 if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) { 352 unsigned char version = nat_get_version(e); 353 nat_set_version(e, inc_node_version(version)); 354 } 355 356 /* change address */ 357 nat_set_blkaddr(e, new_blkaddr); 358 if (new_blkaddr == NEW_ADDR || new_blkaddr == NULL_ADDR) 359 set_nat_flag(e, IS_CHECKPOINTED, false); 360 __set_nat_cache_dirty(nm_i, e); 361 362 /* update fsync_mark if its inode nat entry is still alive */ 363 if (ni->nid != ni->ino) 364 e = __lookup_nat_cache(nm_i, ni->ino); 365 if (e) { 366 if (fsync_done && ni->nid == ni->ino) 367 set_nat_flag(e, HAS_FSYNCED_INODE, true); 368 set_nat_flag(e, HAS_LAST_FSYNC, fsync_done); 369 } 370 up_write(&nm_i->nat_tree_lock); 371 } 372 373 int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink) 374 { 375 struct f2fs_nm_info *nm_i = NM_I(sbi); 376 int nr = nr_shrink; 377 378 if (!down_write_trylock(&nm_i->nat_tree_lock)) 379 return 0; 380 381 while (nr_shrink && !list_empty(&nm_i->nat_entries)) { 382 struct nat_entry *ne; 383 ne = list_first_entry(&nm_i->nat_entries, 384 struct nat_entry, list); 385 __del_from_nat_cache(nm_i, ne); 386 nr_shrink--; 387 } 388 up_write(&nm_i->nat_tree_lock); 389 return nr - nr_shrink; 390 } 391 392 /* 393 * This function always returns success 394 */ 395 void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni) 396 { 397 struct f2fs_nm_info *nm_i = NM_I(sbi); 398 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); 399 struct f2fs_journal *journal = curseg->journal; 400 nid_t start_nid = START_NID(nid); 401 struct f2fs_nat_block *nat_blk; 402 struct page *page = NULL; 403 struct f2fs_nat_entry ne; 404 struct nat_entry *e; 405 pgoff_t index; 406 int i; 407 408 ni->nid = nid; 409 410 /* Check nat cache */ 411 down_read(&nm_i->nat_tree_lock); 412 e = __lookup_nat_cache(nm_i, nid); 413 if (e) { 414 ni->ino = nat_get_ino(e); 415 ni->blk_addr = nat_get_blkaddr(e); 416 ni->version = nat_get_version(e); 417 up_read(&nm_i->nat_tree_lock); 418 return; 419 } 420 421 memset(&ne, 0, sizeof(struct f2fs_nat_entry)); 422 423 /* Check current segment summary */ 424 down_read(&curseg->journal_rwsem); 425 i = lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0); 426 if (i >= 0) { 427 ne = nat_in_journal(journal, i); 428 node_info_from_raw_nat(ni, &ne); 429 } 430 up_read(&curseg->journal_rwsem); 431 if (i >= 0) { 432 up_read(&nm_i->nat_tree_lock); 433 goto cache; 434 } 435 436 /* Fill node_info from nat page */ 437 index = current_nat_addr(sbi, nid); 438 up_read(&nm_i->nat_tree_lock); 439 440 page = get_meta_page(sbi, index); 441 nat_blk = (struct f2fs_nat_block *)page_address(page); 442 ne = nat_blk->entries[nid - start_nid]; 443 node_info_from_raw_nat(ni, &ne); 444 f2fs_put_page(page, 1); 445 cache: 446 /* cache nat entry */ 447 cache_nat_entry(sbi, nid, &ne); 448 } 449 450 /* 451 * readahead MAX_RA_NODE number of node pages. 452 */ 453 static void ra_node_pages(struct page *parent, int start, int n) 454 { 455 struct f2fs_sb_info *sbi = F2FS_P_SB(parent); 456 struct blk_plug plug; 457 int i, end; 458 nid_t nid; 459 460 blk_start_plug(&plug); 461 462 /* Then, try readahead for siblings of the desired node */ 463 end = start + n; 464 end = min(end, NIDS_PER_BLOCK); 465 for (i = start; i < end; i++) { 466 nid = get_nid(parent, i, false); 467 ra_node_page(sbi, nid); 468 } 469 470 blk_finish_plug(&plug); 471 } 472 473 pgoff_t get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs) 474 { 475 const long direct_index = ADDRS_PER_INODE(dn->inode); 476 const long direct_blks = ADDRS_PER_BLOCK; 477 const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK; 478 unsigned int skipped_unit = ADDRS_PER_BLOCK; 479 int cur_level = dn->cur_level; 480 int max_level = dn->max_level; 481 pgoff_t base = 0; 482 483 if (!dn->max_level) 484 return pgofs + 1; 485 486 while (max_level-- > cur_level) 487 skipped_unit *= NIDS_PER_BLOCK; 488 489 switch (dn->max_level) { 490 case 3: 491 base += 2 * indirect_blks; 492 case 2: 493 base += 2 * direct_blks; 494 case 1: 495 base += direct_index; 496 break; 497 default: 498 f2fs_bug_on(F2FS_I_SB(dn->inode), 1); 499 } 500 501 return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base; 502 } 503 504 /* 505 * The maximum depth is four. 506 * Offset[0] will have raw inode offset. 507 */ 508 static int get_node_path(struct inode *inode, long block, 509 int offset[4], unsigned int noffset[4]) 510 { 511 const long direct_index = ADDRS_PER_INODE(inode); 512 const long direct_blks = ADDRS_PER_BLOCK; 513 const long dptrs_per_blk = NIDS_PER_BLOCK; 514 const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK; 515 const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK; 516 int n = 0; 517 int level = 0; 518 519 noffset[0] = 0; 520 521 if (block < direct_index) { 522 offset[n] = block; 523 goto got; 524 } 525 block -= direct_index; 526 if (block < direct_blks) { 527 offset[n++] = NODE_DIR1_BLOCK; 528 noffset[n] = 1; 529 offset[n] = block; 530 level = 1; 531 goto got; 532 } 533 block -= direct_blks; 534 if (block < direct_blks) { 535 offset[n++] = NODE_DIR2_BLOCK; 536 noffset[n] = 2; 537 offset[n] = block; 538 level = 1; 539 goto got; 540 } 541 block -= direct_blks; 542 if (block < indirect_blks) { 543 offset[n++] = NODE_IND1_BLOCK; 544 noffset[n] = 3; 545 offset[n++] = block / direct_blks; 546 noffset[n] = 4 + offset[n - 1]; 547 offset[n] = block % direct_blks; 548 level = 2; 549 goto got; 550 } 551 block -= indirect_blks; 552 if (block < indirect_blks) { 553 offset[n++] = NODE_IND2_BLOCK; 554 noffset[n] = 4 + dptrs_per_blk; 555 offset[n++] = block / direct_blks; 556 noffset[n] = 5 + dptrs_per_blk + offset[n - 1]; 557 offset[n] = block % direct_blks; 558 level = 2; 559 goto got; 560 } 561 block -= indirect_blks; 562 if (block < dindirect_blks) { 563 offset[n++] = NODE_DIND_BLOCK; 564 noffset[n] = 5 + (dptrs_per_blk * 2); 565 offset[n++] = block / indirect_blks; 566 noffset[n] = 6 + (dptrs_per_blk * 2) + 567 offset[n - 1] * (dptrs_per_blk + 1); 568 offset[n++] = (block / direct_blks) % dptrs_per_blk; 569 noffset[n] = 7 + (dptrs_per_blk * 2) + 570 offset[n - 2] * (dptrs_per_blk + 1) + 571 offset[n - 1]; 572 offset[n] = block % direct_blks; 573 level = 3; 574 goto got; 575 } else { 576 return -E2BIG; 577 } 578 got: 579 return level; 580 } 581 582 /* 583 * Caller should call f2fs_put_dnode(dn). 584 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and 585 * f2fs_unlock_op() only if ro is not set RDONLY_NODE. 586 * In the case of RDONLY_NODE, we don't need to care about mutex. 587 */ 588 int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode) 589 { 590 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); 591 struct page *npage[4]; 592 struct page *parent = NULL; 593 int offset[4]; 594 unsigned int noffset[4]; 595 nid_t nids[4]; 596 int level, i = 0; 597 int err = 0; 598 599 level = get_node_path(dn->inode, index, offset, noffset); 600 if (level < 0) 601 return level; 602 603 nids[0] = dn->inode->i_ino; 604 npage[0] = dn->inode_page; 605 606 if (!npage[0]) { 607 npage[0] = get_node_page(sbi, nids[0]); 608 if (IS_ERR(npage[0])) 609 return PTR_ERR(npage[0]); 610 } 611 612 /* if inline_data is set, should not report any block indices */ 613 if (f2fs_has_inline_data(dn->inode) && index) { 614 err = -ENOENT; 615 f2fs_put_page(npage[0], 1); 616 goto release_out; 617 } 618 619 parent = npage[0]; 620 if (level != 0) 621 nids[1] = get_nid(parent, offset[0], true); 622 dn->inode_page = npage[0]; 623 dn->inode_page_locked = true; 624 625 /* get indirect or direct nodes */ 626 for (i = 1; i <= level; i++) { 627 bool done = false; 628 629 if (!nids[i] && mode == ALLOC_NODE) { 630 /* alloc new node */ 631 if (!alloc_nid(sbi, &(nids[i]))) { 632 err = -ENOSPC; 633 goto release_pages; 634 } 635 636 dn->nid = nids[i]; 637 npage[i] = new_node_page(dn, noffset[i]); 638 if (IS_ERR(npage[i])) { 639 alloc_nid_failed(sbi, nids[i]); 640 err = PTR_ERR(npage[i]); 641 goto release_pages; 642 } 643 644 set_nid(parent, offset[i - 1], nids[i], i == 1); 645 alloc_nid_done(sbi, nids[i]); 646 done = true; 647 } else if (mode == LOOKUP_NODE_RA && i == level && level > 1) { 648 npage[i] = get_node_page_ra(parent, offset[i - 1]); 649 if (IS_ERR(npage[i])) { 650 err = PTR_ERR(npage[i]); 651 goto release_pages; 652 } 653 done = true; 654 } 655 if (i == 1) { 656 dn->inode_page_locked = false; 657 unlock_page(parent); 658 } else { 659 f2fs_put_page(parent, 1); 660 } 661 662 if (!done) { 663 npage[i] = get_node_page(sbi, nids[i]); 664 if (IS_ERR(npage[i])) { 665 err = PTR_ERR(npage[i]); 666 f2fs_put_page(npage[0], 0); 667 goto release_out; 668 } 669 } 670 if (i < level) { 671 parent = npage[i]; 672 nids[i + 1] = get_nid(parent, offset[i], false); 673 } 674 } 675 dn->nid = nids[level]; 676 dn->ofs_in_node = offset[level]; 677 dn->node_page = npage[level]; 678 dn->data_blkaddr = datablock_addr(dn->inode, 679 dn->node_page, dn->ofs_in_node); 680 return 0; 681 682 release_pages: 683 f2fs_put_page(parent, 1); 684 if (i > 1) 685 f2fs_put_page(npage[0], 0); 686 release_out: 687 dn->inode_page = NULL; 688 dn->node_page = NULL; 689 if (err == -ENOENT) { 690 dn->cur_level = i; 691 dn->max_level = level; 692 dn->ofs_in_node = offset[level]; 693 } 694 return err; 695 } 696 697 static void truncate_node(struct dnode_of_data *dn) 698 { 699 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); 700 struct node_info ni; 701 702 get_node_info(sbi, dn->nid, &ni); 703 704 /* Deallocate node address */ 705 invalidate_blocks(sbi, ni.blk_addr); 706 dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino); 707 set_node_addr(sbi, &ni, NULL_ADDR, false); 708 709 if (dn->nid == dn->inode->i_ino) { 710 remove_orphan_inode(sbi, dn->nid); 711 dec_valid_inode_count(sbi); 712 f2fs_inode_synced(dn->inode); 713 } 714 715 clear_node_page_dirty(dn->node_page); 716 set_sbi_flag(sbi, SBI_IS_DIRTY); 717 718 f2fs_put_page(dn->node_page, 1); 719 720 invalidate_mapping_pages(NODE_MAPPING(sbi), 721 dn->node_page->index, dn->node_page->index); 722 723 dn->node_page = NULL; 724 trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr); 725 } 726 727 static int truncate_dnode(struct dnode_of_data *dn) 728 { 729 struct page *page; 730 731 if (dn->nid == 0) 732 return 1; 733 734 /* get direct node */ 735 page = get_node_page(F2FS_I_SB(dn->inode), dn->nid); 736 if (IS_ERR(page) && PTR_ERR(page) == -ENOENT) 737 return 1; 738 else if (IS_ERR(page)) 739 return PTR_ERR(page); 740 741 /* Make dnode_of_data for parameter */ 742 dn->node_page = page; 743 dn->ofs_in_node = 0; 744 truncate_data_blocks(dn); 745 truncate_node(dn); 746 return 1; 747 } 748 749 static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs, 750 int ofs, int depth) 751 { 752 struct dnode_of_data rdn = *dn; 753 struct page *page; 754 struct f2fs_node *rn; 755 nid_t child_nid; 756 unsigned int child_nofs; 757 int freed = 0; 758 int i, ret; 759 760 if (dn->nid == 0) 761 return NIDS_PER_BLOCK + 1; 762 763 trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr); 764 765 page = get_node_page(F2FS_I_SB(dn->inode), dn->nid); 766 if (IS_ERR(page)) { 767 trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page)); 768 return PTR_ERR(page); 769 } 770 771 ra_node_pages(page, ofs, NIDS_PER_BLOCK); 772 773 rn = F2FS_NODE(page); 774 if (depth < 3) { 775 for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) { 776 child_nid = le32_to_cpu(rn->in.nid[i]); 777 if (child_nid == 0) 778 continue; 779 rdn.nid = child_nid; 780 ret = truncate_dnode(&rdn); 781 if (ret < 0) 782 goto out_err; 783 if (set_nid(page, i, 0, false)) 784 dn->node_changed = true; 785 } 786 } else { 787 child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1; 788 for (i = ofs; i < NIDS_PER_BLOCK; i++) { 789 child_nid = le32_to_cpu(rn->in.nid[i]); 790 if (child_nid == 0) { 791 child_nofs += NIDS_PER_BLOCK + 1; 792 continue; 793 } 794 rdn.nid = child_nid; 795 ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1); 796 if (ret == (NIDS_PER_BLOCK + 1)) { 797 if (set_nid(page, i, 0, false)) 798 dn->node_changed = true; 799 child_nofs += ret; 800 } else if (ret < 0 && ret != -ENOENT) { 801 goto out_err; 802 } 803 } 804 freed = child_nofs; 805 } 806 807 if (!ofs) { 808 /* remove current indirect node */ 809 dn->node_page = page; 810 truncate_node(dn); 811 freed++; 812 } else { 813 f2fs_put_page(page, 1); 814 } 815 trace_f2fs_truncate_nodes_exit(dn->inode, freed); 816 return freed; 817 818 out_err: 819 f2fs_put_page(page, 1); 820 trace_f2fs_truncate_nodes_exit(dn->inode, ret); 821 return ret; 822 } 823 824 static int truncate_partial_nodes(struct dnode_of_data *dn, 825 struct f2fs_inode *ri, int *offset, int depth) 826 { 827 struct page *pages[2]; 828 nid_t nid[3]; 829 nid_t child_nid; 830 int err = 0; 831 int i; 832 int idx = depth - 2; 833 834 nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]); 835 if (!nid[0]) 836 return 0; 837 838 /* get indirect nodes in the path */ 839 for (i = 0; i < idx + 1; i++) { 840 /* reference count'll be increased */ 841 pages[i] = get_node_page(F2FS_I_SB(dn->inode), nid[i]); 842 if (IS_ERR(pages[i])) { 843 err = PTR_ERR(pages[i]); 844 idx = i - 1; 845 goto fail; 846 } 847 nid[i + 1] = get_nid(pages[i], offset[i + 1], false); 848 } 849 850 ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK); 851 852 /* free direct nodes linked to a partial indirect node */ 853 for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) { 854 child_nid = get_nid(pages[idx], i, false); 855 if (!child_nid) 856 continue; 857 dn->nid = child_nid; 858 err = truncate_dnode(dn); 859 if (err < 0) 860 goto fail; 861 if (set_nid(pages[idx], i, 0, false)) 862 dn->node_changed = true; 863 } 864 865 if (offset[idx + 1] == 0) { 866 dn->node_page = pages[idx]; 867 dn->nid = nid[idx]; 868 truncate_node(dn); 869 } else { 870 f2fs_put_page(pages[idx], 1); 871 } 872 offset[idx]++; 873 offset[idx + 1] = 0; 874 idx--; 875 fail: 876 for (i = idx; i >= 0; i--) 877 f2fs_put_page(pages[i], 1); 878 879 trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err); 880 881 return err; 882 } 883 884 /* 885 * All the block addresses of data and nodes should be nullified. 886 */ 887 int truncate_inode_blocks(struct inode *inode, pgoff_t from) 888 { 889 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 890 int err = 0, cont = 1; 891 int level, offset[4], noffset[4]; 892 unsigned int nofs = 0; 893 struct f2fs_inode *ri; 894 struct dnode_of_data dn; 895 struct page *page; 896 897 trace_f2fs_truncate_inode_blocks_enter(inode, from); 898 899 level = get_node_path(inode, from, offset, noffset); 900 if (level < 0) 901 return level; 902 903 page = get_node_page(sbi, inode->i_ino); 904 if (IS_ERR(page)) { 905 trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page)); 906 return PTR_ERR(page); 907 } 908 909 set_new_dnode(&dn, inode, page, NULL, 0); 910 unlock_page(page); 911 912 ri = F2FS_INODE(page); 913 switch (level) { 914 case 0: 915 case 1: 916 nofs = noffset[1]; 917 break; 918 case 2: 919 nofs = noffset[1]; 920 if (!offset[level - 1]) 921 goto skip_partial; 922 err = truncate_partial_nodes(&dn, ri, offset, level); 923 if (err < 0 && err != -ENOENT) 924 goto fail; 925 nofs += 1 + NIDS_PER_BLOCK; 926 break; 927 case 3: 928 nofs = 5 + 2 * NIDS_PER_BLOCK; 929 if (!offset[level - 1]) 930 goto skip_partial; 931 err = truncate_partial_nodes(&dn, ri, offset, level); 932 if (err < 0 && err != -ENOENT) 933 goto fail; 934 break; 935 default: 936 BUG(); 937 } 938 939 skip_partial: 940 while (cont) { 941 dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]); 942 switch (offset[0]) { 943 case NODE_DIR1_BLOCK: 944 case NODE_DIR2_BLOCK: 945 err = truncate_dnode(&dn); 946 break; 947 948 case NODE_IND1_BLOCK: 949 case NODE_IND2_BLOCK: 950 err = truncate_nodes(&dn, nofs, offset[1], 2); 951 break; 952 953 case NODE_DIND_BLOCK: 954 err = truncate_nodes(&dn, nofs, offset[1], 3); 955 cont = 0; 956 break; 957 958 default: 959 BUG(); 960 } 961 if (err < 0 && err != -ENOENT) 962 goto fail; 963 if (offset[1] == 0 && 964 ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) { 965 lock_page(page); 966 BUG_ON(page->mapping != NODE_MAPPING(sbi)); 967 f2fs_wait_on_page_writeback(page, NODE, true); 968 ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0; 969 set_page_dirty(page); 970 unlock_page(page); 971 } 972 offset[1] = 0; 973 offset[0]++; 974 nofs += err; 975 } 976 fail: 977 f2fs_put_page(page, 0); 978 trace_f2fs_truncate_inode_blocks_exit(inode, err); 979 return err > 0 ? 0 : err; 980 } 981 982 /* caller must lock inode page */ 983 int truncate_xattr_node(struct inode *inode) 984 { 985 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 986 nid_t nid = F2FS_I(inode)->i_xattr_nid; 987 struct dnode_of_data dn; 988 struct page *npage; 989 990 if (!nid) 991 return 0; 992 993 npage = get_node_page(sbi, nid); 994 if (IS_ERR(npage)) 995 return PTR_ERR(npage); 996 997 f2fs_i_xnid_write(inode, 0); 998 999 set_new_dnode(&dn, inode, NULL, npage, nid); 1000 truncate_node(&dn); 1001 return 0; 1002 } 1003 1004 /* 1005 * Caller should grab and release a rwsem by calling f2fs_lock_op() and 1006 * f2fs_unlock_op(). 1007 */ 1008 int remove_inode_page(struct inode *inode) 1009 { 1010 struct dnode_of_data dn; 1011 int err; 1012 1013 set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino); 1014 err = get_dnode_of_data(&dn, 0, LOOKUP_NODE); 1015 if (err) 1016 return err; 1017 1018 err = truncate_xattr_node(inode); 1019 if (err) { 1020 f2fs_put_dnode(&dn); 1021 return err; 1022 } 1023 1024 /* remove potential inline_data blocks */ 1025 if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 1026 S_ISLNK(inode->i_mode)) 1027 truncate_data_blocks_range(&dn, 1); 1028 1029 /* 0 is possible, after f2fs_new_inode() has failed */ 1030 f2fs_bug_on(F2FS_I_SB(inode), 1031 inode->i_blocks != 0 && inode->i_blocks != 8); 1032 1033 /* will put inode & node pages */ 1034 truncate_node(&dn); 1035 return 0; 1036 } 1037 1038 struct page *new_inode_page(struct inode *inode) 1039 { 1040 struct dnode_of_data dn; 1041 1042 /* allocate inode page for new inode */ 1043 set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino); 1044 1045 /* caller should f2fs_put_page(page, 1); */ 1046 return new_node_page(&dn, 0); 1047 } 1048 1049 struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs) 1050 { 1051 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); 1052 struct node_info new_ni; 1053 struct page *page; 1054 int err; 1055 1056 if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC))) 1057 return ERR_PTR(-EPERM); 1058 1059 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false); 1060 if (!page) 1061 return ERR_PTR(-ENOMEM); 1062 1063 if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs)))) 1064 goto fail; 1065 1066 #ifdef CONFIG_F2FS_CHECK_FS 1067 get_node_info(sbi, dn->nid, &new_ni); 1068 f2fs_bug_on(sbi, new_ni.blk_addr != NULL_ADDR); 1069 #endif 1070 new_ni.nid = dn->nid; 1071 new_ni.ino = dn->inode->i_ino; 1072 new_ni.blk_addr = NULL_ADDR; 1073 new_ni.flag = 0; 1074 new_ni.version = 0; 1075 set_node_addr(sbi, &new_ni, NEW_ADDR, false); 1076 1077 f2fs_wait_on_page_writeback(page, NODE, true); 1078 fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true); 1079 set_cold_node(dn->inode, page); 1080 if (!PageUptodate(page)) 1081 SetPageUptodate(page); 1082 if (set_page_dirty(page)) 1083 dn->node_changed = true; 1084 1085 if (f2fs_has_xattr_block(ofs)) 1086 f2fs_i_xnid_write(dn->inode, dn->nid); 1087 1088 if (ofs == 0) 1089 inc_valid_inode_count(sbi); 1090 return page; 1091 1092 fail: 1093 clear_node_page_dirty(page); 1094 f2fs_put_page(page, 1); 1095 return ERR_PTR(err); 1096 } 1097 1098 /* 1099 * Caller should do after getting the following values. 1100 * 0: f2fs_put_page(page, 0) 1101 * LOCKED_PAGE or error: f2fs_put_page(page, 1) 1102 */ 1103 static int read_node_page(struct page *page, int op_flags) 1104 { 1105 struct f2fs_sb_info *sbi = F2FS_P_SB(page); 1106 struct node_info ni; 1107 struct f2fs_io_info fio = { 1108 .sbi = sbi, 1109 .type = NODE, 1110 .op = REQ_OP_READ, 1111 .op_flags = op_flags, 1112 .page = page, 1113 .encrypted_page = NULL, 1114 }; 1115 1116 if (PageUptodate(page)) 1117 return LOCKED_PAGE; 1118 1119 get_node_info(sbi, page->index, &ni); 1120 1121 if (unlikely(ni.blk_addr == NULL_ADDR)) { 1122 ClearPageUptodate(page); 1123 return -ENOENT; 1124 } 1125 1126 fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr; 1127 return f2fs_submit_page_bio(&fio); 1128 } 1129 1130 /* 1131 * Readahead a node page 1132 */ 1133 void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid) 1134 { 1135 struct page *apage; 1136 int err; 1137 1138 if (!nid) 1139 return; 1140 f2fs_bug_on(sbi, check_nid_range(sbi, nid)); 1141 1142 rcu_read_lock(); 1143 apage = radix_tree_lookup(&NODE_MAPPING(sbi)->page_tree, nid); 1144 rcu_read_unlock(); 1145 if (apage) 1146 return; 1147 1148 apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false); 1149 if (!apage) 1150 return; 1151 1152 err = read_node_page(apage, REQ_RAHEAD); 1153 f2fs_put_page(apage, err ? 1 : 0); 1154 } 1155 1156 static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid, 1157 struct page *parent, int start) 1158 { 1159 struct page *page; 1160 int err; 1161 1162 if (!nid) 1163 return ERR_PTR(-ENOENT); 1164 f2fs_bug_on(sbi, check_nid_range(sbi, nid)); 1165 repeat: 1166 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false); 1167 if (!page) 1168 return ERR_PTR(-ENOMEM); 1169 1170 err = read_node_page(page, 0); 1171 if (err < 0) { 1172 f2fs_put_page(page, 1); 1173 return ERR_PTR(err); 1174 } else if (err == LOCKED_PAGE) { 1175 err = 0; 1176 goto page_hit; 1177 } 1178 1179 if (parent) 1180 ra_node_pages(parent, start + 1, MAX_RA_NODE); 1181 1182 lock_page(page); 1183 1184 if (unlikely(page->mapping != NODE_MAPPING(sbi))) { 1185 f2fs_put_page(page, 1); 1186 goto repeat; 1187 } 1188 1189 if (unlikely(!PageUptodate(page))) { 1190 err = -EIO; 1191 goto out_err; 1192 } 1193 1194 if (!f2fs_inode_chksum_verify(sbi, page)) { 1195 err = -EBADMSG; 1196 goto out_err; 1197 } 1198 page_hit: 1199 if(unlikely(nid != nid_of_node(page))) { 1200 f2fs_msg(sbi->sb, KERN_WARNING, "inconsistent node block, " 1201 "nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]", 1202 nid, nid_of_node(page), ino_of_node(page), 1203 ofs_of_node(page), cpver_of_node(page), 1204 next_blkaddr_of_node(page)); 1205 err = -EINVAL; 1206 out_err: 1207 ClearPageUptodate(page); 1208 f2fs_put_page(page, 1); 1209 return ERR_PTR(err); 1210 } 1211 return page; 1212 } 1213 1214 struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid) 1215 { 1216 return __get_node_page(sbi, nid, NULL, 0); 1217 } 1218 1219 struct page *get_node_page_ra(struct page *parent, int start) 1220 { 1221 struct f2fs_sb_info *sbi = F2FS_P_SB(parent); 1222 nid_t nid = get_nid(parent, start, false); 1223 1224 return __get_node_page(sbi, nid, parent, start); 1225 } 1226 1227 static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino) 1228 { 1229 struct inode *inode; 1230 struct page *page; 1231 int ret; 1232 1233 /* should flush inline_data before evict_inode */ 1234 inode = ilookup(sbi->sb, ino); 1235 if (!inode) 1236 return; 1237 1238 page = f2fs_pagecache_get_page(inode->i_mapping, 0, 1239 FGP_LOCK|FGP_NOWAIT, 0); 1240 if (!page) 1241 goto iput_out; 1242 1243 if (!PageUptodate(page)) 1244 goto page_out; 1245 1246 if (!PageDirty(page)) 1247 goto page_out; 1248 1249 if (!clear_page_dirty_for_io(page)) 1250 goto page_out; 1251 1252 ret = f2fs_write_inline_data(inode, page); 1253 inode_dec_dirty_pages(inode); 1254 remove_dirty_inode(inode); 1255 if (ret) 1256 set_page_dirty(page); 1257 page_out: 1258 f2fs_put_page(page, 1); 1259 iput_out: 1260 iput(inode); 1261 } 1262 1263 static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino) 1264 { 1265 pgoff_t index; 1266 struct pagevec pvec; 1267 struct page *last_page = NULL; 1268 int nr_pages; 1269 1270 pagevec_init(&pvec); 1271 index = 0; 1272 1273 while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index, 1274 PAGECACHE_TAG_DIRTY))) { 1275 int i; 1276 1277 for (i = 0; i < nr_pages; i++) { 1278 struct page *page = pvec.pages[i]; 1279 1280 if (unlikely(f2fs_cp_error(sbi))) { 1281 f2fs_put_page(last_page, 0); 1282 pagevec_release(&pvec); 1283 return ERR_PTR(-EIO); 1284 } 1285 1286 if (!IS_DNODE(page) || !is_cold_node(page)) 1287 continue; 1288 if (ino_of_node(page) != ino) 1289 continue; 1290 1291 lock_page(page); 1292 1293 if (unlikely(page->mapping != NODE_MAPPING(sbi))) { 1294 continue_unlock: 1295 unlock_page(page); 1296 continue; 1297 } 1298 if (ino_of_node(page) != ino) 1299 goto continue_unlock; 1300 1301 if (!PageDirty(page)) { 1302 /* someone wrote it for us */ 1303 goto continue_unlock; 1304 } 1305 1306 if (last_page) 1307 f2fs_put_page(last_page, 0); 1308 1309 get_page(page); 1310 last_page = page; 1311 unlock_page(page); 1312 } 1313 pagevec_release(&pvec); 1314 cond_resched(); 1315 } 1316 return last_page; 1317 } 1318 1319 static int __write_node_page(struct page *page, bool atomic, bool *submitted, 1320 struct writeback_control *wbc, bool do_balance, 1321 enum iostat_type io_type) 1322 { 1323 struct f2fs_sb_info *sbi = F2FS_P_SB(page); 1324 nid_t nid; 1325 struct node_info ni; 1326 struct f2fs_io_info fio = { 1327 .sbi = sbi, 1328 .ino = ino_of_node(page), 1329 .type = NODE, 1330 .op = REQ_OP_WRITE, 1331 .op_flags = wbc_to_write_flags(wbc), 1332 .page = page, 1333 .encrypted_page = NULL, 1334 .submitted = false, 1335 .io_type = io_type, 1336 .io_wbc = wbc, 1337 }; 1338 1339 trace_f2fs_writepage(page, NODE); 1340 1341 if (unlikely(f2fs_cp_error(sbi))) { 1342 dec_page_count(sbi, F2FS_DIRTY_NODES); 1343 unlock_page(page); 1344 return 0; 1345 } 1346 1347 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) 1348 goto redirty_out; 1349 1350 /* get old block addr of this node page */ 1351 nid = nid_of_node(page); 1352 f2fs_bug_on(sbi, page->index != nid); 1353 1354 if (wbc->for_reclaim) { 1355 if (!down_read_trylock(&sbi->node_write)) 1356 goto redirty_out; 1357 } else { 1358 down_read(&sbi->node_write); 1359 } 1360 1361 get_node_info(sbi, nid, &ni); 1362 1363 /* This page is already truncated */ 1364 if (unlikely(ni.blk_addr == NULL_ADDR)) { 1365 ClearPageUptodate(page); 1366 dec_page_count(sbi, F2FS_DIRTY_NODES); 1367 up_read(&sbi->node_write); 1368 unlock_page(page); 1369 return 0; 1370 } 1371 1372 if (atomic && !test_opt(sbi, NOBARRIER)) 1373 fio.op_flags |= REQ_PREFLUSH | REQ_FUA; 1374 1375 set_page_writeback(page); 1376 fio.old_blkaddr = ni.blk_addr; 1377 write_node_page(nid, &fio); 1378 set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page)); 1379 dec_page_count(sbi, F2FS_DIRTY_NODES); 1380 up_read(&sbi->node_write); 1381 1382 if (wbc->for_reclaim) { 1383 f2fs_submit_merged_write_cond(sbi, page->mapping->host, 0, 1384 page->index, NODE); 1385 submitted = NULL; 1386 } 1387 1388 unlock_page(page); 1389 1390 if (unlikely(f2fs_cp_error(sbi))) { 1391 f2fs_submit_merged_write(sbi, NODE); 1392 submitted = NULL; 1393 } 1394 if (submitted) 1395 *submitted = fio.submitted; 1396 1397 if (do_balance) 1398 f2fs_balance_fs(sbi, false); 1399 return 0; 1400 1401 redirty_out: 1402 redirty_page_for_writepage(wbc, page); 1403 return AOP_WRITEPAGE_ACTIVATE; 1404 } 1405 1406 void move_node_page(struct page *node_page, int gc_type) 1407 { 1408 if (gc_type == FG_GC) { 1409 struct writeback_control wbc = { 1410 .sync_mode = WB_SYNC_ALL, 1411 .nr_to_write = 1, 1412 .for_reclaim = 0, 1413 }; 1414 1415 set_page_dirty(node_page); 1416 f2fs_wait_on_page_writeback(node_page, NODE, true); 1417 1418 f2fs_bug_on(F2FS_P_SB(node_page), PageWriteback(node_page)); 1419 if (!clear_page_dirty_for_io(node_page)) 1420 goto out_page; 1421 1422 if (__write_node_page(node_page, false, NULL, 1423 &wbc, false, FS_GC_NODE_IO)) 1424 unlock_page(node_page); 1425 goto release_page; 1426 } else { 1427 /* set page dirty and write it */ 1428 if (!PageWriteback(node_page)) 1429 set_page_dirty(node_page); 1430 } 1431 out_page: 1432 unlock_page(node_page); 1433 release_page: 1434 f2fs_put_page(node_page, 0); 1435 } 1436 1437 static int f2fs_write_node_page(struct page *page, 1438 struct writeback_control *wbc) 1439 { 1440 return __write_node_page(page, false, NULL, wbc, false, FS_NODE_IO); 1441 } 1442 1443 int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode, 1444 struct writeback_control *wbc, bool atomic) 1445 { 1446 pgoff_t index; 1447 pgoff_t last_idx = ULONG_MAX; 1448 struct pagevec pvec; 1449 int ret = 0; 1450 struct page *last_page = NULL; 1451 bool marked = false; 1452 nid_t ino = inode->i_ino; 1453 int nr_pages; 1454 1455 if (atomic) { 1456 last_page = last_fsync_dnode(sbi, ino); 1457 if (IS_ERR_OR_NULL(last_page)) 1458 return PTR_ERR_OR_ZERO(last_page); 1459 } 1460 retry: 1461 pagevec_init(&pvec); 1462 index = 0; 1463 1464 while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index, 1465 PAGECACHE_TAG_DIRTY))) { 1466 int i; 1467 1468 for (i = 0; i < nr_pages; i++) { 1469 struct page *page = pvec.pages[i]; 1470 bool submitted = false; 1471 1472 if (unlikely(f2fs_cp_error(sbi))) { 1473 f2fs_put_page(last_page, 0); 1474 pagevec_release(&pvec); 1475 ret = -EIO; 1476 goto out; 1477 } 1478 1479 if (!IS_DNODE(page) || !is_cold_node(page)) 1480 continue; 1481 if (ino_of_node(page) != ino) 1482 continue; 1483 1484 lock_page(page); 1485 1486 if (unlikely(page->mapping != NODE_MAPPING(sbi))) { 1487 continue_unlock: 1488 unlock_page(page); 1489 continue; 1490 } 1491 if (ino_of_node(page) != ino) 1492 goto continue_unlock; 1493 1494 if (!PageDirty(page) && page != last_page) { 1495 /* someone wrote it for us */ 1496 goto continue_unlock; 1497 } 1498 1499 f2fs_wait_on_page_writeback(page, NODE, true); 1500 BUG_ON(PageWriteback(page)); 1501 1502 set_fsync_mark(page, 0); 1503 set_dentry_mark(page, 0); 1504 1505 if (!atomic || page == last_page) { 1506 set_fsync_mark(page, 1); 1507 if (IS_INODE(page)) { 1508 if (is_inode_flag_set(inode, 1509 FI_DIRTY_INODE)) 1510 update_inode(inode, page); 1511 set_dentry_mark(page, 1512 need_dentry_mark(sbi, ino)); 1513 } 1514 /* may be written by other thread */ 1515 if (!PageDirty(page)) 1516 set_page_dirty(page); 1517 } 1518 1519 if (!clear_page_dirty_for_io(page)) 1520 goto continue_unlock; 1521 1522 ret = __write_node_page(page, atomic && 1523 page == last_page, 1524 &submitted, wbc, true, 1525 FS_NODE_IO); 1526 if (ret) { 1527 unlock_page(page); 1528 f2fs_put_page(last_page, 0); 1529 break; 1530 } else if (submitted) { 1531 last_idx = page->index; 1532 } 1533 1534 if (page == last_page) { 1535 f2fs_put_page(page, 0); 1536 marked = true; 1537 break; 1538 } 1539 } 1540 pagevec_release(&pvec); 1541 cond_resched(); 1542 1543 if (ret || marked) 1544 break; 1545 } 1546 if (!ret && atomic && !marked) { 1547 f2fs_msg(sbi->sb, KERN_DEBUG, 1548 "Retry to write fsync mark: ino=%u, idx=%lx", 1549 ino, last_page->index); 1550 lock_page(last_page); 1551 f2fs_wait_on_page_writeback(last_page, NODE, true); 1552 set_page_dirty(last_page); 1553 unlock_page(last_page); 1554 goto retry; 1555 } 1556 out: 1557 if (last_idx != ULONG_MAX) 1558 f2fs_submit_merged_write_cond(sbi, NULL, ino, last_idx, NODE); 1559 return ret ? -EIO: 0; 1560 } 1561 1562 int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc, 1563 bool do_balance, enum iostat_type io_type) 1564 { 1565 pgoff_t index; 1566 struct pagevec pvec; 1567 int step = 0; 1568 int nwritten = 0; 1569 int ret = 0; 1570 int nr_pages; 1571 1572 pagevec_init(&pvec); 1573 1574 next_step: 1575 index = 0; 1576 1577 while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index, 1578 PAGECACHE_TAG_DIRTY))) { 1579 int i; 1580 1581 for (i = 0; i < nr_pages; i++) { 1582 struct page *page = pvec.pages[i]; 1583 bool submitted = false; 1584 1585 /* 1586 * flushing sequence with step: 1587 * 0. indirect nodes 1588 * 1. dentry dnodes 1589 * 2. file dnodes 1590 */ 1591 if (step == 0 && IS_DNODE(page)) 1592 continue; 1593 if (step == 1 && (!IS_DNODE(page) || 1594 is_cold_node(page))) 1595 continue; 1596 if (step == 2 && (!IS_DNODE(page) || 1597 !is_cold_node(page))) 1598 continue; 1599 lock_node: 1600 if (!trylock_page(page)) 1601 continue; 1602 1603 if (unlikely(page->mapping != NODE_MAPPING(sbi))) { 1604 continue_unlock: 1605 unlock_page(page); 1606 continue; 1607 } 1608 1609 if (!PageDirty(page)) { 1610 /* someone wrote it for us */ 1611 goto continue_unlock; 1612 } 1613 1614 /* flush inline_data */ 1615 if (is_inline_node(page)) { 1616 clear_inline_node(page); 1617 unlock_page(page); 1618 flush_inline_data(sbi, ino_of_node(page)); 1619 goto lock_node; 1620 } 1621 1622 f2fs_wait_on_page_writeback(page, NODE, true); 1623 1624 BUG_ON(PageWriteback(page)); 1625 if (!clear_page_dirty_for_io(page)) 1626 goto continue_unlock; 1627 1628 set_fsync_mark(page, 0); 1629 set_dentry_mark(page, 0); 1630 1631 ret = __write_node_page(page, false, &submitted, 1632 wbc, do_balance, io_type); 1633 if (ret) 1634 unlock_page(page); 1635 else if (submitted) 1636 nwritten++; 1637 1638 if (--wbc->nr_to_write == 0) 1639 break; 1640 } 1641 pagevec_release(&pvec); 1642 cond_resched(); 1643 1644 if (wbc->nr_to_write == 0) { 1645 step = 2; 1646 break; 1647 } 1648 } 1649 1650 if (step < 2) { 1651 step++; 1652 goto next_step; 1653 } 1654 1655 if (nwritten) 1656 f2fs_submit_merged_write(sbi, NODE); 1657 1658 if (unlikely(f2fs_cp_error(sbi))) 1659 return -EIO; 1660 return ret; 1661 } 1662 1663 int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino) 1664 { 1665 pgoff_t index = 0; 1666 struct pagevec pvec; 1667 int ret2, ret = 0; 1668 int nr_pages; 1669 1670 pagevec_init(&pvec); 1671 1672 while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index, 1673 PAGECACHE_TAG_WRITEBACK))) { 1674 int i; 1675 1676 for (i = 0; i < nr_pages; i++) { 1677 struct page *page = pvec.pages[i]; 1678 1679 if (ino && ino_of_node(page) == ino) { 1680 f2fs_wait_on_page_writeback(page, NODE, true); 1681 if (TestClearPageError(page)) 1682 ret = -EIO; 1683 } 1684 } 1685 pagevec_release(&pvec); 1686 cond_resched(); 1687 } 1688 1689 ret2 = filemap_check_errors(NODE_MAPPING(sbi)); 1690 if (!ret) 1691 ret = ret2; 1692 return ret; 1693 } 1694 1695 static int f2fs_write_node_pages(struct address_space *mapping, 1696 struct writeback_control *wbc) 1697 { 1698 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping); 1699 struct blk_plug plug; 1700 long diff; 1701 1702 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) 1703 goto skip_write; 1704 1705 /* balancing f2fs's metadata in background */ 1706 f2fs_balance_fs_bg(sbi); 1707 1708 /* collect a number of dirty node pages and write together */ 1709 if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE)) 1710 goto skip_write; 1711 1712 trace_f2fs_writepages(mapping->host, wbc, NODE); 1713 1714 diff = nr_pages_to_write(sbi, NODE, wbc); 1715 wbc->sync_mode = WB_SYNC_NONE; 1716 blk_start_plug(&plug); 1717 sync_node_pages(sbi, wbc, true, FS_NODE_IO); 1718 blk_finish_plug(&plug); 1719 wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff); 1720 return 0; 1721 1722 skip_write: 1723 wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES); 1724 trace_f2fs_writepages(mapping->host, wbc, NODE); 1725 return 0; 1726 } 1727 1728 static int f2fs_set_node_page_dirty(struct page *page) 1729 { 1730 trace_f2fs_set_page_dirty(page, NODE); 1731 1732 if (!PageUptodate(page)) 1733 SetPageUptodate(page); 1734 if (!PageDirty(page)) { 1735 f2fs_set_page_dirty_nobuffers(page); 1736 inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES); 1737 SetPagePrivate(page); 1738 f2fs_trace_pid(page); 1739 return 1; 1740 } 1741 return 0; 1742 } 1743 1744 /* 1745 * Structure of the f2fs node operations 1746 */ 1747 const struct address_space_operations f2fs_node_aops = { 1748 .writepage = f2fs_write_node_page, 1749 .writepages = f2fs_write_node_pages, 1750 .set_page_dirty = f2fs_set_node_page_dirty, 1751 .invalidatepage = f2fs_invalidate_page, 1752 .releasepage = f2fs_release_page, 1753 #ifdef CONFIG_MIGRATION 1754 .migratepage = f2fs_migrate_page, 1755 #endif 1756 }; 1757 1758 static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i, 1759 nid_t n) 1760 { 1761 return radix_tree_lookup(&nm_i->free_nid_root, n); 1762 } 1763 1764 static int __insert_free_nid(struct f2fs_sb_info *sbi, 1765 struct free_nid *i, enum nid_state state) 1766 { 1767 struct f2fs_nm_info *nm_i = NM_I(sbi); 1768 1769 int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i); 1770 if (err) 1771 return err; 1772 1773 f2fs_bug_on(sbi, state != i->state); 1774 nm_i->nid_cnt[state]++; 1775 if (state == FREE_NID) 1776 list_add_tail(&i->list, &nm_i->free_nid_list); 1777 return 0; 1778 } 1779 1780 static void __remove_free_nid(struct f2fs_sb_info *sbi, 1781 struct free_nid *i, enum nid_state state) 1782 { 1783 struct f2fs_nm_info *nm_i = NM_I(sbi); 1784 1785 f2fs_bug_on(sbi, state != i->state); 1786 nm_i->nid_cnt[state]--; 1787 if (state == FREE_NID) 1788 list_del(&i->list); 1789 radix_tree_delete(&nm_i->free_nid_root, i->nid); 1790 } 1791 1792 static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i, 1793 enum nid_state org_state, enum nid_state dst_state) 1794 { 1795 struct f2fs_nm_info *nm_i = NM_I(sbi); 1796 1797 f2fs_bug_on(sbi, org_state != i->state); 1798 i->state = dst_state; 1799 nm_i->nid_cnt[org_state]--; 1800 nm_i->nid_cnt[dst_state]++; 1801 1802 switch (dst_state) { 1803 case PREALLOC_NID: 1804 list_del(&i->list); 1805 break; 1806 case FREE_NID: 1807 list_add_tail(&i->list, &nm_i->free_nid_list); 1808 break; 1809 default: 1810 BUG_ON(1); 1811 } 1812 } 1813 1814 static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid, 1815 bool set, bool build) 1816 { 1817 struct f2fs_nm_info *nm_i = NM_I(sbi); 1818 unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid); 1819 unsigned int nid_ofs = nid - START_NID(nid); 1820 1821 if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap)) 1822 return; 1823 1824 if (set) { 1825 if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs])) 1826 return; 1827 __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); 1828 nm_i->free_nid_count[nat_ofs]++; 1829 } else { 1830 if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs])) 1831 return; 1832 __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); 1833 if (!build) 1834 nm_i->free_nid_count[nat_ofs]--; 1835 } 1836 } 1837 1838 /* return if the nid is recognized as free */ 1839 static bool add_free_nid(struct f2fs_sb_info *sbi, 1840 nid_t nid, bool build, bool update) 1841 { 1842 struct f2fs_nm_info *nm_i = NM_I(sbi); 1843 struct free_nid *i, *e; 1844 struct nat_entry *ne; 1845 int err = -EINVAL; 1846 bool ret = false; 1847 1848 /* 0 nid should not be used */ 1849 if (unlikely(nid == 0)) 1850 return false; 1851 1852 i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS); 1853 i->nid = nid; 1854 i->state = FREE_NID; 1855 1856 radix_tree_preload(GFP_NOFS | __GFP_NOFAIL); 1857 1858 spin_lock(&nm_i->nid_list_lock); 1859 1860 if (build) { 1861 /* 1862 * Thread A Thread B 1863 * - f2fs_create 1864 * - f2fs_new_inode 1865 * - alloc_nid 1866 * - __insert_nid_to_list(PREALLOC_NID) 1867 * - f2fs_balance_fs_bg 1868 * - build_free_nids 1869 * - __build_free_nids 1870 * - scan_nat_page 1871 * - add_free_nid 1872 * - __lookup_nat_cache 1873 * - f2fs_add_link 1874 * - init_inode_metadata 1875 * - new_inode_page 1876 * - new_node_page 1877 * - set_node_addr 1878 * - alloc_nid_done 1879 * - __remove_nid_from_list(PREALLOC_NID) 1880 * - __insert_nid_to_list(FREE_NID) 1881 */ 1882 ne = __lookup_nat_cache(nm_i, nid); 1883 if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) || 1884 nat_get_blkaddr(ne) != NULL_ADDR)) 1885 goto err_out; 1886 1887 e = __lookup_free_nid_list(nm_i, nid); 1888 if (e) { 1889 if (e->state == FREE_NID) 1890 ret = true; 1891 goto err_out; 1892 } 1893 } 1894 ret = true; 1895 err = __insert_free_nid(sbi, i, FREE_NID); 1896 err_out: 1897 if (update) { 1898 update_free_nid_bitmap(sbi, nid, ret, build); 1899 if (!build) 1900 nm_i->available_nids++; 1901 } 1902 spin_unlock(&nm_i->nid_list_lock); 1903 radix_tree_preload_end(); 1904 1905 if (err) 1906 kmem_cache_free(free_nid_slab, i); 1907 return ret; 1908 } 1909 1910 static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid) 1911 { 1912 struct f2fs_nm_info *nm_i = NM_I(sbi); 1913 struct free_nid *i; 1914 bool need_free = false; 1915 1916 spin_lock(&nm_i->nid_list_lock); 1917 i = __lookup_free_nid_list(nm_i, nid); 1918 if (i && i->state == FREE_NID) { 1919 __remove_free_nid(sbi, i, FREE_NID); 1920 need_free = true; 1921 } 1922 spin_unlock(&nm_i->nid_list_lock); 1923 1924 if (need_free) 1925 kmem_cache_free(free_nid_slab, i); 1926 } 1927 1928 static void scan_nat_page(struct f2fs_sb_info *sbi, 1929 struct page *nat_page, nid_t start_nid) 1930 { 1931 struct f2fs_nm_info *nm_i = NM_I(sbi); 1932 struct f2fs_nat_block *nat_blk = page_address(nat_page); 1933 block_t blk_addr; 1934 unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid); 1935 int i; 1936 1937 __set_bit_le(nat_ofs, nm_i->nat_block_bitmap); 1938 1939 i = start_nid % NAT_ENTRY_PER_BLOCK; 1940 1941 for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) { 1942 if (unlikely(start_nid >= nm_i->max_nid)) 1943 break; 1944 1945 blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr); 1946 f2fs_bug_on(sbi, blk_addr == NEW_ADDR); 1947 if (blk_addr == NULL_ADDR) { 1948 add_free_nid(sbi, start_nid, true, true); 1949 } else { 1950 spin_lock(&NM_I(sbi)->nid_list_lock); 1951 update_free_nid_bitmap(sbi, start_nid, false, true); 1952 spin_unlock(&NM_I(sbi)->nid_list_lock); 1953 } 1954 } 1955 } 1956 1957 static void scan_curseg_cache(struct f2fs_sb_info *sbi) 1958 { 1959 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); 1960 struct f2fs_journal *journal = curseg->journal; 1961 int i; 1962 1963 down_read(&curseg->journal_rwsem); 1964 for (i = 0; i < nats_in_cursum(journal); i++) { 1965 block_t addr; 1966 nid_t nid; 1967 1968 addr = le32_to_cpu(nat_in_journal(journal, i).block_addr); 1969 nid = le32_to_cpu(nid_in_journal(journal, i)); 1970 if (addr == NULL_ADDR) 1971 add_free_nid(sbi, nid, true, false); 1972 else 1973 remove_free_nid(sbi, nid); 1974 } 1975 up_read(&curseg->journal_rwsem); 1976 } 1977 1978 static void scan_free_nid_bits(struct f2fs_sb_info *sbi) 1979 { 1980 struct f2fs_nm_info *nm_i = NM_I(sbi); 1981 unsigned int i, idx; 1982 nid_t nid; 1983 1984 down_read(&nm_i->nat_tree_lock); 1985 1986 for (i = 0; i < nm_i->nat_blocks; i++) { 1987 if (!test_bit_le(i, nm_i->nat_block_bitmap)) 1988 continue; 1989 if (!nm_i->free_nid_count[i]) 1990 continue; 1991 for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) { 1992 idx = find_next_bit_le(nm_i->free_nid_bitmap[i], 1993 NAT_ENTRY_PER_BLOCK, idx); 1994 if (idx >= NAT_ENTRY_PER_BLOCK) 1995 break; 1996 1997 nid = i * NAT_ENTRY_PER_BLOCK + idx; 1998 add_free_nid(sbi, nid, true, false); 1999 2000 if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS) 2001 goto out; 2002 } 2003 } 2004 out: 2005 scan_curseg_cache(sbi); 2006 2007 up_read(&nm_i->nat_tree_lock); 2008 } 2009 2010 static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount) 2011 { 2012 struct f2fs_nm_info *nm_i = NM_I(sbi); 2013 int i = 0; 2014 nid_t nid = nm_i->next_scan_nid; 2015 2016 if (unlikely(nid >= nm_i->max_nid)) 2017 nid = 0; 2018 2019 /* Enough entries */ 2020 if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK) 2021 return; 2022 2023 if (!sync && !available_free_memory(sbi, FREE_NIDS)) 2024 return; 2025 2026 if (!mount) { 2027 /* try to find free nids in free_nid_bitmap */ 2028 scan_free_nid_bits(sbi); 2029 2030 if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK) 2031 return; 2032 } 2033 2034 /* readahead nat pages to be scanned */ 2035 ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, 2036 META_NAT, true); 2037 2038 down_read(&nm_i->nat_tree_lock); 2039 2040 while (1) { 2041 if (!test_bit_le(NAT_BLOCK_OFFSET(nid), 2042 nm_i->nat_block_bitmap)) { 2043 struct page *page = get_current_nat_page(sbi, nid); 2044 2045 scan_nat_page(sbi, page, nid); 2046 f2fs_put_page(page, 1); 2047 } 2048 2049 nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK)); 2050 if (unlikely(nid >= nm_i->max_nid)) 2051 nid = 0; 2052 2053 if (++i >= FREE_NID_PAGES) 2054 break; 2055 } 2056 2057 /* go to the next free nat pages to find free nids abundantly */ 2058 nm_i->next_scan_nid = nid; 2059 2060 /* find free nids from current sum_pages */ 2061 scan_curseg_cache(sbi); 2062 2063 up_read(&nm_i->nat_tree_lock); 2064 2065 ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid), 2066 nm_i->ra_nid_pages, META_NAT, false); 2067 } 2068 2069 void build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount) 2070 { 2071 mutex_lock(&NM_I(sbi)->build_lock); 2072 __build_free_nids(sbi, sync, mount); 2073 mutex_unlock(&NM_I(sbi)->build_lock); 2074 } 2075 2076 /* 2077 * If this function returns success, caller can obtain a new nid 2078 * from second parameter of this function. 2079 * The returned nid could be used ino as well as nid when inode is created. 2080 */ 2081 bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid) 2082 { 2083 struct f2fs_nm_info *nm_i = NM_I(sbi); 2084 struct free_nid *i = NULL; 2085 retry: 2086 #ifdef CONFIG_F2FS_FAULT_INJECTION 2087 if (time_to_inject(sbi, FAULT_ALLOC_NID)) { 2088 f2fs_show_injection_info(FAULT_ALLOC_NID); 2089 return false; 2090 } 2091 #endif 2092 spin_lock(&nm_i->nid_list_lock); 2093 2094 if (unlikely(nm_i->available_nids == 0)) { 2095 spin_unlock(&nm_i->nid_list_lock); 2096 return false; 2097 } 2098 2099 /* We should not use stale free nids created by build_free_nids */ 2100 if (nm_i->nid_cnt[FREE_NID] && !on_build_free_nids(nm_i)) { 2101 f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list)); 2102 i = list_first_entry(&nm_i->free_nid_list, 2103 struct free_nid, list); 2104 *nid = i->nid; 2105 2106 __move_free_nid(sbi, i, FREE_NID, PREALLOC_NID); 2107 nm_i->available_nids--; 2108 2109 update_free_nid_bitmap(sbi, *nid, false, false); 2110 2111 spin_unlock(&nm_i->nid_list_lock); 2112 return true; 2113 } 2114 spin_unlock(&nm_i->nid_list_lock); 2115 2116 /* Let's scan nat pages and its caches to get free nids */ 2117 build_free_nids(sbi, true, false); 2118 goto retry; 2119 } 2120 2121 /* 2122 * alloc_nid() should be called prior to this function. 2123 */ 2124 void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid) 2125 { 2126 struct f2fs_nm_info *nm_i = NM_I(sbi); 2127 struct free_nid *i; 2128 2129 spin_lock(&nm_i->nid_list_lock); 2130 i = __lookup_free_nid_list(nm_i, nid); 2131 f2fs_bug_on(sbi, !i); 2132 __remove_free_nid(sbi, i, PREALLOC_NID); 2133 spin_unlock(&nm_i->nid_list_lock); 2134 2135 kmem_cache_free(free_nid_slab, i); 2136 } 2137 2138 /* 2139 * alloc_nid() should be called prior to this function. 2140 */ 2141 void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid) 2142 { 2143 struct f2fs_nm_info *nm_i = NM_I(sbi); 2144 struct free_nid *i; 2145 bool need_free = false; 2146 2147 if (!nid) 2148 return; 2149 2150 spin_lock(&nm_i->nid_list_lock); 2151 i = __lookup_free_nid_list(nm_i, nid); 2152 f2fs_bug_on(sbi, !i); 2153 2154 if (!available_free_memory(sbi, FREE_NIDS)) { 2155 __remove_free_nid(sbi, i, PREALLOC_NID); 2156 need_free = true; 2157 } else { 2158 __move_free_nid(sbi, i, PREALLOC_NID, FREE_NID); 2159 } 2160 2161 nm_i->available_nids++; 2162 2163 update_free_nid_bitmap(sbi, nid, true, false); 2164 2165 spin_unlock(&nm_i->nid_list_lock); 2166 2167 if (need_free) 2168 kmem_cache_free(free_nid_slab, i); 2169 } 2170 2171 int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink) 2172 { 2173 struct f2fs_nm_info *nm_i = NM_I(sbi); 2174 struct free_nid *i, *next; 2175 int nr = nr_shrink; 2176 2177 if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS) 2178 return 0; 2179 2180 if (!mutex_trylock(&nm_i->build_lock)) 2181 return 0; 2182 2183 spin_lock(&nm_i->nid_list_lock); 2184 list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) { 2185 if (nr_shrink <= 0 || 2186 nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS) 2187 break; 2188 2189 __remove_free_nid(sbi, i, FREE_NID); 2190 kmem_cache_free(free_nid_slab, i); 2191 nr_shrink--; 2192 } 2193 spin_unlock(&nm_i->nid_list_lock); 2194 mutex_unlock(&nm_i->build_lock); 2195 2196 return nr - nr_shrink; 2197 } 2198 2199 void recover_inline_xattr(struct inode *inode, struct page *page) 2200 { 2201 void *src_addr, *dst_addr; 2202 size_t inline_size; 2203 struct page *ipage; 2204 struct f2fs_inode *ri; 2205 2206 ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino); 2207 f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage)); 2208 2209 ri = F2FS_INODE(page); 2210 if (ri->i_inline & F2FS_INLINE_XATTR) { 2211 set_inode_flag(inode, FI_INLINE_XATTR); 2212 } else { 2213 clear_inode_flag(inode, FI_INLINE_XATTR); 2214 goto update_inode; 2215 } 2216 2217 dst_addr = inline_xattr_addr(inode, ipage); 2218 src_addr = inline_xattr_addr(inode, page); 2219 inline_size = inline_xattr_size(inode); 2220 2221 f2fs_wait_on_page_writeback(ipage, NODE, true); 2222 memcpy(dst_addr, src_addr, inline_size); 2223 update_inode: 2224 update_inode(inode, ipage); 2225 f2fs_put_page(ipage, 1); 2226 } 2227 2228 int recover_xattr_data(struct inode *inode, struct page *page) 2229 { 2230 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 2231 nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid; 2232 nid_t new_xnid; 2233 struct dnode_of_data dn; 2234 struct node_info ni; 2235 struct page *xpage; 2236 2237 if (!prev_xnid) 2238 goto recover_xnid; 2239 2240 /* 1: invalidate the previous xattr nid */ 2241 get_node_info(sbi, prev_xnid, &ni); 2242 invalidate_blocks(sbi, ni.blk_addr); 2243 dec_valid_node_count(sbi, inode, false); 2244 set_node_addr(sbi, &ni, NULL_ADDR, false); 2245 2246 recover_xnid: 2247 /* 2: update xattr nid in inode */ 2248 if (!alloc_nid(sbi, &new_xnid)) 2249 return -ENOSPC; 2250 2251 set_new_dnode(&dn, inode, NULL, NULL, new_xnid); 2252 xpage = new_node_page(&dn, XATTR_NODE_OFFSET); 2253 if (IS_ERR(xpage)) { 2254 alloc_nid_failed(sbi, new_xnid); 2255 return PTR_ERR(xpage); 2256 } 2257 2258 alloc_nid_done(sbi, new_xnid); 2259 update_inode_page(inode); 2260 2261 /* 3: update and set xattr node page dirty */ 2262 memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE); 2263 2264 set_page_dirty(xpage); 2265 f2fs_put_page(xpage, 1); 2266 2267 return 0; 2268 } 2269 2270 int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page) 2271 { 2272 struct f2fs_inode *src, *dst; 2273 nid_t ino = ino_of_node(page); 2274 struct node_info old_ni, new_ni; 2275 struct page *ipage; 2276 2277 get_node_info(sbi, ino, &old_ni); 2278 2279 if (unlikely(old_ni.blk_addr != NULL_ADDR)) 2280 return -EINVAL; 2281 retry: 2282 ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false); 2283 if (!ipage) { 2284 congestion_wait(BLK_RW_ASYNC, HZ/50); 2285 goto retry; 2286 } 2287 2288 /* Should not use this inode from free nid list */ 2289 remove_free_nid(sbi, ino); 2290 2291 if (!PageUptodate(ipage)) 2292 SetPageUptodate(ipage); 2293 fill_node_footer(ipage, ino, ino, 0, true); 2294 2295 src = F2FS_INODE(page); 2296 dst = F2FS_INODE(ipage); 2297 2298 memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src); 2299 dst->i_size = 0; 2300 dst->i_blocks = cpu_to_le64(1); 2301 dst->i_links = cpu_to_le32(1); 2302 dst->i_xattr_nid = 0; 2303 dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR); 2304 if (dst->i_inline & F2FS_EXTRA_ATTR) { 2305 dst->i_extra_isize = src->i_extra_isize; 2306 2307 if (f2fs_sb_has_flexible_inline_xattr(sbi->sb) && 2308 F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize), 2309 i_inline_xattr_size)) 2310 dst->i_inline_xattr_size = src->i_inline_xattr_size; 2311 2312 if (f2fs_sb_has_project_quota(sbi->sb) && 2313 F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize), 2314 i_projid)) 2315 dst->i_projid = src->i_projid; 2316 } 2317 2318 new_ni = old_ni; 2319 new_ni.ino = ino; 2320 2321 if (unlikely(inc_valid_node_count(sbi, NULL, true))) 2322 WARN_ON(1); 2323 set_node_addr(sbi, &new_ni, NEW_ADDR, false); 2324 inc_valid_inode_count(sbi); 2325 set_page_dirty(ipage); 2326 f2fs_put_page(ipage, 1); 2327 return 0; 2328 } 2329 2330 void restore_node_summary(struct f2fs_sb_info *sbi, 2331 unsigned int segno, struct f2fs_summary_block *sum) 2332 { 2333 struct f2fs_node *rn; 2334 struct f2fs_summary *sum_entry; 2335 block_t addr; 2336 int i, idx, last_offset, nrpages; 2337 2338 /* scan the node segment */ 2339 last_offset = sbi->blocks_per_seg; 2340 addr = START_BLOCK(sbi, segno); 2341 sum_entry = &sum->entries[0]; 2342 2343 for (i = 0; i < last_offset; i += nrpages, addr += nrpages) { 2344 nrpages = min(last_offset - i, BIO_MAX_PAGES); 2345 2346 /* readahead node pages */ 2347 ra_meta_pages(sbi, addr, nrpages, META_POR, true); 2348 2349 for (idx = addr; idx < addr + nrpages; idx++) { 2350 struct page *page = get_tmp_page(sbi, idx); 2351 2352 rn = F2FS_NODE(page); 2353 sum_entry->nid = rn->footer.nid; 2354 sum_entry->version = 0; 2355 sum_entry->ofs_in_node = 0; 2356 sum_entry++; 2357 f2fs_put_page(page, 1); 2358 } 2359 2360 invalidate_mapping_pages(META_MAPPING(sbi), addr, 2361 addr + nrpages); 2362 } 2363 } 2364 2365 static void remove_nats_in_journal(struct f2fs_sb_info *sbi) 2366 { 2367 struct f2fs_nm_info *nm_i = NM_I(sbi); 2368 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); 2369 struct f2fs_journal *journal = curseg->journal; 2370 int i; 2371 2372 down_write(&curseg->journal_rwsem); 2373 for (i = 0; i < nats_in_cursum(journal); i++) { 2374 struct nat_entry *ne; 2375 struct f2fs_nat_entry raw_ne; 2376 nid_t nid = le32_to_cpu(nid_in_journal(journal, i)); 2377 2378 raw_ne = nat_in_journal(journal, i); 2379 2380 ne = __lookup_nat_cache(nm_i, nid); 2381 if (!ne) { 2382 ne = __alloc_nat_entry(nid, true); 2383 __init_nat_entry(nm_i, ne, &raw_ne, true); 2384 } 2385 2386 /* 2387 * if a free nat in journal has not been used after last 2388 * checkpoint, we should remove it from available nids, 2389 * since later we will add it again. 2390 */ 2391 if (!get_nat_flag(ne, IS_DIRTY) && 2392 le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) { 2393 spin_lock(&nm_i->nid_list_lock); 2394 nm_i->available_nids--; 2395 spin_unlock(&nm_i->nid_list_lock); 2396 } 2397 2398 __set_nat_cache_dirty(nm_i, ne); 2399 } 2400 update_nats_in_cursum(journal, -i); 2401 up_write(&curseg->journal_rwsem); 2402 } 2403 2404 static void __adjust_nat_entry_set(struct nat_entry_set *nes, 2405 struct list_head *head, int max) 2406 { 2407 struct nat_entry_set *cur; 2408 2409 if (nes->entry_cnt >= max) 2410 goto add_out; 2411 2412 list_for_each_entry(cur, head, set_list) { 2413 if (cur->entry_cnt >= nes->entry_cnt) { 2414 list_add(&nes->set_list, cur->set_list.prev); 2415 return; 2416 } 2417 } 2418 add_out: 2419 list_add_tail(&nes->set_list, head); 2420 } 2421 2422 static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid, 2423 struct page *page) 2424 { 2425 struct f2fs_nm_info *nm_i = NM_I(sbi); 2426 unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK; 2427 struct f2fs_nat_block *nat_blk = page_address(page); 2428 int valid = 0; 2429 int i = 0; 2430 2431 if (!enabled_nat_bits(sbi, NULL)) 2432 return; 2433 2434 if (nat_index == 0) { 2435 valid = 1; 2436 i = 1; 2437 } 2438 for (; i < NAT_ENTRY_PER_BLOCK; i++) { 2439 if (nat_blk->entries[i].block_addr != NULL_ADDR) 2440 valid++; 2441 } 2442 if (valid == 0) { 2443 __set_bit_le(nat_index, nm_i->empty_nat_bits); 2444 __clear_bit_le(nat_index, nm_i->full_nat_bits); 2445 return; 2446 } 2447 2448 __clear_bit_le(nat_index, nm_i->empty_nat_bits); 2449 if (valid == NAT_ENTRY_PER_BLOCK) 2450 __set_bit_le(nat_index, nm_i->full_nat_bits); 2451 else 2452 __clear_bit_le(nat_index, nm_i->full_nat_bits); 2453 } 2454 2455 static void __flush_nat_entry_set(struct f2fs_sb_info *sbi, 2456 struct nat_entry_set *set, struct cp_control *cpc) 2457 { 2458 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); 2459 struct f2fs_journal *journal = curseg->journal; 2460 nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK; 2461 bool to_journal = true; 2462 struct f2fs_nat_block *nat_blk; 2463 struct nat_entry *ne, *cur; 2464 struct page *page = NULL; 2465 2466 /* 2467 * there are two steps to flush nat entries: 2468 * #1, flush nat entries to journal in current hot data summary block. 2469 * #2, flush nat entries to nat page. 2470 */ 2471 if (enabled_nat_bits(sbi, cpc) || 2472 !__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL)) 2473 to_journal = false; 2474 2475 if (to_journal) { 2476 down_write(&curseg->journal_rwsem); 2477 } else { 2478 page = get_next_nat_page(sbi, start_nid); 2479 nat_blk = page_address(page); 2480 f2fs_bug_on(sbi, !nat_blk); 2481 } 2482 2483 /* flush dirty nats in nat entry set */ 2484 list_for_each_entry_safe(ne, cur, &set->entry_list, list) { 2485 struct f2fs_nat_entry *raw_ne; 2486 nid_t nid = nat_get_nid(ne); 2487 int offset; 2488 2489 f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR); 2490 2491 if (to_journal) { 2492 offset = lookup_journal_in_cursum(journal, 2493 NAT_JOURNAL, nid, 1); 2494 f2fs_bug_on(sbi, offset < 0); 2495 raw_ne = &nat_in_journal(journal, offset); 2496 nid_in_journal(journal, offset) = cpu_to_le32(nid); 2497 } else { 2498 raw_ne = &nat_blk->entries[nid - start_nid]; 2499 } 2500 raw_nat_from_node_info(raw_ne, &ne->ni); 2501 nat_reset_flag(ne); 2502 __clear_nat_cache_dirty(NM_I(sbi), set, ne); 2503 if (nat_get_blkaddr(ne) == NULL_ADDR) { 2504 add_free_nid(sbi, nid, false, true); 2505 } else { 2506 spin_lock(&NM_I(sbi)->nid_list_lock); 2507 update_free_nid_bitmap(sbi, nid, false, false); 2508 spin_unlock(&NM_I(sbi)->nid_list_lock); 2509 } 2510 } 2511 2512 if (to_journal) { 2513 up_write(&curseg->journal_rwsem); 2514 } else { 2515 __update_nat_bits(sbi, start_nid, page); 2516 f2fs_put_page(page, 1); 2517 } 2518 2519 /* Allow dirty nats by node block allocation in write_begin */ 2520 if (!set->entry_cnt) { 2521 radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set); 2522 kmem_cache_free(nat_entry_set_slab, set); 2523 } 2524 } 2525 2526 /* 2527 * This function is called during the checkpointing process. 2528 */ 2529 void flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) 2530 { 2531 struct f2fs_nm_info *nm_i = NM_I(sbi); 2532 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); 2533 struct f2fs_journal *journal = curseg->journal; 2534 struct nat_entry_set *setvec[SETVEC_SIZE]; 2535 struct nat_entry_set *set, *tmp; 2536 unsigned int found; 2537 nid_t set_idx = 0; 2538 LIST_HEAD(sets); 2539 2540 if (!nm_i->dirty_nat_cnt) 2541 return; 2542 2543 down_write(&nm_i->nat_tree_lock); 2544 2545 /* 2546 * if there are no enough space in journal to store dirty nat 2547 * entries, remove all entries from journal and merge them 2548 * into nat entry set. 2549 */ 2550 if (enabled_nat_bits(sbi, cpc) || 2551 !__has_cursum_space(journal, nm_i->dirty_nat_cnt, NAT_JOURNAL)) 2552 remove_nats_in_journal(sbi); 2553 2554 while ((found = __gang_lookup_nat_set(nm_i, 2555 set_idx, SETVEC_SIZE, setvec))) { 2556 unsigned idx; 2557 set_idx = setvec[found - 1]->set + 1; 2558 for (idx = 0; idx < found; idx++) 2559 __adjust_nat_entry_set(setvec[idx], &sets, 2560 MAX_NAT_JENTRIES(journal)); 2561 } 2562 2563 /* flush dirty nats in nat entry set */ 2564 list_for_each_entry_safe(set, tmp, &sets, set_list) 2565 __flush_nat_entry_set(sbi, set, cpc); 2566 2567 up_write(&nm_i->nat_tree_lock); 2568 /* Allow dirty nats by node block allocation in write_begin */ 2569 } 2570 2571 static int __get_nat_bitmaps(struct f2fs_sb_info *sbi) 2572 { 2573 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 2574 struct f2fs_nm_info *nm_i = NM_I(sbi); 2575 unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE; 2576 unsigned int i; 2577 __u64 cp_ver = cur_cp_version(ckpt); 2578 block_t nat_bits_addr; 2579 2580 if (!enabled_nat_bits(sbi, NULL)) 2581 return 0; 2582 2583 nm_i->nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) + 8 + 2584 F2FS_BLKSIZE - 1); 2585 nm_i->nat_bits = f2fs_kzalloc(sbi, 2586 nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL); 2587 if (!nm_i->nat_bits) 2588 return -ENOMEM; 2589 2590 nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg - 2591 nm_i->nat_bits_blocks; 2592 for (i = 0; i < nm_i->nat_bits_blocks; i++) { 2593 struct page *page = get_meta_page(sbi, nat_bits_addr++); 2594 2595 memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS), 2596 page_address(page), F2FS_BLKSIZE); 2597 f2fs_put_page(page, 1); 2598 } 2599 2600 cp_ver |= (cur_cp_crc(ckpt) << 32); 2601 if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) { 2602 disable_nat_bits(sbi, true); 2603 return 0; 2604 } 2605 2606 nm_i->full_nat_bits = nm_i->nat_bits + 8; 2607 nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes; 2608 2609 f2fs_msg(sbi->sb, KERN_NOTICE, "Found nat_bits in checkpoint"); 2610 return 0; 2611 } 2612 2613 static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi) 2614 { 2615 struct f2fs_nm_info *nm_i = NM_I(sbi); 2616 unsigned int i = 0; 2617 nid_t nid, last_nid; 2618 2619 if (!enabled_nat_bits(sbi, NULL)) 2620 return; 2621 2622 for (i = 0; i < nm_i->nat_blocks; i++) { 2623 i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i); 2624 if (i >= nm_i->nat_blocks) 2625 break; 2626 2627 __set_bit_le(i, nm_i->nat_block_bitmap); 2628 2629 nid = i * NAT_ENTRY_PER_BLOCK; 2630 last_nid = nid + NAT_ENTRY_PER_BLOCK; 2631 2632 spin_lock(&NM_I(sbi)->nid_list_lock); 2633 for (; nid < last_nid; nid++) 2634 update_free_nid_bitmap(sbi, nid, true, true); 2635 spin_unlock(&NM_I(sbi)->nid_list_lock); 2636 } 2637 2638 for (i = 0; i < nm_i->nat_blocks; i++) { 2639 i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i); 2640 if (i >= nm_i->nat_blocks) 2641 break; 2642 2643 __set_bit_le(i, nm_i->nat_block_bitmap); 2644 } 2645 } 2646 2647 static int init_node_manager(struct f2fs_sb_info *sbi) 2648 { 2649 struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi); 2650 struct f2fs_nm_info *nm_i = NM_I(sbi); 2651 unsigned char *version_bitmap; 2652 unsigned int nat_segs; 2653 int err; 2654 2655 nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr); 2656 2657 /* segment_count_nat includes pair segment so divide to 2. */ 2658 nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1; 2659 nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg); 2660 nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks; 2661 2662 /* not used nids: 0, node, meta, (and root counted as valid node) */ 2663 nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count - 2664 sbi->nquota_files - F2FS_RESERVED_NODE_NUM; 2665 nm_i->nid_cnt[FREE_NID] = 0; 2666 nm_i->nid_cnt[PREALLOC_NID] = 0; 2667 nm_i->nat_cnt = 0; 2668 nm_i->ram_thresh = DEF_RAM_THRESHOLD; 2669 nm_i->ra_nid_pages = DEF_RA_NID_PAGES; 2670 nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD; 2671 2672 INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC); 2673 INIT_LIST_HEAD(&nm_i->free_nid_list); 2674 INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO); 2675 INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO); 2676 INIT_LIST_HEAD(&nm_i->nat_entries); 2677 2678 mutex_init(&nm_i->build_lock); 2679 spin_lock_init(&nm_i->nid_list_lock); 2680 init_rwsem(&nm_i->nat_tree_lock); 2681 2682 nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid); 2683 nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP); 2684 version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP); 2685 if (!version_bitmap) 2686 return -EFAULT; 2687 2688 nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size, 2689 GFP_KERNEL); 2690 if (!nm_i->nat_bitmap) 2691 return -ENOMEM; 2692 2693 err = __get_nat_bitmaps(sbi); 2694 if (err) 2695 return err; 2696 2697 #ifdef CONFIG_F2FS_CHECK_FS 2698 nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size, 2699 GFP_KERNEL); 2700 if (!nm_i->nat_bitmap_mir) 2701 return -ENOMEM; 2702 #endif 2703 2704 return 0; 2705 } 2706 2707 static int init_free_nid_cache(struct f2fs_sb_info *sbi) 2708 { 2709 struct f2fs_nm_info *nm_i = NM_I(sbi); 2710 2711 nm_i->free_nid_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks * 2712 NAT_ENTRY_BITMAP_SIZE, GFP_KERNEL); 2713 if (!nm_i->free_nid_bitmap) 2714 return -ENOMEM; 2715 2716 nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8, 2717 GFP_KERNEL); 2718 if (!nm_i->nat_block_bitmap) 2719 return -ENOMEM; 2720 2721 nm_i->free_nid_count = f2fs_kvzalloc(sbi, nm_i->nat_blocks * 2722 sizeof(unsigned short), GFP_KERNEL); 2723 if (!nm_i->free_nid_count) 2724 return -ENOMEM; 2725 return 0; 2726 } 2727 2728 int build_node_manager(struct f2fs_sb_info *sbi) 2729 { 2730 int err; 2731 2732 sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info), 2733 GFP_KERNEL); 2734 if (!sbi->nm_info) 2735 return -ENOMEM; 2736 2737 err = init_node_manager(sbi); 2738 if (err) 2739 return err; 2740 2741 err = init_free_nid_cache(sbi); 2742 if (err) 2743 return err; 2744 2745 /* load free nid status from nat_bits table */ 2746 load_free_nid_bitmap(sbi); 2747 2748 build_free_nids(sbi, true, true); 2749 return 0; 2750 } 2751 2752 void destroy_node_manager(struct f2fs_sb_info *sbi) 2753 { 2754 struct f2fs_nm_info *nm_i = NM_I(sbi); 2755 struct free_nid *i, *next_i; 2756 struct nat_entry *natvec[NATVEC_SIZE]; 2757 struct nat_entry_set *setvec[SETVEC_SIZE]; 2758 nid_t nid = 0; 2759 unsigned int found; 2760 2761 if (!nm_i) 2762 return; 2763 2764 /* destroy free nid list */ 2765 spin_lock(&nm_i->nid_list_lock); 2766 list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) { 2767 __remove_free_nid(sbi, i, FREE_NID); 2768 spin_unlock(&nm_i->nid_list_lock); 2769 kmem_cache_free(free_nid_slab, i); 2770 spin_lock(&nm_i->nid_list_lock); 2771 } 2772 f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]); 2773 f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]); 2774 f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list)); 2775 spin_unlock(&nm_i->nid_list_lock); 2776 2777 /* destroy nat cache */ 2778 down_write(&nm_i->nat_tree_lock); 2779 while ((found = __gang_lookup_nat_cache(nm_i, 2780 nid, NATVEC_SIZE, natvec))) { 2781 unsigned idx; 2782 2783 nid = nat_get_nid(natvec[found - 1]) + 1; 2784 for (idx = 0; idx < found; idx++) 2785 __del_from_nat_cache(nm_i, natvec[idx]); 2786 } 2787 f2fs_bug_on(sbi, nm_i->nat_cnt); 2788 2789 /* destroy nat set cache */ 2790 nid = 0; 2791 while ((found = __gang_lookup_nat_set(nm_i, 2792 nid, SETVEC_SIZE, setvec))) { 2793 unsigned idx; 2794 2795 nid = setvec[found - 1]->set + 1; 2796 for (idx = 0; idx < found; idx++) { 2797 /* entry_cnt is not zero, when cp_error was occurred */ 2798 f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list)); 2799 radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set); 2800 kmem_cache_free(nat_entry_set_slab, setvec[idx]); 2801 } 2802 } 2803 up_write(&nm_i->nat_tree_lock); 2804 2805 kvfree(nm_i->nat_block_bitmap); 2806 kvfree(nm_i->free_nid_bitmap); 2807 kvfree(nm_i->free_nid_count); 2808 2809 kfree(nm_i->nat_bitmap); 2810 kfree(nm_i->nat_bits); 2811 #ifdef CONFIG_F2FS_CHECK_FS 2812 kfree(nm_i->nat_bitmap_mir); 2813 #endif 2814 sbi->nm_info = NULL; 2815 kfree(nm_i); 2816 } 2817 2818 int __init create_node_manager_caches(void) 2819 { 2820 nat_entry_slab = f2fs_kmem_cache_create("nat_entry", 2821 sizeof(struct nat_entry)); 2822 if (!nat_entry_slab) 2823 goto fail; 2824 2825 free_nid_slab = f2fs_kmem_cache_create("free_nid", 2826 sizeof(struct free_nid)); 2827 if (!free_nid_slab) 2828 goto destroy_nat_entry; 2829 2830 nat_entry_set_slab = f2fs_kmem_cache_create("nat_entry_set", 2831 sizeof(struct nat_entry_set)); 2832 if (!nat_entry_set_slab) 2833 goto destroy_free_nid; 2834 return 0; 2835 2836 destroy_free_nid: 2837 kmem_cache_destroy(free_nid_slab); 2838 destroy_nat_entry: 2839 kmem_cache_destroy(nat_entry_slab); 2840 fail: 2841 return -ENOMEM; 2842 } 2843 2844 void destroy_node_manager_caches(void) 2845 { 2846 kmem_cache_destroy(nat_entry_set_slab); 2847 kmem_cache_destroy(free_nid_slab); 2848 kmem_cache_destroy(nat_entry_slab); 2849 } 2850