/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;

static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return get_meta_page(sbi, index);
}
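
/*
 * Each NAT block has a twin copy on disk, and nm_i->nat_bitmap records
 * which of the two currently holds the valid version.  current_nat_addr()
 * returns the valid copy, next_nat_addr() returns the shadow copy that
 * get_next_nat_page() below writes into, and set_to_next_nat() flips the
 * bit so that the shadow copy becomes the valid one as of the next
 * checkpoint.
 */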

static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);

	/* Dirty src_page means that it is already the new target NAT page. */
	if (PageDirty(src_page))
		return src_page;

	dst_page = grab_meta_page(sbi, dst_off);

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

/*
 * Readahead NAT pages
 */
static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid)
{
	struct address_space *mapping = sbi->meta_inode->i_mapping;
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct page *page;
	pgoff_t index;
	int i;

	for (i = 0; i < FREE_NID_PAGES; i++, nid += NAT_ENTRY_PER_BLOCK) {
		if (nid >= nm_i->max_nid)
			nid = 0;
		index = current_nat_addr(sbi, nid);

		page = grab_cache_page(mapping, index);
		if (!page)
			continue;
		if (PageUptodate(page)) {
			f2fs_put_page(page, 1);
			continue;
		}
		if (f2fs_readpage(sbi, page, index, READ))
			continue;

		f2fs_put_page(page, 0);
	}
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}

int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	int is_cp = 1;

	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !e->checkpointed)
		is_cp = 0;
	read_unlock(&nm_i->nat_tree_lock);
	return is_cp;
}

static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct nat_entry *new;

	new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
	if (!new)
		return NULL;
	if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
		kmem_cache_free(nat_entry_slab, new);
		return NULL;
	}
	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}

static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(e, le32_to_cpu(ne->block_addr));
		nat_set_ino(e, le32_to_cpu(ne->ino));
		nat_set_version(e, ne->version);
		e->checkpointed = true;
	}
	write_unlock(&nm_i->nat_tree_lock);
}
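
/*
 * set_node_addr() records a node's new block address in the NAT cache.
 * The legal transitions, enforced by the BUG_ON()s below, are:
 *
 *	NULL_ADDR  -> NEW_ADDR		nid allocated, node not yet written
 *	NEW_ADDR   -> valid address	node written for the first time
 *	valid addr -> valid address	node block relocated by writeback
 *	valid addr -> NULL_ADDR		node freed; the version number is
 *					incremented so a reused nid can be
 *					distinguished from the old one
 */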

static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		e->ni = *ni;
		e->checkpointed = true;
		BUG_ON(ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when a nid is reallocated, the previous nat entry may
		 * remain in the nat cache, so reinitialize it with the
		 * new information.
		 */
		e->ni = *ni;
		BUG_ON(ni->blk_addr != NULL_ADDR);
	}

	if (new_blkaddr == NEW_ADDR)
		e->checkpointed = false;

	/* sanity check */
	BUG_ON(nat_get_blkaddr(e) != ni->blk_addr);
	BUG_ON(nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	BUG_ON(nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	BUG_ON(nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment the version number, since the node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	__set_nat_cache_dirty(nm_i, e);
	write_unlock(&nm_i->nat_tree_lock);
}

static int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	if (nm_i->nat_cnt < 2 * NM_WOUT_THRESHOLD)
		return 0;

	write_lock(&nm_i->nat_tree_lock);
	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	write_unlock(&nm_i->nat_tree_lock);
	return nr_shrink;
}

/*
 * This function always succeeds.
 */
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	int i;

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
	ni->nid = nid;

	/* Check nat cache */
	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
	}
	read_unlock(&nm_i->nat_tree_lock);
	if (e)
		return;

	/* Check current segment summary */
	mutex_lock(&curseg->curseg_mutex);
	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(sum, i);
		node_info_from_raw_nat(ni, &ne);
	}
	mutex_unlock(&curseg->curseg_mutex);
	if (i >= 0)
		goto cache;

	/* Fill node_info from nat page */
	page = get_current_nat_page(sbi, start_nid);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	cache_nat_entry(NM_I(sbi), nid, &ne);
}
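
/*
 * The lookup above is strictly ordered: the in-memory NAT cache first,
 * then the NAT journal kept in the hot data summary block, and only then
 * the on-disk NAT block.  A typical caller only needs the resolved block
 * address; for example (sketch, mirroring read_node_page() below):
 *
 *	struct node_info ni;
 *
 *	get_node_info(sbi, nid, &ni);
 *	if (ni.blk_addr == NULL_ADDR)
 *		return -ENOENT;		(the node was deallocated)
 */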

/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
static int get_node_path(long block, int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE;
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		BUG();
	}
got:
	return level;
}
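
/*
 * Worked example, assuming the default 4KB block geometry where
 * ADDRS_PER_INODE is 923 and ADDRS_PER_BLOCK == NIDS_PER_BLOCK == 1018
 * (these constants come from f2fs_fs.h; other geometries shift the
 * boundaries accordingly):
 *
 *	block 0    -> level 0, offset = { 0 }
 *		      (the 1st pointer inside the inode itself)
 *	block 922  -> level 0, offset = { 922 }
 *	block 923  -> level 1, offset = { NODE_DIR1_BLOCK, 0 }
 *		      (the 1st pointer of the first direct node)
 *	block 2959 -> level 2, offset = { NODE_IND1_BLOCK, 0, 0 }
 *		      (923 + 2 * 1018: first block behind an indirect node)
 */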

/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a mutex by calling mutex_lock_op() and
 * mutex_unlock_op(), unless ro is set to RDONLY_NODE.
 * In the case of RDONLY_NODE, we don't need to care about the mutex.
 */
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *npage[4];
	struct page *parent;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i;
	int err = 0;

	level = get_node_path(index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = get_node_page(sbi, nids[0]);
	if (IS_ERR(npage[0]))
		return PTR_ERR(npage[0]);

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i]);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	return err;
}
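
/*
 * Typical lookup pattern (sketch; mirrors the callers in data.c, where a
 * LOOKUP-style mode requests an existing node and ALLOC_NODE builds the
 * missing path on the fly):
 *
 *	struct dnode_of_data dn;
 *	int err;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = get_dnode_of_data(&dn, index, ALLOC_NODE);
 *	if (err)
 *		return err;
 *	... use dn.data_blkaddr and dn.ofs_in_node ...
 *	f2fs_put_dnode(&dn);
 */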

static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct node_info ni;

	get_node_info(sbi, dn->nid, &ni);
	if (dn->inode->i_blocks == 0) {
		BUG_ON(ni.blk_addr != NULL_ADDR);
		goto invalidate;
	}
	BUG_ON(ni.blk_addr == NULL_ADDR);

	/* Deallocate node address */
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode, 1);
	set_node_addr(sbi, &ni, NULL_ADDR);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
	} else {
		sync_inode_page(dn);
	}
invalidate:
	clear_node_page_dirty(dn->node_page);
	F2FS_SET_SB_DIRT(sbi);

	f2fs_put_page(dn->node_page, 1);
	dn->node_page = NULL;
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* set up dnode_of_data as the parameter for truncate_node() */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}

static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page))
		return PTR_ERR(page);

	rn = (struct f2fs_node *)page_address(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			set_nid(page, i, 0, false);
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				set_nid(page, i, 0, false);
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	return freed;

out_err:
	f2fs_put_page(page, 1);
	return ret;
}
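
/*
 * Return-value convention for the two helpers above: on success they
 * return the number of node blocks that are now gone from this subtree,
 * counting the node itself.  truncate_dnode() therefore returns 1, and a
 * fully-freed indirect node yields NIDS_PER_BLOCK + 1 (its children plus
 * itself), which is exactly what truncate_inode_blocks() below adds to
 * its running node-offset cursor, nofs.
 */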

static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < depth - 1; i++) {
		/* the reference count will be increased */
		pages[i] = get_node_page(sbi, nid[i]);
		if (IS_ERR(pages[i])) {
			depth = i + 1;
			err = PTR_ERR(pages[i]);
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[depth - 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		set_nid(pages[idx], i, 0, false);
	}

	if (offset[depth - 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[depth - 1] = 0;
fail:
	for (i = depth - 3; i >= 0; i--)
		f2fs_put_page(pages[i], 1);
	return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_node *rn;
	struct dnode_of_data dn;
	struct page *page;

	level = get_node_path(from, offset, noffset);

	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page))
		return PTR_ERR(page);

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	rn = page_address(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, &rn->i, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, &rn->i, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			wait_on_page_writeback(page);
			rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	return err > 0 ? 0 : err;
}
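
/*
 * Roughly how this is used: the truncate path in file.c calls
 * truncate_inode_blocks(inode, free_from) with the first block index to
 * drop.  The walk above then frees data blocks through truncate_dnode()
 * and truncate_data_blocks(), and frees the direct, indirect and double
 * indirect node blocks themselves once they no longer carry any live
 * pointers.
 */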

/*
 * Caller should grab and release a mutex by calling mutex_lock_op() and
 * mutex_unlock_op().
 */
int remove_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	nid_t ino = inode->i_ino;
	struct dnode_of_data dn;

	page = get_node_page(sbi, ino);
	if (IS_ERR(page))
		return PTR_ERR(page);

	if (F2FS_I(inode)->i_xattr_nid) {
		nid_t nid = F2FS_I(inode)->i_xattr_nid;
		struct page *npage = get_node_page(sbi, nid);

		if (IS_ERR(npage)) {
			f2fs_put_page(page, 1);
			return PTR_ERR(npage);
		}

		F2FS_I(inode)->i_xattr_nid = 0;
		set_new_dnode(&dn, inode, page, npage, nid);
		dn.inode_page_locked = 1;
		truncate_node(&dn);
	}

	/* 0 is possible, after f2fs_new_inode() has failed */
	BUG_ON(inode->i_blocks != 0 && inode->i_blocks != 1);
	set_new_dnode(&dn, inode, page, page, ino);
	truncate_node(&dn);
	return 0;
}

int new_inode_page(struct inode *inode, const struct qstr *name)
{
	struct page *page;
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	page = new_node_page(&dn, 0);
	if (IS_ERR(page))
		return PTR_ERR(page);
	init_dent_inode(name, page);
	f2fs_put_page(page, 1);
	return 0;
}

struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct node_info old_ni, new_ni;
	struct page *page;
	int err;

	if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
		return ERR_PTR(-EPERM);

	page = grab_cache_page(mapping, dn->nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	get_node_info(sbi, dn->nid, &old_ni);

	SetPageUptodate(page);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);

	/* Reinitialize old_ni with new node page */
	BUG_ON(old_ni.blk_addr != NULL_ADDR);
	new_ni = old_ni;
	new_ni.ino = dn->inode->i_ino;

	if (!inc_valid_node_count(sbi, dn->inode, 1)) {
		err = -ENOSPC;
		goto fail;
	}
	set_node_addr(sbi, &new_ni, NEW_ADDR);
	set_cold_node(dn->inode, page);

	dn->node_page = page;
	sync_inode_page(dn);
	set_page_dirty(page);
	if (ofs == 0)
		inc_valid_inode_count(sbi);

	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}
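
/*
 * new_node_page() is always paired with the free-nid allocator further
 * down in this file.  The canonical sequence, as used by
 * get_dnode_of_data() above, is:
 *
 *	if (!alloc_nid(sbi, &nid))
 *		return -ENOSPC;			(no free nid available)
 *	dn->nid = nid;
 *	page = new_node_page(dn, noffset);
 *	if (IS_ERR(page))
 *		alloc_nid_failed(sbi, nid);	(back to the free list)
 *	else
 *		alloc_nid_done(sbi, nid);	(nid is now in use)
 */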

/*
 * Depending on the return value, the caller should do the following:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE: f2fs_put_page(page, 1)
 * error: nothing
 */
static int read_node_page(struct page *page, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	struct node_info ni;

	get_node_info(sbi, page->index, &ni);

	if (ni.blk_addr == NULL_ADDR) {
		f2fs_put_page(page, 1);
		return -ENOENT;
	}

	if (PageUptodate(page))
		return LOCKED_PAGE;

	return f2fs_readpage(sbi, page, ni.blk_addr, type);
}

/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct page *apage;
	int err;

	apage = find_get_page(mapping, nid);
	if (apage && PageUptodate(apage)) {
		f2fs_put_page(apage, 0);
		return;
	}
	f2fs_put_page(apage, 0);

	apage = grab_cache_page(mapping, nid);
	if (!apage)
		return;

	err = read_node_page(apage, READA);
	if (err == 0)
		f2fs_put_page(apage, 0);
	else if (err == LOCKED_PAGE)
		f2fs_put_page(apage, 1);
}

struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct page *page;
	int err;

	page = grab_cache_page(mapping, nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto got_it;

	lock_page(page);
	if (!PageUptodate(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
got_it:
	BUG_ON(nid != nid_of_node(page));
	mark_page_accessed(page);
	return page;
}

/*
 * Return a locked page for the desired node page.
 * And, readahead MAX_RA_NODE number of node pages.
 */
struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct page *page;
	int err, i, end;
	nid_t nid;

	/* First, try getting the desired direct node. */
	nid = get_nid(parent, start, false);
	if (!nid)
		return ERR_PTR(-ENOENT);

	page = grab_cache_page(mapping, nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto page_hit;

	/* Then, try readahead for siblings of the desired node */
	end = start + MAX_RA_NODE;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start + 1; i < end; i++) {
		nid = get_nid(parent, i, false);
		if (!nid)
			continue;
		ra_node_page(sbi, nid);
	}

	lock_page(page);

page_hit:
	if (!PageUptodate(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	mark_page_accessed(page);
	return page;
}

void sync_inode_page(struct dnode_of_data *dn)
{
	if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
		update_inode(dn->inode, dn->node_page);
	} else if (dn->inode_page) {
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		update_inode(dn->inode, dn->inode_page);
		if (!dn->inode_page_locked)
			unlock_page(dn->inode_page);
	} else {
		update_inode_page(dn->inode);
	}
}
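
/*
 * sync_node_pages() flushes dirty node pages in three ordered passes, as
 * described by the step comment inside the loop: first indirect nodes,
 * then dentry dnodes, then file dnodes.  A zero ino means global
 * writeback (the f2fs_write_node_pages() and checkpoint paths); a
 * non-zero ino is the fsync path, which also stamps the fsync/dentry
 * marks into the node footers for later recovery.
 */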

int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
					struct writeback_control *wbc)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	pgoff_t index, end;
	struct pagevec pvec;
	int step = ino ? 2 : 0;
	int nwritten = 0, wrote = 0;

	pagevec_init(&pvec, 0);

next_step:
	index = 0;
	end = LONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;

			/*
			 * In fsync mode, we should not skip writing
			 * node pages.
			 */
			if (ino && ino_of_node(page) == ino)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino && ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			/* called by fsync() */
			if (ino && IS_DNODE(page)) {
				int mark = !is_checkpointed_node(sbi, ino);
				set_fsync_mark(page, 1);
				if (IS_INODE(page))
					set_dentry_mark(page, mark);
				nwritten++;
			} else {
				set_fsync_mark(page, 0);
				set_dentry_mark(page, 0);
			}
			mapping->a_ops->writepage(page, wbc);
			wrote++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}

	if (wrote)
		f2fs_submit_bio(sbi, NODE, wbc->sync_mode == WB_SYNC_ALL);

	return nwritten;
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	nid_t nid;
	block_t new_addr;
	struct node_info ni;

	wait_on_page_writeback(page);

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	BUG_ON(page->index != nid);

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (ni.blk_addr == NULL_ADDR) {
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		unlock_page(page);
		return 0;
	}

	if (wbc->for_reclaim) {
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		wbc->pages_skipped++;
		set_page_dirty(page);
		return AOP_WRITEPAGE_ACTIVATE;
	}

	mutex_lock(&sbi->node_write);
	set_page_writeback(page);
	write_node_page(sbi, page, nid, ni.blk_addr, &new_addr);
	set_node_addr(sbi, &ni, new_addr);
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	mutex_unlock(&sbi->node_write);
	unlock_page(page);
	return 0;
}

/*
 * It is very important to gather dirty pages and write them at once, so
 * that we can submit a big bio without interfering with other data writes.
 * By default, 512 pages (2MB), one segment size, is quite reasonable.
 */
#define COLLECT_DIRTY_NODES	512
static int f2fs_write_node_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	struct block_device *bdev = sbi->sb->s_bdev;
	long nr_to_write = wbc->nr_to_write;

	/* First check balancing cached NAT entries */
	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
		f2fs_sync_fs(sbi->sb, true);
		return 0;
	}

	/* collect a number of dirty node pages and write together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES)
		return 0;

	/* if mounting failed, skip writing node pages */
	wbc->nr_to_write = bio_get_nr_vecs(bdev);
	sync_node_pages(sbi, 0, wbc);
	wbc->nr_to_write = nr_to_write -
		(bio_get_nr_vecs(bdev) - wbc->nr_to_write);
	return 0;
}

static int f2fs_set_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(sbi, F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		return 1;
	}
	return 0;
}

static void f2fs_invalidate_node_page(struct page *page, unsigned long offset)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (PageDirty(page))
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	ClearPagePrivate(page);
}

static int f2fs_release_node_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_node_page,
	.releasepage	= f2fs_release_node_page,
};

static struct free_nid *__lookup_free_nid_list(nid_t n, struct list_head *head)
{
	struct list_head *this;
	struct free_nid *i;
	list_for_each(this, head) {
		i = list_entry(this, struct free_nid, list);
		if (i->nid == n)
			return i;
	}
	return NULL;
}

static void __del_from_free_nid_list(struct free_nid *i)
{
	list_del(&i->list);
	kmem_cache_free(free_nid_slab, i);
}

static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;

	if (nm_i->fcnt > 2 * MAX_FREE_NIDS)
		return 0;
retry:
	i = kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	if (!i) {
		cond_resched();
		goto retry;
	}
	i->nid = nid;
	i->state = NID_NEW;

	spin_lock(&nm_i->free_nid_list_lock);
	if (__lookup_free_nid_list(nid, &nm_i->free_nid_list)) {
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}
	list_add_tail(&i->list, &nm_i->free_nid_list);
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
	return 1;
}

static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;
	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	if (i && i->state == NID_NEW) {
		__del_from_free_nid_list(i);
		nm_i->fcnt--;
	}
	spin_unlock(&nm_i->free_nid_list_lock);
}
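
/*
 * A free_nid moves through two states: NID_NEW while it merely sits on
 * the free list as a candidate, and NID_ALLOC once alloc_nid() has
 * handed it out.  alloc_nid_done() removes a NID_ALLOC entry for good,
 * while alloc_nid_failed() demotes it back to NID_NEW.  Note that
 * remove_free_nid() above deliberately ignores NID_ALLOC entries: a nid
 * currently being consumed must not vanish under its user.
 */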

static int scan_nat_page(struct f2fs_nm_info *nm_i,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	int fcnt = 0;
	int i;

	/* 0 nid should not be used */
	if (start_nid == 0)
		++start_nid;

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
		if (start_nid >= nm_i->max_nid)
			break;
		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		BUG_ON(blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR)
			fcnt += add_free_nid(nm_i, start_nid);
	}
	return fcnt;
}

static void build_free_nids(struct f2fs_sb_info *sbi)
{
	struct free_nid *fnid, *next_fnid;
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t nid = 0;
	bool is_cycled = false;
	int fcnt = 0;
	int i;

	nid = nm_i->next_scan_nid;
	nm_i->init_scan_nid = nid;

	ra_nat_pages(sbi, nid);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		fcnt += scan_nat_page(nm_i, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));

		if (nid >= nm_i->max_nid) {
			nid = 0;
			is_cycled = true;
		}
		if (fcnt > MAX_FREE_NIDS)
			break;
		if (is_cycled && nm_i->init_scan_nid <= nid)
			break;
	}

	/* go to the next nat page in order to reuse free nids first */
	nm_i->next_scan_nid = nm_i->init_scan_nid + NAT_ENTRY_PER_BLOCK;

	/* find free nids from current sum_pages */
	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(sum, i));
		if (addr == NULL_ADDR)
			add_free_nid(nm_i, nid);
		else
			remove_free_nid(nm_i, nid);
	}
	mutex_unlock(&curseg->curseg_mutex);

	/* remove the free nids from current allocated nids */
	list_for_each_entry_safe(fnid, next_fnid, &nm_i->free_nid_list, list) {
		struct nat_entry *ne;

		read_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, fnid->nid);
		if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
			remove_free_nid(nm_i, fnid->nid);
		read_unlock(&nm_i->nat_tree_lock);
	}
}

/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid can also be used as the ino when a new inode is created.
 */
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
	struct list_head *this;
retry:
	mutex_lock(&nm_i->build_lock);
	if (!nm_i->fcnt) {
		/* scan NAT in order to build the free nid list */
		build_free_nids(sbi);
		if (!nm_i->fcnt) {
			mutex_unlock(&nm_i->build_lock);
			return false;
		}
	}
	mutex_unlock(&nm_i->build_lock);

	/*
	 * Check fcnt again, since the previous check was racy: we did not
	 * hold free_nid_list_lock, so another thread could have consumed
	 * all of the free nids.
	 */
	spin_lock(&nm_i->free_nid_list_lock);
	if (!nm_i->fcnt) {
		spin_unlock(&nm_i->free_nid_list_lock);
		goto retry;
	}

	BUG_ON(list_empty(&nm_i->free_nid_list));
	list_for_each(this, &nm_i->free_nid_list) {
		i = list_entry(this, struct free_nid, list);
		if (i->state == NID_NEW)
			break;
	}

	BUG_ON(i->state != NID_NEW);
	*nid = i->nid;
	i->state = NID_ALLOC;
	nm_i->fcnt--;
	spin_unlock(&nm_i->free_nid_list_lock);
	return true;
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	BUG_ON(!i || i->state != NID_ALLOC);
	__del_from_free_nid_list(i);
	spin_unlock(&nm_i->free_nid_list_lock);
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	BUG_ON(!i || i->state != NID_ALLOC);
	i->state = NID_NEW;
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
}

void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
		struct f2fs_summary *sum, struct node_info *ni,
		block_t new_blkaddr)
{
	rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
	set_node_addr(sbi, ni, new_blkaddr);
	clear_node_page_dirty(page);
}

int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct f2fs_node *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;

	ipage = grab_cache_page(mapping, ino);
	if (!ipage)
		return -ENOMEM;

	/* Should not use this inode from the free nid list */
	remove_free_nid(NM_I(sbi), ino);

	get_node_info(sbi, ino, &old_ni);
	SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);

	src = (struct f2fs_node *)page_address(page);
	dst = (struct f2fs_node *)page_address(ipage);

	memcpy(dst, src, (unsigned long)&src->i.i_ext - (unsigned long)&src->i);
	dst->i.i_size = 0;
	dst->i.i_blocks = cpu_to_le64(1);
	dst->i.i_links = cpu_to_le32(1);
	dst->i.i_xattr_nid = 0;

	new_ni = old_ni;
	new_ni.ino = ino;

	set_node_addr(sbi, &new_ni, NEW_ADDR);
	inc_valid_inode_count(sbi);

	f2fs_put_page(ipage, 1);
	return 0;
}

int restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	struct page *page;
	block_t addr;
	int i, last_offset;

	/* allocate a temporary page to read node blocks */
	page = alloc_page(GFP_NOFS | __GFP_ZERO);
	if (!page)
		return -ENOMEM;
	lock_page(page);

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; i < last_offset; i++, sum_entry++) {
		/*
		 * In order to read the next node page,
		 * we must clear the PageUptodate flag.
		 */
		ClearPageUptodate(page);

		if (f2fs_readpage(sbi, page, addr, READ_SYNC))
			goto out;

		lock_page(page);
		rn = (struct f2fs_node *)page_address(page);
		sum_entry->nid = rn->footer.nid;
		sum_entry->version = 0;
		sum_entry->ofs_in_node = 0;
		addr++;
	}
	unlock_page(page);
out:
	__free_pages(page, 0);
	return 0;
}
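
/*
 * flush_nats_in_journal() drains the NAT journal kept in the hot data
 * summary block once it is full (nats_in_cursum() has reached
 * NAT_JOURNAL_ENTRIES): every journalled entry is merged into the NAT
 * cache and marked dirty.  It returns true in that case, which tells
 * flush_nat_entries() below to write the dirty entries straight to NAT
 * blocks instead of re-journalling them.
 */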

static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	mutex_lock(&curseg->curseg_mutex);

	if (nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) {
		mutex_unlock(&curseg->curseg_mutex);
		return false;
	}

	for (i = 0; i < nats_in_cursum(sum); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(sum, i));

		raw_ne = nat_in_journal(sum, i);
retry:
		write_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne) {
			__set_nat_cache_dirty(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
			continue;
		}
		ne = grab_nat_entry(nm_i, nid);
		if (!ne) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(ne, le32_to_cpu(raw_ne.block_addr));
		nat_set_ino(ne, le32_to_cpu(raw_ne.ino));
		nat_set_version(ne, raw_ne.version);
		__set_nat_cache_dirty(nm_i, ne);
		write_unlock(&nm_i->nat_tree_lock);
	}
	update_nats_in_cursum(sum, -i);
	mutex_unlock(&curseg->curseg_mutex);
	return true;
}

/*
 * This function is called during the checkpointing process.
 */
void flush_nat_entries(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct list_head *cur, *n;
	struct page *page = NULL;
	struct f2fs_nat_block *nat_blk = NULL;
	nid_t start_nid = 0, end_nid = 0;
	bool flushed;

	flushed = flush_nats_in_journal(sbi);

	if (!flushed)
		mutex_lock(&curseg->curseg_mutex);

	/* 1) flush dirty nat caches */
	list_for_each_safe(cur, n, &nm_i->dirty_nat_entries) {
		struct nat_entry *ne;
		nid_t nid;
		struct f2fs_nat_entry raw_ne;
		int offset = -1;
		block_t new_blkaddr;

		ne = list_entry(cur, struct nat_entry, list);
		nid = nat_get_nid(ne);

		if (nat_get_blkaddr(ne) == NEW_ADDR)
			continue;
		if (flushed)
			goto to_nat_page;

		/* if there is room for nat entries in the current summary page */
		offset = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 1);
		if (offset >= 0) {
			raw_ne = nat_in_journal(sum, offset);
			goto flush_now;
		}
to_nat_page:
		if (!page || (start_nid > nid || nid > end_nid)) {
			if (page) {
				f2fs_put_page(page, 1);
				page = NULL;
			}
			start_nid = START_NID(nid);
			end_nid = start_nid + NAT_ENTRY_PER_BLOCK - 1;

			/*
			 * get the nat block page: dirty, with an elevated
			 * reference count, mapped and locked
			 */
			page = get_next_nat_page(sbi, start_nid);
			nat_blk = page_address(page);
		}

		BUG_ON(!nat_blk);
		raw_ne = nat_blk->entries[nid - start_nid];
flush_now:
		new_blkaddr = nat_get_blkaddr(ne);

		raw_ne.ino = cpu_to_le32(nat_get_ino(ne));
		raw_ne.block_addr = cpu_to_le32(new_blkaddr);
		raw_ne.version = nat_get_version(ne);

		if (offset < 0) {
			nat_blk->entries[nid - start_nid] = raw_ne;
		} else {
			nat_in_journal(sum, offset) = raw_ne;
			nid_in_journal(sum, offset) = cpu_to_le32(nid);
		}

		if (nat_get_blkaddr(ne) == NULL_ADDR &&
				!add_free_nid(NM_I(sbi), nid)) {
			write_lock(&nm_i->nat_tree_lock);
			__del_from_nat_cache(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
		} else {
			write_lock(&nm_i->nat_tree_lock);
			__clear_nat_cache_dirty(nm_i, ne);
			ne->checkpointed = true;
			write_unlock(&nm_i->nat_tree_lock);
		}
	}
	if (!flushed)
		mutex_unlock(&curseg->curseg_mutex);
	f2fs_put_page(page, 1);

	/* 2) shrink nat caches if necessary */
	try_to_free_nats(sbi, nm_i->nat_cnt - NM_WOUT_THRESHOLD);
}

static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs, nat_blocks;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/*
	 * segment_count_nat includes both copies of each NAT segment,
	 * so divide by 2.
	 */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
	nm_i->fcnt = 0;
	nm_i->nat_cnt = 0;

	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->nat_entries);
	INIT_LIST_HEAD(&nm_i->dirty_nat_entries);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->free_nid_list_lock);
	rwlock_init(&nm_i->nat_tree_lock);

	nm_i->init_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	return 0;
}
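
/*
 * Sizing example for the computation above, assuming 4KB blocks where a
 * NAT block holds NAT_ENTRY_PER_BLOCK = 4096 / sizeof(struct
 * f2fs_nat_entry) = 455 entries: with segment_count_nat == 4 (i.e. two
 * NAT segments plus their copies) and 512 blocks per segment,
 *
 *	nat_segs   = 4 >> 1 = 2
 *	nat_blocks = 2 << 9 = 1024
 *	max_nid    = 455 * 1024 = 465,920 addressable nodes
 */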

int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	build_free_nids(sbi);
	return 0;
}

void destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->free_nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		BUG_ON(i->state == NID_ALLOC);
		__del_from_free_nid_list(i);
		nm_i->fcnt--;
	}
	BUG_ON(nm_i->fcnt);
	spin_unlock(&nm_i->free_nid_list_lock);

	/* destroy nat cache */
	write_lock(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
			nid, NATVEC_SIZE, natvec))) {
		unsigned idx;
		for (idx = 0; idx < found; idx++) {
			struct nat_entry *e = natvec[idx];
			nid = nat_get_nid(e) + 1;
			__del_from_nat_cache(nm_i, e);
		}
	}
	BUG_ON(nm_i->nat_cnt);
	write_unlock(&nm_i->nat_tree_lock);

	kfree(nm_i->nat_bitmap);
	sbi->nm_info = NULL;
	kfree(nm_i);
}

int __init create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
			sizeof(struct nat_entry), NULL);
	if (!nat_entry_slab)
		return -ENOMEM;

	free_nid_slab = f2fs_kmem_cache_create("free_nid",
			sizeof(struct free_nid), NULL);
	if (!free_nid_slab) {
		kmem_cache_destroy(nat_entry_slab);
		return -ENOMEM;
	}
	return 0;
}

void destroy_node_manager_caches(void)
{
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}