/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;

static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return get_meta_page(sbi, index);
}
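
/*
 * NAT blocks live in pairs; the NAT version bitmap records which copy of
 * each block is current.  To update an entry safely, the current block is
 * copied to its pair location and the bitmap is flipped, so the
 * checkpointed copy stays intact until the next checkpoint completes.
 */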
static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);

	/* Dirty src_page means that it is already the new target NAT page. */
	if (PageDirty(src_page))
		return src_page;

	dst_page = grab_meta_page(sbi, dst_off);

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

/*
 * Readahead NAT pages
 */
static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid)
{
	struct address_space *mapping = sbi->meta_inode->i_mapping;
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct page *page;
	pgoff_t index;
	int i;

	for (i = 0; i < FREE_NID_PAGES; i++, nid += NAT_ENTRY_PER_BLOCK) {
		if (nid >= nm_i->max_nid)
			nid = 0;
		index = current_nat_addr(sbi, nid);

		page = grab_cache_page(mapping, index);
		if (!page)
			continue;
		if (f2fs_readpage(sbi, page, index, READ)) {
			f2fs_put_page(page, 1);
			continue;
		}
		page_cache_release(page);
	}
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}

int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	int is_cp = 1;

	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !e->checkpointed)
		is_cp = 0;
	read_unlock(&nm_i->nat_tree_lock);
	return is_cp;
}

static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct nat_entry *new;

	new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
	if (!new)
		return NULL;
	if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
		kmem_cache_free(nat_entry_slab, new);
		return NULL;
	}
	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}

static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(e, le32_to_cpu(ne->block_addr));
		nat_set_ino(e, le32_to_cpu(ne->ino));
		nat_set_version(e, ne->version);
		e->checkpointed = true;
	}
	write_unlock(&nm_i->nat_tree_lock);
}
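
/*
 * Update the cached NAT entry of ni->nid with a new block address.
 * The legal transitions are NULL_ADDR -> NEW_ADDR (allocation),
 * NEW_ADDR or a valid address -> a valid address (write-out), and
 * a valid address -> NULL_ADDR (truncation); the BUG_ON() sanity
 * checks below reject everything else.
 */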
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		e->ni = *ni;
		e->checkpointed = true;
		BUG_ON(ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when a nid is reallocated, the previous nat entry
		 * can remain in the nat cache.
		 * So, reinitialize it with new information.
		 */
		e->ni = *ni;
		BUG_ON(ni->blk_addr != NULL_ADDR);
	}

	if (new_blkaddr == NEW_ADDR)
		e->checkpointed = false;

	/* sanity check */
	BUG_ON(nat_get_blkaddr(e) != ni->blk_addr);
	BUG_ON(nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	BUG_ON(nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	BUG_ON(nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment the version no. as the node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	__set_nat_cache_dirty(nm_i, e);
	write_unlock(&nm_i->nat_tree_lock);
}

static int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	if (nm_i->nat_cnt < 2 * NM_WOUT_THRESHOLD)
		return 0;

	write_lock(&nm_i->nat_tree_lock);
	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	write_unlock(&nm_i->nat_tree_lock);
	return nr_shrink;
}

/*
 * This function always returns success.
 */
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	int i;

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
	ni->nid = nid;

	/* Check nat cache */
	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
	}
	read_unlock(&nm_i->nat_tree_lock);
	if (e)
		return;

	/* Check current segment summary */
	mutex_lock(&curseg->curseg_mutex);
	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(sum, i);
		node_info_from_raw_nat(ni, &ne);
	}
	mutex_unlock(&curseg->curseg_mutex);
	if (i >= 0)
		goto cache;

	/* Fill node_info from nat page */
	page = get_current_nat_page(sbi, start_nid);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	cache_nat_entry(NM_I(sbi), nid, &ne);
}

/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
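
/*
 * For example, with the default 4KB block size (ADDRS_PER_INODE = 923,
 * ADDRS_PER_BLOCK = NIDS_PER_BLOCK = 1018), block 0 stays in the inode
 * itself: level 0 with offset[0] = 0.  Block 923 is the first slot of
 * the first direct node: level 1 with offset[] = { NODE_DIR1_BLOCK, 0 }
 * and noffset[1] = 1, i.e. the first node block after the inode.
 */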
static int get_node_path(long block, int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE;
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n++] = block;
		level = 0;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n++] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n++] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n++] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n++] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			offset[n - 2] * (dptrs_per_blk + 1) +
			offset[n - 1];
		offset[n++] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		BUG();
	}
got:
	return level;
}

/*
 * Caller should call f2fs_put_dnode(dn).
 */
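
/*
 * On success, dn->node_page holds the locked direct node covering
 * 'index' and dn->ofs_in_node points at its block address slot.  With
 * ro == 0, any missing node on the path is allocated on the fly; with
 * ro != 0 the lookup fails with -ENOENT instead of allocating.
 */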
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int ro)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *npage[4];
	struct page *parent;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i;
	int err = 0;

	level = get_node_path(index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = get_node_page(sbi, nids[0]);
	if (IS_ERR(npage[0]))
		return PTR_ERR(npage[0]);

	parent = npage[0];
	nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && !ro) {
			mutex_lock_op(sbi, NODE_NEW);

			/* alloc new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				mutex_unlock_op(sbi, NODE_NEW);
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i]);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				mutex_unlock_op(sbi, NODE_NEW);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			mutex_unlock_op(sbi, NODE_NEW);
			done = true;
		} else if (ro && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	return err;
}
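
/*
 * Release everything attached to dn->node_page: invalidate the on-disk
 * block, give back the node address and block count, and detach the now
 * clean page.  Dropping an inode's own node also clears its orphan
 * record and the valid inode count.
 */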
static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct node_info ni;

	get_node_info(sbi, dn->nid, &ni);
	BUG_ON(ni.blk_addr == NULL_ADDR);

	if (ni.blk_addr != NULL_ADDR)
		invalidate_blocks(sbi, ni.blk_addr);

	/* Deallocate node address */
	dec_valid_node_count(sbi, dn->inode, 1);
	set_node_addr(sbi, &ni, NULL_ADDR);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
	} else {
		sync_inode_page(dn);
	}

	clear_node_page_dirty(dn->node_page);
	F2FS_SET_SB_DIRT(sbi);

	f2fs_put_page(dn->node_page, 1);
	dn->node_page = NULL;
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}

static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page))
		return PTR_ERR(page);

	rn = (struct f2fs_node *)page_address(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			set_nid(page, i, 0, false);
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				set_nid(page, i, 0, false);
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	return freed;

out_err:
	f2fs_put_page(page, 1);
	return ret;
}

static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < depth - 1; i++) {
		/* reference count will be increased */
		pages[i] = get_node_page(sbi, nid[i]);
		if (IS_ERR(pages[i])) {
			depth = i + 1;
			err = PTR_ERR(pages[i]);
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[depth - 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		set_nid(pages[idx], i, 0, false);
	}

	if (offset[depth - 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[depth - 1] = 0;
fail:
	for (i = depth - 3; i >= 0; i--)
		f2fs_put_page(pages[i], 1);
	return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
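
/*
 * Truncation runs in two phases: truncate_partial_nodes() trims the
 * indirect path that 'from' lands in the middle of, and then the loop
 * below drops whole direct/indirect/double-indirect subtrees and zeroes
 * the corresponding i_nid[] slots.
 */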
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs;
	struct f2fs_node *rn;
	struct dnode_of_data dn;
	struct page *page;

	level = get_node_path(from, offset, noffset);

	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page))
		return PTR_ERR(page);

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	rn = page_address(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, &rn->i, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, &rn->i, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			wait_on_page_writeback(page);
			rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	return err > 0 ? 0 : err;
}
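
/*
 * Drop the node pages of a dying inode: the xattr node first, if any,
 * and then the inode page itself.  At this point i_blocks is either 1
 * (only the inode page remains) or 0 (f2fs_new_inode() failed before a
 * block was charged).
 */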
int remove_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	nid_t ino = inode->i_ino;
	struct dnode_of_data dn;

	mutex_lock_op(sbi, NODE_TRUNC);
	page = get_node_page(sbi, ino);
	if (IS_ERR(page)) {
		mutex_unlock_op(sbi, NODE_TRUNC);
		return PTR_ERR(page);
	}

	if (F2FS_I(inode)->i_xattr_nid) {
		nid_t nid = F2FS_I(inode)->i_xattr_nid;
		struct page *npage = get_node_page(sbi, nid);

		if (IS_ERR(npage)) {
			mutex_unlock_op(sbi, NODE_TRUNC);
			return PTR_ERR(npage);
		}

		F2FS_I(inode)->i_xattr_nid = 0;
		set_new_dnode(&dn, inode, page, npage, nid);
		dn.inode_page_locked = 1;
		truncate_node(&dn);
	}
	if (inode->i_blocks == 1) {
		/* internally calls f2fs_put_page() */
		set_new_dnode(&dn, inode, page, page, ino);
		truncate_node(&dn);
	} else if (inode->i_blocks == 0) {
		struct node_info ni;
		get_node_info(sbi, inode->i_ino, &ni);

		/* called after f2fs_new_inode() failed */
		BUG_ON(ni.blk_addr != NULL_ADDR);
		f2fs_put_page(page, 1);
	} else {
		BUG();
	}
	mutex_unlock_op(sbi, NODE_TRUNC);
	return 0;
}

int new_inode_page(struct inode *inode, struct dentry *dentry)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	mutex_lock_op(sbi, NODE_NEW);
	page = new_node_page(&dn, 0);
	if (!IS_ERR(page))
		init_dent_inode(dentry, page);
	mutex_unlock_op(sbi, NODE_NEW);
	if (IS_ERR(page))
		return PTR_ERR(page);
	f2fs_put_page(page, 1);
	return 0;
}

struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct node_info old_ni, new_ni;
	struct page *page;
	int err;

	if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
		return ERR_PTR(-EPERM);

	page = grab_cache_page(mapping, dn->nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	get_node_info(sbi, dn->nid, &old_ni);

	SetPageUptodate(page);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);

	/* Reinitialize old_ni with new node page */
	BUG_ON(old_ni.blk_addr != NULL_ADDR);
	new_ni = old_ni;
	new_ni.ino = dn->inode->i_ino;

	if (!inc_valid_node_count(sbi, dn->inode, 1)) {
		err = -ENOSPC;
		goto fail;
	}
	set_node_addr(sbi, &new_ni, NEW_ADDR);
	set_cold_node(dn->inode, page);

	dn->node_page = page;
	sync_inode_page(dn);
	set_page_dirty(page);
	if (ofs == 0)
		inc_valid_inode_count(sbi);

	return page;

fail:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

static int read_node_page(struct page *page, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	struct node_info ni;

	get_node_info(sbi, page->index, &ni);

	if (ni.blk_addr == NULL_ADDR)
		return -ENOENT;
	return f2fs_readpage(sbi, page, ni.blk_addr, type);
}

/*
 * Readahead a node page
 */
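
/*
 * This is best-effort: a page that is already cached and uptodate is
 * left alone, and read errors are ignored, since any real user will
 * re-read the page synchronously through get_node_page().
 */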
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct page *apage;

	apage = find_get_page(mapping, nid);
	if (apage && PageUptodate(apage))
		goto release_out;
	f2fs_put_page(apage, 0);

	apage = grab_cache_page(mapping, nid);
	if (!apage)
		return;

	if (read_node_page(apage, READA))
		goto unlock_out;

	page_cache_release(apage);
	return;

unlock_out:
	unlock_page(apage);
release_out:
	page_cache_release(apage);
}

struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	int err;
	struct page *page;
	struct address_space *mapping = sbi->node_inode->i_mapping;

	page = grab_cache_page(mapping, nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}

	BUG_ON(nid != nid_of_node(page));
	mark_page_accessed(page);
	return page;
}

/*
 * Return a locked page for the desired node page.
 * And, readahead MAX_RA_NODE number of node pages.
 */
struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
	struct address_space *mapping = sbi->node_inode->i_mapping;
	int i, end;
	int err = 0;
	nid_t nid;
	struct page *page;

	/* First, try getting the desired direct node. */
	nid = get_nid(parent, start, false);
	if (!nid)
		return ERR_PTR(-ENOENT);

	page = find_get_page(mapping, nid);
	if (page && PageUptodate(page))
		goto page_hit;
	f2fs_put_page(page, 0);

repeat:
	page = grab_cache_page(mapping, nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READA);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}

	/* Then, try readahead for siblings of the desired node */
	end = start + MAX_RA_NODE;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start + 1; i < end; i++) {
		nid = get_nid(parent, i, false);
		if (!nid)
			continue;
		ra_node_page(sbi, nid);
	}

page_hit:
	lock_page(page);
	if (PageError(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}

	/* Has the page been truncated? */
	if (page->mapping != mapping) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

void sync_inode_page(struct dnode_of_data *dn)
{
	if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
		update_inode(dn->inode, dn->node_page);
	} else if (dn->inode_page) {
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		update_inode(dn->inode, dn->inode_page);
		if (!dn->inode_page_locked)
			unlock_page(dn->inode_page);
	} else {
		f2fs_write_inode(dn->inode, NULL);
	}
}
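
/*
 * Write back dirty node pages.  With ino == 0, everything is flushed in
 * three passes ordered as indirect nodes, dentry dnodes, then file
 * dnodes; with a non-zero ino (the fsync path), only that inode's
 * dnodes are written, carrying fsync/dentry marks for recovery.
 */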
int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
			struct writeback_control *wbc)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	pgoff_t index, end;
	struct pagevec pvec;
	int step = ino ? 2 : 0;
	int nwritten = 0, wrote = 0;

	pagevec_init(&pvec, 0);

next_step:
	index = 0;
	end = LONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;

			/*
			 * In fsync mode, we should not skip writing
			 * node pages.
			 */
			if (ino && ino_of_node(page) == ino)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino && ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			/* called by fsync() */
			if (ino && IS_DNODE(page)) {
				int mark = !is_checkpointed_node(sbi, ino);
				set_fsync_mark(page, 1);
				if (IS_INODE(page))
					set_dentry_mark(page, mark);
				nwritten++;
			} else {
				set_fsync_mark(page, 0);
				set_dentry_mark(page, 0);
			}
			mapping->a_ops->writepage(page, wbc);
			wrote++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}

	if (wrote)
		f2fs_submit_bio(sbi, NODE, wbc->sync_mode == WB_SYNC_ALL);

	return nwritten;
}
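
/*
 * Write out one node page: the node is relocated to a freshly allocated
 * block (log-structured update) and its NAT entry is pointed at the new
 * address.  Under page reclaim the page is simply redirtied and skipped.
 */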
static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	nid_t nid;
	unsigned int nofs;
	block_t new_addr;
	struct node_info ni;

	if (wbc->for_reclaim) {
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		wbc->pages_skipped++;
		set_page_dirty(page);
		return AOP_WRITEPAGE_ACTIVATE;
	}

	wait_on_page_writeback(page);

	mutex_lock_op(sbi, NODE_WRITE);

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	nofs = ofs_of_node(page);
	BUG_ON(page->index != nid);

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (ni.blk_addr == NULL_ADDR) {
		mutex_unlock_op(sbi, NODE_WRITE);
		unlock_page(page);
		return 0;
	}

	set_page_writeback(page);

	/* insert node offset */
	write_node_page(sbi, page, nid, ni.blk_addr, &new_addr);
	set_node_addr(sbi, &ni, new_addr);
	dec_page_count(sbi, F2FS_DIRTY_NODES);

	mutex_unlock_op(sbi, NODE_WRITE);
	unlock_page(page);
	return 0;
}

static int f2fs_write_node_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	struct block_device *bdev = sbi->sb->s_bdev;
	long nr_to_write = wbc->nr_to_write;

	if (wbc->for_kupdate)
		return 0;

	if (get_pages(sbi, F2FS_DIRTY_NODES) == 0)
		return 0;

	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
		write_checkpoint(sbi, false, false);
		return 0;
	}

	/* if mounting failed, skip writing node pages */
	wbc->nr_to_write = bio_get_nr_vecs(bdev);
	sync_node_pages(sbi, 0, wbc);
	wbc->nr_to_write = nr_to_write -
		(bio_get_nr_vecs(bdev) - wbc->nr_to_write);
	return 0;
}

static int f2fs_set_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(sbi, F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		return 1;
	}
	return 0;
}

static void f2fs_invalidate_node_page(struct page *page, unsigned long offset)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (PageDirty(page))
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	ClearPagePrivate(page);
}

static int f2fs_release_node_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 0;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_node_page,
	.releasepage	= f2fs_release_node_page,
};

static struct free_nid *__lookup_free_nid_list(nid_t n, struct list_head *head)
{
	struct list_head *this;
	struct free_nid *i = NULL;
	list_for_each(this, head) {
		i = list_entry(this, struct free_nid, list);
		if (i->nid == n)
			break;
		i = NULL;
	}
	return i;
}

static void __del_from_free_nid_list(struct free_nid *i)
{
	list_del(&i->list);
	kmem_cache_free(free_nid_slab, i);
}

static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;

	if (nm_i->fcnt > 2 * MAX_FREE_NIDS)
		return 0;
retry:
	i = kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	if (!i) {
		cond_resched();
		goto retry;
	}
	i->nid = nid;
	i->state = NID_NEW;

	spin_lock(&nm_i->free_nid_list_lock);
	if (__lookup_free_nid_list(nid, &nm_i->free_nid_list)) {
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}
	list_add_tail(&i->list, &nm_i->free_nid_list);
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
	return 1;
}

static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;
	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	if (i && i->state == NID_NEW) {
		__del_from_free_nid_list(i);
		nm_i->fcnt--;
	}
	spin_unlock(&nm_i->free_nid_list_lock);
}

static int scan_nat_page(struct f2fs_nm_info *nm_i,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	int fcnt = 0;
	int i;

	/* 0 nid should not be used */
	if (start_nid == 0)
		++start_nid;

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		BUG_ON(blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR)
			fcnt += add_free_nid(nm_i, start_nid);
	}
	return fcnt;
}
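
/*
 * Scan the NAT area for free nids, starting at next_scan_nid and
 * wrapping around at max_nid, until roughly MAX_FREE_NIDS candidates
 * are cached; the NAT journal and the nat cache are then consulted to
 * drop any nids that are in fact in use.
 */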
static void build_free_nids(struct f2fs_sb_info *sbi)
{
	struct free_nid *fnid, *next_fnid;
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t nid = 0;
	bool is_cycled = false;
	int fcnt = 0;
	int i;

	nid = nm_i->next_scan_nid;
	nm_i->init_scan_nid = nid;

	ra_nat_pages(sbi, nid);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		fcnt += scan_nat_page(nm_i, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));

		if (nid >= nm_i->max_nid) {
			nid = 0;
			is_cycled = true;
		}
		if (fcnt > MAX_FREE_NIDS)
			break;
		if (is_cycled && nm_i->init_scan_nid <= nid)
			break;
	}

	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(sum, i));
		if (addr == NULL_ADDR)
			add_free_nid(nm_i, nid);
		else
			remove_free_nid(nm_i, nid);
	}
	mutex_unlock(&curseg->curseg_mutex);

	/* remove the free nids from currently allocated nids */
	list_for_each_entry_safe(fnid, next_fnid, &nm_i->free_nid_list, list) {
		struct nat_entry *ne;

		read_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, fnid->nid);
		if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
			remove_free_nid(nm_i, fnid->nid);
		read_unlock(&nm_i->nat_tree_lock);
	}
}

/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid could be used as an ino as well as a nid when an
 * inode is created.
 */
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
	struct list_head *this;
retry:
	mutex_lock(&nm_i->build_lock);
	if (!nm_i->fcnt) {
		/* scan NAT in order to build free nid list */
		build_free_nids(sbi);
		if (!nm_i->fcnt) {
			mutex_unlock(&nm_i->build_lock);
			return false;
		}
	}
	mutex_unlock(&nm_i->build_lock);

	/*
	 * We check fcnt again since the previous check is racy as
	 * we didn't hold free_nid_list_lock. So other threads
	 * could consume all of the free nids.
	 */
	spin_lock(&nm_i->free_nid_list_lock);
	if (!nm_i->fcnt) {
		spin_unlock(&nm_i->free_nid_list_lock);
		goto retry;
	}

	BUG_ON(list_empty(&nm_i->free_nid_list));
	list_for_each(this, &nm_i->free_nid_list) {
		i = list_entry(this, struct free_nid, list);
		if (i->state == NID_NEW)
			break;
	}

	BUG_ON(i->state != NID_NEW);
	*nid = i->nid;
	i->state = NID_ALLOC;
	nm_i->fcnt--;
	spin_unlock(&nm_i->free_nid_list_lock);
	return true;
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	if (i) {
		BUG_ON(i->state != NID_ALLOC);
		__del_from_free_nid_list(i);
	}
	spin_unlock(&nm_i->free_nid_list_lock);
}

/*
 * alloc_nid() should be called prior to this function.
 */
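/* Unlike alloc_nid_done(), the nid is put back on the free list for reuse. */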
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	alloc_nid_done(sbi, nid);
	add_free_nid(NM_I(sbi), nid);
}

void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
		struct f2fs_summary *sum, struct node_info *ni,
		block_t new_blkaddr)
{
	rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
	set_node_addr(sbi, ni, new_blkaddr);
	clear_node_page_dirty(page);
}

int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct f2fs_node *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;

	ipage = grab_cache_page(mapping, ino);
	if (!ipage)
		return -ENOMEM;

	/* Should not use this inode from free nid list */
	remove_free_nid(NM_I(sbi), ino);

	get_node_info(sbi, ino, &old_ni);
	SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);

	src = (struct f2fs_node *)page_address(page);
	dst = (struct f2fs_node *)page_address(ipage);

	memcpy(dst, src, (unsigned long)&src->i.i_ext - (unsigned long)&src->i);
	dst->i.i_size = 0;
	dst->i.i_blocks = cpu_to_le64(1);
	dst->i.i_links = cpu_to_le32(1);
	dst->i.i_xattr_nid = 0;

	new_ni = old_ni;
	new_ni.ino = ino;

	set_node_addr(sbi, &new_ni, NEW_ADDR);
	inc_valid_inode_count(sbi);

	f2fs_put_page(ipage, 1);
	return 0;
}
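
/*
 * Rebuild the node summary entries of a whole segment by reading every
 * node block in it and taking the nid from each block's footer.
 */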
int restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	struct page *page;
	block_t addr;
	int i, last_offset;

	/* allocate a temporary page to read node blocks */
	page = alloc_page(GFP_NOFS | __GFP_ZERO);
	if (!page)
		return -ENOMEM;
	lock_page(page);

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; i < last_offset; i++, sum_entry++) {
		if (f2fs_readpage(sbi, page, addr, READ_SYNC))
			goto out;

		rn = (struct f2fs_node *)page_address(page);
		sum_entry->nid = rn->footer.nid;
		sum_entry->version = 0;
		sum_entry->ofs_in_node = 0;
		addr++;

		/*
		 * In order to read the next node page,
		 * we must clear the PageUptodate flag.
		 */
		ClearPageUptodate(page);
	}
out:
	unlock_page(page);
	__free_pages(page, 0);
	return 0;
}

static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	mutex_lock(&curseg->curseg_mutex);

	if (nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) {
		mutex_unlock(&curseg->curseg_mutex);
		return false;
	}

	for (i = 0; i < nats_in_cursum(sum); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(sum, i));

		raw_ne = nat_in_journal(sum, i);
retry:
		write_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne) {
			__set_nat_cache_dirty(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
			continue;
		}
		ne = grab_nat_entry(nm_i, nid);
		if (!ne) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(ne, le32_to_cpu(raw_ne.block_addr));
		nat_set_ino(ne, le32_to_cpu(raw_ne.ino));
		nat_set_version(ne, raw_ne.version);
		__set_nat_cache_dirty(nm_i, ne);
		write_unlock(&nm_i->nat_tree_lock);
	}
	update_nats_in_cursum(sum, -i);
	mutex_unlock(&curseg->curseg_mutex);
	return true;
}

/*
 * This function is called during the checkpointing process.
 */
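
/*
 * Dirty NAT entries are written to the NAT journal in the current
 * summary block while there is room (unless the journal was just
 * flushed); otherwise the owning NAT block is redirected to its pair
 * location via get_next_nat_page() and updated there.
 */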
void flush_nat_entries(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct list_head *cur, *n;
	struct page *page = NULL;
	struct f2fs_nat_block *nat_blk = NULL;
	nid_t start_nid = 0, end_nid = 0;
	bool flushed;

	flushed = flush_nats_in_journal(sbi);

	if (!flushed)
		mutex_lock(&curseg->curseg_mutex);

	/* 1) flush dirty nat caches */
	list_for_each_safe(cur, n, &nm_i->dirty_nat_entries) {
		struct nat_entry *ne;
		nid_t nid;
		struct f2fs_nat_entry raw_ne;
		int offset = -1;
		block_t old_blkaddr, new_blkaddr;

		ne = list_entry(cur, struct nat_entry, list);
		nid = nat_get_nid(ne);

		if (nat_get_blkaddr(ne) == NEW_ADDR)
			continue;
		if (flushed)
			goto to_nat_page;

		/* if there is room for nat entries in the current summary page */
		offset = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 1);
		if (offset >= 0) {
			raw_ne = nat_in_journal(sum, offset);
			old_blkaddr = le32_to_cpu(raw_ne.block_addr);
			goto flush_now;
		}
to_nat_page:
		if (!page || (start_nid > nid || nid > end_nid)) {
			if (page) {
				f2fs_put_page(page, 1);
				page = NULL;
			}
			start_nid = START_NID(nid);
			end_nid = start_nid + NAT_ENTRY_PER_BLOCK - 1;

			/*
			 * get nat block with dirty flag, increased reference
			 * count, mapped and locked
			 */
			page = get_next_nat_page(sbi, start_nid);
			nat_blk = page_address(page);
		}

		BUG_ON(!nat_blk);
		raw_ne = nat_blk->entries[nid - start_nid];
		old_blkaddr = le32_to_cpu(raw_ne.block_addr);
flush_now:
		new_blkaddr = nat_get_blkaddr(ne);

		raw_ne.ino = cpu_to_le32(nat_get_ino(ne));
		raw_ne.block_addr = cpu_to_le32(new_blkaddr);
		raw_ne.version = nat_get_version(ne);

		if (offset < 0) {
			nat_blk->entries[nid - start_nid] = raw_ne;
		} else {
			nat_in_journal(sum, offset) = raw_ne;
			nid_in_journal(sum, offset) = cpu_to_le32(nid);
		}

		if (nat_get_blkaddr(ne) == NULL_ADDR) {
			write_lock(&nm_i->nat_tree_lock);
			__del_from_nat_cache(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);

			/* We can reuse this freed nid at this point */
			add_free_nid(NM_I(sbi), nid);
		} else {
			write_lock(&nm_i->nat_tree_lock);
			__clear_nat_cache_dirty(nm_i, ne);
			ne->checkpointed = true;
			write_unlock(&nm_i->nat_tree_lock);
		}
	}
	if (!flushed)
		mutex_unlock(&curseg->curseg_mutex);
	f2fs_put_page(page, 1);

	/* 2) shrink nat caches if necessary */
	try_to_free_nats(sbi, nm_i->nat_cnt - NM_WOUT_THRESHOLD);
}

static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs, nat_blocks;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes pair segments, so divide by 2 */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
	nm_i->fcnt = 0;
	nm_i->nat_cnt = 0;

	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->nat_entries);
	INIT_LIST_HEAD(&nm_i->dirty_nat_entries);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->free_nid_list_lock);
	rwlock_init(&nm_i->nat_tree_lock);

	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	nm_i->init_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);

	nm_i->nat_bitmap = kzalloc(nm_i->bitmap_size, GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	/* copy version bitmap */
	memcpy(nm_i->nat_bitmap, version_bitmap, nm_i->bitmap_size);
	return 0;
}

int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	build_free_nids(sbi);
	return 0;
}
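
/*
 * By the time the node manager is torn down, no nid may still be in the
 * NID_ALLOC state and the nat cache must drain completely; the BUG_ON()s
 * below assert both.
 */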
void destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->free_nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		BUG_ON(i->state == NID_ALLOC);
		__del_from_free_nid_list(i);
		nm_i->fcnt--;
	}
	BUG_ON(nm_i->fcnt);
	spin_unlock(&nm_i->free_nid_list_lock);

	/* destroy nat cache */
	write_lock(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
			nid, NATVEC_SIZE, natvec))) {
		unsigned idx;
		for (idx = 0; idx < found; idx++) {
			struct nat_entry *e = natvec[idx];
			nid = nat_get_nid(e) + 1;
			__del_from_nat_cache(nm_i, e);
		}
	}
	BUG_ON(nm_i->nat_cnt);
	write_unlock(&nm_i->nat_tree_lock);

	kfree(nm_i->nat_bitmap);
	sbi->nm_info = NULL;
	kfree(nm_i);
}

int create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
			sizeof(struct nat_entry), NULL);
	if (!nat_entry_slab)
		return -ENOMEM;

	free_nid_slab = f2fs_kmem_cache_create("free_nid",
			sizeof(struct free_nid), NULL);
	if (!free_nid_slab) {
		kmem_cache_destroy(nat_entry_slab);
		return -ENOMEM;
	}
	return 0;
}

void destroy_node_manager_caches(void)
{
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}