/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;

static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return get_meta_page(sbi, index);
}
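/*
 * Sketch of the NAT block alternation driving get_next_nat_page() below
 * (informal; inferred from the pair-segment layout noted in
 * init_node_manager() and from set_to_next_nat() flipping a bit in the NAT
 * version bitmap): each NAT block exists in two copies, and
 * current_nat_addr()/next_nat_addr() pick the live copy and its shadow.
 * Updates are copied into the shadow, and the bitmap flip makes the shadow
 * become the live copy as of the next checkpoint.
 */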
static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);

	/* Dirty src_page means that it is already the new target NAT page. */
	if (PageDirty(src_page))
		return src_page;

	dst_page = grab_meta_page(sbi, dst_off);

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

/*
 * Readahead NAT pages
 */
static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid)
{
	struct address_space *mapping = sbi->meta_inode->i_mapping;
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct page *page;
	pgoff_t index;
	int i;

	for (i = 0; i < FREE_NID_PAGES; i++, nid += NAT_ENTRY_PER_BLOCK) {
		if (nid >= nm_i->max_nid)
			nid = 0;
		index = current_nat_addr(sbi, nid);

		page = grab_cache_page(mapping, index);
		if (!page)
			continue;
		if (f2fs_readpage(sbi, page, index, READ)) {
			f2fs_put_page(page, 1);
			continue;
		}
		f2fs_put_page(page, 0);
	}
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}

int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	int is_cp = 1;

	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !e->checkpointed)
		is_cp = 0;
	read_unlock(&nm_i->nat_tree_lock);
	return is_cp;
}

static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct nat_entry *new;

	new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
	if (!new)
		return NULL;
	if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
		kmem_cache_free(nat_entry_slab, new);
		return NULL;
	}
	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}

static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(e, le32_to_cpu(ne->block_addr));
		nat_set_ino(e, le32_to_cpu(ne->ino));
		nat_set_version(e, ne->version);
		e->checkpointed = true;
	}
	write_unlock(&nm_i->nat_tree_lock);
}
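/*
 * Informal summary of the block-address transitions that set_node_addr()
 * below allows for a cached nat entry (read off its BUG_ON checks, not an
 * on-disk format statement):
 *
 *	NULL_ADDR -> NEW_ADDR      node allocated but not yet written;
 *	                           the entry also becomes !checkpointed
 *	NEW_ADDR  -> valid address node written out for the first time
 *	valid     -> valid address node rewritten at a new location
 *	valid     -> NULL_ADDR     node freed; the version number is bumped
 *	NEW_ADDR  -> NULL_ADDR     allocation undone; no version bump
 *
 * Everything else (NULL -> NULL, NEW -> NEW, valid -> NEW) trips a BUG_ON.
 */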
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		e->ni = *ni;
		e->checkpointed = true;
		BUG_ON(ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when a nid is reallocated, the previous nat entry can
		 * remain in the nat cache. So, reinitialize it with new
		 * information.
		 */
		e->ni = *ni;
		BUG_ON(ni->blk_addr != NULL_ADDR);
	}

	if (new_blkaddr == NEW_ADDR)
		e->checkpointed = false;

	/* sanity check */
	BUG_ON(nat_get_blkaddr(e) != ni->blk_addr);
	BUG_ON(nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	BUG_ON(nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	BUG_ON(nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment the version number, as the node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	__set_nat_cache_dirty(nm_i, e);
	write_unlock(&nm_i->nat_tree_lock);
}

static int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	if (nm_i->nat_cnt < 2 * NM_WOUT_THRESHOLD)
		return 0;

	write_lock(&nm_i->nat_tree_lock);
	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	write_unlock(&nm_i->nat_tree_lock);
	return nr_shrink;
}

/*
 * This function always returns success.
 */
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	int i;

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
	ni->nid = nid;

	/* Check nat cache */
	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
	}
	read_unlock(&nm_i->nat_tree_lock);
	if (e)
		return;

	/* Check current segment summary */
	mutex_lock(&curseg->curseg_mutex);
	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(sum, i);
		node_info_from_raw_nat(ni, &ne);
	}
	mutex_unlock(&curseg->curseg_mutex);
	if (i >= 0)
		goto cache;

	/* Fill node_info from nat page */
	page = get_current_nat_page(sbi, start_nid);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	cache_nat_entry(NM_I(sbi), nid, &ne);
}
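/*
 * For orientation: get_node_info() above resolves a nid through three
 * layers, nearest first -- the in-memory nat cache (nm_i->nat_root), the
 * NAT journal kept in the hot-data curseg summary, and finally the on-disk
 * NAT block covering START_NID(nid). Whatever is found is put back into
 * the nat cache, so repeated lookups of a busy nid stay in memory.
 */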
/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
static int get_node_path(long block, int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE;
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n++] = block;
		level = 0;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n++] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n++] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n++] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n++] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			offset[n - 2] * (dptrs_per_blk + 1) +
			offset[n - 1];
		offset[n++] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		BUG();
	}
got:
	return level;
}
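/*
 * Worked example (values assume 4KB blocks, where ADDRS_PER_INODE is 923
 * and ADDRS_PER_BLOCK == NIDS_PER_BLOCK is 1018; other geometries shift
 * the boundaries accordingly). For file block 5000:
 *
 *	5000 - 923 (inode) - 1018 (dir1) - 1018 (dir2) = 2041
 *	2041 < 1018 * 1018, so it lives under the first indirect node:
 *	level = 2, offset[] = { NODE_IND1_BLOCK, 2041 / 1018 = 2,
 *	2041 % 1018 = 5 }, noffset[] = { 0, 3, 4 + 2 = 6 }.
 *
 * That is, data block 5000 is the 6th pointer in the 3rd direct node
 * hanging off the first indirect node.
 */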
/*
 * Caller should call f2fs_put_dnode(dn).
 */
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int ro)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *npage[4];
	struct page *parent;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i;
	int err = 0;

	level = get_node_path(index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = get_node_page(sbi, nids[0]);
	if (IS_ERR(npage[0]))
		return PTR_ERR(npage[0]);

	parent = npage[0];
	nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && !ro) {
			mutex_lock_op(sbi, NODE_NEW);

			/* alloc new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				mutex_unlock_op(sbi, NODE_NEW);
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i]);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				mutex_unlock_op(sbi, NODE_NEW);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			mutex_unlock_op(sbi, NODE_NEW);
			done = true;
		} else if (ro && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	return err;
}
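/*
 * Usage sketch (hypothetical caller, mirroring the pattern the data path
 * is expected to use; RDONLY_NODE stands for a nonzero `ro` argument and
 * may be named differently in this tree):
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = get_dnode_of_data(&dn, index, RDONLY_NODE);
 *	if (err)
 *		return err;
 *	blk_addr = dn.data_blkaddr;
 *	...
 *	f2fs_put_dnode(&dn);
 */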
static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct node_info ni;

	get_node_info(sbi, dn->nid, &ni);
	if (dn->inode->i_blocks == 0) {
		BUG_ON(ni.blk_addr != NULL_ADDR);
		goto invalidate;
	}
	BUG_ON(ni.blk_addr == NULL_ADDR);

	/* Deallocate node address */
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode, 1);
	set_node_addr(sbi, &ni, NULL_ADDR);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
	} else {
		sync_inode_page(dn);
	}
invalidate:
	clear_node_page_dirty(dn->node_page);
	F2FS_SET_SB_DIRT(sbi);

	f2fs_put_page(dn->node_page, 1);
	dn->node_page = NULL;
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for the parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}

static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page))
		return PTR_ERR(page);

	rn = (struct f2fs_node *)page_address(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			set_nid(page, i, 0, false);
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				set_nid(page, i, 0, false);
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	return freed;

out_err:
	f2fs_put_page(page, 1);
	return ret;
}

static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < depth - 1; i++) {
		/* reference count will be increased */
		pages[i] = get_node_page(sbi, nid[i]);
		if (IS_ERR(pages[i])) {
			depth = i + 1;
			err = PTR_ERR(pages[i]);
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[depth - 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		set_nid(pages[idx], i, 0, false);
	}

	if (offset[depth - 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[depth - 1] = 0;
fail:
	for (i = depth - 3; i >= 0; i--)
		f2fs_put_page(pages[i], 1);
	return err;
}
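/*
 * Note on return values (informal reading of the code above):
 * truncate_dnode() and truncate_nodes() report how many node blocks went
 * away so the caller can advance its node-offset cursor. A direct node
 * counts as 1; a fully-freed indirect subtree counts as NIDS_PER_BLOCK + 1
 * (its children plus the indirect node itself), which is also what a hole
 * (nid == 0) reports, since the offsets must advance either way.
 */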
/*
 * All the block addresses of data and nodes should be nullified.
 */
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_node *rn;
	struct dnode_of_data dn;
	struct page *page;

	level = get_node_path(from, offset, noffset);

	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page))
		return PTR_ERR(page);

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	rn = page_address(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, &rn->i, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, &rn->i, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			wait_on_page_writeback(page);
			rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	return err > 0 ? 0 : err;
}
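/*
 * For reference, the node offset (nofs) numbering that the truncation code
 * walks, derived from the noffset[] values in get_node_path() above, with
 * N == NIDS_PER_BLOCK:
 *
 *	0               inode
 *	1, 2            the two direct nodes
 *	3               first indirect node
 *	4 .. 3 + N      its direct-node children
 *	4 + N           second indirect node
 *	5 + N .. 4 + 2N its direct-node children
 *	5 + 2N          double indirect node
 *
 * which is why the level-3 case starts at nofs = 5 + 2 * NIDS_PER_BLOCK.
 */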
int remove_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	nid_t ino = inode->i_ino;
	struct dnode_of_data dn;

	mutex_lock_op(sbi, NODE_TRUNC);
	page = get_node_page(sbi, ino);
	if (IS_ERR(page)) {
		mutex_unlock_op(sbi, NODE_TRUNC);
		return PTR_ERR(page);
	}

	if (F2FS_I(inode)->i_xattr_nid) {
		nid_t nid = F2FS_I(inode)->i_xattr_nid;
		struct page *npage = get_node_page(sbi, nid);

		if (IS_ERR(npage)) {
			mutex_unlock_op(sbi, NODE_TRUNC);
			return PTR_ERR(npage);
		}

		F2FS_I(inode)->i_xattr_nid = 0;
		set_new_dnode(&dn, inode, page, npage, nid);
		dn.inode_page_locked = 1;
		truncate_node(&dn);
	}

	/* 0 is possible, after f2fs_new_inode() has failed */
	BUG_ON(inode->i_blocks != 0 && inode->i_blocks != 1);
	set_new_dnode(&dn, inode, page, page, ino);
	truncate_node(&dn);

	mutex_unlock_op(sbi, NODE_TRUNC);
	return 0;
}

int new_inode_page(struct inode *inode, const struct qstr *name)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	mutex_lock_op(sbi, NODE_NEW);
	page = new_node_page(&dn, 0);
	if (IS_ERR(page)) {
		mutex_unlock_op(sbi, NODE_NEW);
		return PTR_ERR(page);
	}
	init_dent_inode(name, page);
	mutex_unlock_op(sbi, NODE_NEW);
	f2fs_put_page(page, 1);
	return 0;
}

struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct node_info old_ni, new_ni;
	struct page *page;
	int err;

	if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
		return ERR_PTR(-EPERM);

	page = grab_cache_page(mapping, dn->nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	get_node_info(sbi, dn->nid, &old_ni);

	SetPageUptodate(page);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);

	/* Reinitialize old_ni with new node page */
	BUG_ON(old_ni.blk_addr != NULL_ADDR);
	new_ni = old_ni;
	new_ni.ino = dn->inode->i_ino;

	if (!inc_valid_node_count(sbi, dn->inode, 1)) {
		err = -ENOSPC;
		goto fail;
	}
	set_node_addr(sbi, &new_ni, NEW_ADDR);
	set_cold_node(dn->inode, page);

	dn->node_page = page;
	sync_inode_page(dn);
	set_page_dirty(page);
	if (ofs == 0)
		inc_valid_inode_count(sbi);

	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

static int read_node_page(struct page *page, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	struct node_info ni;

	get_node_info(sbi, page->index, &ni);

	if (ni.blk_addr == NULL_ADDR)
		return -ENOENT;
	return f2fs_readpage(sbi, page, ni.blk_addr, type);
}
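/*
 * Note: node pages live in the page cache of the internal node_inode,
 * indexed by nid -- which is why read_node_page() above can treat
 * page->index as the nid, and must translate it to a block address via
 * get_node_info() before issuing the read.
 */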
/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct page *apage;

	apage = find_get_page(mapping, nid);
	if (apage && PageUptodate(apage))
		goto release_out;
	f2fs_put_page(apage, 0);

	apage = grab_cache_page(mapping, nid);
	if (!apage)
		return;

	if (read_node_page(apage, READA))
		unlock_page(apage);
release_out:
	f2fs_put_page(apage, 0);
	return;
}

struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	int err;
	struct page *page;
	struct address_space *mapping = sbi->node_inode->i_mapping;

	page = grab_cache_page(mapping, nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}

	BUG_ON(nid != nid_of_node(page));
	mark_page_accessed(page);
	return page;
}

/*
 * Return a locked page for the desired node page.
 * And, readahead MAX_RA_NODE number of node pages.
 */
struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
	struct address_space *mapping = sbi->node_inode->i_mapping;
	int i, end;
	int err = 0;
	nid_t nid;
	struct page *page;

	/* First, try getting the desired direct node. */
	nid = get_nid(parent, start, false);
	if (!nid)
		return ERR_PTR(-ENOENT);

	page = find_get_page(mapping, nid);
	if (page && PageUptodate(page))
		goto page_hit;
	f2fs_put_page(page, 0);

repeat:
	page = grab_cache_page(mapping, nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READA);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}

	/* Then, try readahead for siblings of the desired node */
	end = start + MAX_RA_NODE;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start + 1; i < end; i++) {
		nid = get_nid(parent, i, false);
		if (!nid)
			continue;
		ra_node_page(sbi, nid);
	}

page_hit:
	lock_page(page);
	if (PageError(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}

	/* Has the page been truncated? */
	if (page->mapping != mapping) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

void sync_inode_page(struct dnode_of_data *dn)
{
	if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
		update_inode(dn->inode, dn->node_page);
	} else if (dn->inode_page) {
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		update_inode(dn->inode, dn->inode_page);
		if (!dn->inode_page_locked)
			unlock_page(dn->inode_page);
	} else {
		f2fs_write_inode(dn->inode, NULL);
	}
}
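/*
 * Sketch of the fsync marking done in sync_node_pages() below (an informal
 * reading of the code, not an on-disk format spec): when called on behalf
 * of fsync (ino != 0), each written dnode gets a fsync mark in its footer
 * so roll-forward recovery can find it, and the inode page additionally
 * carries a dentry mark when the node has not been checkpointed yet,
 * telling recovery to reconstruct the dentry as well.
 */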
int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
					struct writeback_control *wbc)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	pgoff_t index, end;
	struct pagevec pvec;
	int step = ino ? 2 : 0;
	int nwritten = 0, wrote = 0;

	pagevec_init(&pvec, 0);

next_step:
	index = 0;
	end = LONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;

			/*
			 * In fsync mode, we should not skip writing
			 * node pages.
			 */
			if (ino && ino_of_node(page) == ino)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino && ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			/* called by fsync() */
			if (ino && IS_DNODE(page)) {
				int mark = !is_checkpointed_node(sbi, ino);
				set_fsync_mark(page, 1);
				if (IS_INODE(page))
					set_dentry_mark(page, mark);
				nwritten++;
			} else {
				set_fsync_mark(page, 0);
				set_dentry_mark(page, 0);
			}
			mapping->a_ops->writepage(page, wbc);
			wrote++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}

	if (wrote)
		f2fs_submit_bio(sbi, NODE, wbc->sync_mode == WB_SYNC_ALL);

	return nwritten;
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	nid_t nid;
	block_t new_addr;
	struct node_info ni;

	if (wbc->for_reclaim) {
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		wbc->pages_skipped++;
		set_page_dirty(page);
		return AOP_WRITEPAGE_ACTIVATE;
	}

	wait_on_page_writeback(page);

	mutex_lock_op(sbi, NODE_WRITE);

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	BUG_ON(page->index != nid);

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (ni.blk_addr == NULL_ADDR) {
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		mutex_unlock_op(sbi, NODE_WRITE);
		unlock_page(page);
		return 0;
	}

	set_page_writeback(page);

	/* insert node offset */
	write_node_page(sbi, page, nid, ni.blk_addr, &new_addr);
	set_node_addr(sbi, &ni, new_addr);
	dec_page_count(sbi, F2FS_DIRTY_NODES);

	mutex_unlock_op(sbi, NODE_WRITE);
	unlock_page(page);
	return 0;
}
/*
 * It is very important to gather dirty pages and write them at once, so
 * that we can submit a big bio without interfering with other data writes.
 * By default, 512 pages (2MB), a segment size, is quite reasonable.
 */
#define COLLECT_DIRTY_NODES	512
static int f2fs_write_node_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	struct block_device *bdev = sbi->sb->s_bdev;
	long nr_to_write = wbc->nr_to_write;

	/* First check balancing cached NAT entries */
	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
		write_checkpoint(sbi, false);
		return 0;
	}

	/* collect a number of dirty node pages and write together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES)
		return 0;

	/* if mounting has failed, skip writing node pages */
	wbc->nr_to_write = bio_get_nr_vecs(bdev);
	sync_node_pages(sbi, 0, wbc);
	wbc->nr_to_write = nr_to_write -
		(bio_get_nr_vecs(bdev) - wbc->nr_to_write);
	return 0;
}

static int f2fs_set_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(sbi, F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		return 1;
	}
	return 0;
}

static void f2fs_invalidate_node_page(struct page *page, unsigned long offset)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (PageDirty(page))
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	ClearPagePrivate(page);
}

static int f2fs_release_node_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 0;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_node_page,
	.releasepage	= f2fs_release_node_page,
};

static struct free_nid *__lookup_free_nid_list(nid_t n, struct list_head *head)
{
	struct list_head *this;
	struct free_nid *i = NULL;
	list_for_each(this, head) {
		i = list_entry(this, struct free_nid, list);
		if (i->nid == n)
			break;
		i = NULL;
	}
	return i;
}

static void __del_from_free_nid_list(struct free_nid *i)
{
	list_del(&i->list);
	kmem_cache_free(free_nid_slab, i);
}

static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;

	if (nm_i->fcnt > 2 * MAX_FREE_NIDS)
		return 0;
retry:
	i = kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	if (!i) {
		cond_resched();
		goto retry;
	}
	i->nid = nid;
	i->state = NID_NEW;

	spin_lock(&nm_i->free_nid_list_lock);
	if (__lookup_free_nid_list(nid, &nm_i->free_nid_list)) {
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}
	list_add_tail(&i->list, &nm_i->free_nid_list);
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
	return 1;
}
static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;
	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	if (i && i->state == NID_NEW) {
		__del_from_free_nid_list(i);
		nm_i->fcnt--;
	}
	spin_unlock(&nm_i->free_nid_list_lock);
}

static int scan_nat_page(struct f2fs_nm_info *nm_i,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	int fcnt = 0;
	int i;

	/* 0 nid should not be used */
	if (start_nid == 0)
		++start_nid;

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		BUG_ON(blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR)
			fcnt += add_free_nid(nm_i, start_nid);
	}
	return fcnt;
}

static void build_free_nids(struct f2fs_sb_info *sbi)
{
	struct free_nid *fnid, *next_fnid;
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t nid = 0;
	bool is_cycled = false;
	int fcnt = 0;
	int i;

	nid = nm_i->next_scan_nid;
	nm_i->init_scan_nid = nid;

	ra_nat_pages(sbi, nid);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		fcnt += scan_nat_page(nm_i, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));

		if (nid >= nm_i->max_nid) {
			nid = 0;
			is_cycled = true;
		}
		if (fcnt > MAX_FREE_NIDS)
			break;
		if (is_cycled && nm_i->init_scan_nid <= nid)
			break;
	}

	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(sum, i));
		if (addr == NULL_ADDR)
			add_free_nid(nm_i, nid);
		else
			remove_free_nid(nm_i, nid);
	}
	mutex_unlock(&curseg->curseg_mutex);

	/* remove the free nids from current allocated nids */
	list_for_each_entry_safe(fnid, next_fnid, &nm_i->free_nid_list, list) {
		struct nat_entry *ne;

		read_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, fnid->nid);
		if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
			remove_free_nid(nm_i, fnid->nid);
		read_unlock(&nm_i->nat_tree_lock);
	}
}
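/*
 * Allocation protocol sketch (hypothetical caller, grounded in the
 * comments on alloc_nid_done()/alloc_nid_failed() below). A nid enters the
 * list as NID_NEW when a NAT scan finds it unused, moves to NID_ALLOC in
 * alloc_nid(), and leaves the list in alloc_nid_done(); alloc_nid_failed()
 * re-adds it as NID_NEW instead:
 *
 *	nid_t nid;
 *
 *	if (!alloc_nid(sbi, &nid))
 *		return -ENOSPC;			no free nid, even after rescan
 *	... build the new node under nid ...
 *	alloc_nid_done(sbi, nid);		success: nid is consumed
 *	(or, on failure:)
 *	alloc_nid_failed(sbi, nid);		give the nid back as NID_NEW
 */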
/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid could be used as an ino as well as a nid when an inode
 * is created.
 */
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
	struct list_head *this;
retry:
	mutex_lock(&nm_i->build_lock);
	if (!nm_i->fcnt) {
		/* scan NAT in order to build free nid list */
		build_free_nids(sbi);
		if (!nm_i->fcnt) {
			mutex_unlock(&nm_i->build_lock);
			return false;
		}
	}
	mutex_unlock(&nm_i->build_lock);

	/*
	 * We check fcnt again, since the previous check is racy as
	 * we didn't hold free_nid_list_lock. So other threads
	 * could consume all of the free nids.
	 */
	spin_lock(&nm_i->free_nid_list_lock);
	if (!nm_i->fcnt) {
		spin_unlock(&nm_i->free_nid_list_lock);
		goto retry;
	}

	BUG_ON(list_empty(&nm_i->free_nid_list));
	list_for_each(this, &nm_i->free_nid_list) {
		i = list_entry(this, struct free_nid, list);
		if (i->state == NID_NEW)
			break;
	}

	BUG_ON(i->state != NID_NEW);
	*nid = i->nid;
	i->state = NID_ALLOC;
	nm_i->fcnt--;
	spin_unlock(&nm_i->free_nid_list_lock);
	return true;
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	if (i) {
		BUG_ON(i->state != NID_ALLOC);
		__del_from_free_nid_list(i);
	}
	spin_unlock(&nm_i->free_nid_list_lock);
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	alloc_nid_done(sbi, nid);
	add_free_nid(NM_I(sbi), nid);
}

void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
		struct f2fs_summary *sum, struct node_info *ni,
		block_t new_blkaddr)
{
	rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
	set_node_addr(sbi, ni, new_blkaddr);
	clear_node_page_dirty(page);
}

int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct f2fs_node *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;

	ipage = grab_cache_page(mapping, ino);
	if (!ipage)
		return -ENOMEM;

	/* Should not use this inode from the free nid list */
	remove_free_nid(NM_I(sbi), ino);

	get_node_info(sbi, ino, &old_ni);
	SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);

	src = (struct f2fs_node *)page_address(page);
	dst = (struct f2fs_node *)page_address(ipage);

	memcpy(dst, src, (unsigned long)&src->i.i_ext - (unsigned long)&src->i);
	dst->i.i_size = 0;
	dst->i.i_blocks = cpu_to_le64(1);
	dst->i.i_links = cpu_to_le32(1);
	dst->i.i_xattr_nid = 0;

	new_ni = old_ni;
	new_ni.ino = ino;

	set_node_addr(sbi, &new_ni, NEW_ADDR);
	inc_valid_inode_count(sbi);

	f2fs_put_page(ipage, 1);
	return 0;
}
int restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	struct page *page;
	block_t addr;
	int i, last_offset;

	/* allocate a temporary page for reading node blocks */
	page = alloc_page(GFP_NOFS | __GFP_ZERO);
	if (!page)
		return -ENOMEM;
	lock_page(page);

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; i < last_offset; i++, sum_entry++) {
		if (f2fs_readpage(sbi, page, addr, READ_SYNC))
			goto out;

		rn = (struct f2fs_node *)page_address(page);
		sum_entry->nid = rn->footer.nid;
		sum_entry->version = 0;
		sum_entry->ofs_in_node = 0;
		addr++;

		/*
		 * In order to read the next node page,
		 * we must clear the PageUptodate flag.
		 */
		ClearPageUptodate(page);
	}
out:
	unlock_page(page);
	__free_pages(page, 0);
	return 0;
}

static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	mutex_lock(&curseg->curseg_mutex);

	if (nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) {
		mutex_unlock(&curseg->curseg_mutex);
		return false;
	}

	for (i = 0; i < nats_in_cursum(sum); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(sum, i));

		raw_ne = nat_in_journal(sum, i);
retry:
		write_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne) {
			__set_nat_cache_dirty(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
			continue;
		}
		ne = grab_nat_entry(nm_i, nid);
		if (!ne) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(ne, le32_to_cpu(raw_ne.block_addr));
		nat_set_ino(ne, le32_to_cpu(raw_ne.ino));
		nat_set_version(ne, raw_ne.version);
		__set_nat_cache_dirty(nm_i, ne);
		write_unlock(&nm_i->nat_tree_lock);
	}
	update_nats_in_cursum(sum, -i);
	mutex_unlock(&curseg->curseg_mutex);
	return true;
}
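/*
 * Dirty nat entries reach disk through one of two paths (informal
 * summary): while the hot-data summary block still has journal room, they
 * are journaled there and folded into real NAT blocks at a later
 * checkpoint; once the journal fills up, flush_nats_in_journal() above
 * drains it back into the nat cache, and flush_nat_entries() below then
 * writes the entries into the NAT blocks proper via get_next_nat_page().
 */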
/*
 * This function is called during the checkpointing process.
 */
void flush_nat_entries(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct list_head *cur, *n;
	struct page *page = NULL;
	struct f2fs_nat_block *nat_blk = NULL;
	nid_t start_nid = 0, end_nid = 0;
	bool flushed;

	flushed = flush_nats_in_journal(sbi);

	if (!flushed)
		mutex_lock(&curseg->curseg_mutex);

	/* 1) flush dirty nat caches */
	list_for_each_safe(cur, n, &nm_i->dirty_nat_entries) {
		struct nat_entry *ne;
		nid_t nid;
		struct f2fs_nat_entry raw_ne;
		int offset = -1;
		block_t new_blkaddr;

		ne = list_entry(cur, struct nat_entry, list);
		nid = nat_get_nid(ne);

		if (nat_get_blkaddr(ne) == NEW_ADDR)
			continue;
		if (flushed)
			goto to_nat_page;

		/* if there is room for nat entries in the curseg summary page */
		offset = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 1);
		if (offset >= 0) {
			raw_ne = nat_in_journal(sum, offset);
			goto flush_now;
		}
to_nat_page:
		if (!page || (start_nid > nid || nid > end_nid)) {
			if (page) {
				f2fs_put_page(page, 1);
				page = NULL;
			}
			start_nid = START_NID(nid);
			end_nid = start_nid + NAT_ENTRY_PER_BLOCK - 1;

			/*
			 * get the nat block with dirty flag, increased
			 * reference count, mapped and locked
			 */
			page = get_next_nat_page(sbi, start_nid);
			nat_blk = page_address(page);
		}

		BUG_ON(!nat_blk);
		raw_ne = nat_blk->entries[nid - start_nid];
flush_now:
		new_blkaddr = nat_get_blkaddr(ne);

		raw_ne.ino = cpu_to_le32(nat_get_ino(ne));
		raw_ne.block_addr = cpu_to_le32(new_blkaddr);
		raw_ne.version = nat_get_version(ne);

		if (offset < 0) {
			nat_blk->entries[nid - start_nid] = raw_ne;
		} else {
			nat_in_journal(sum, offset) = raw_ne;
			nid_in_journal(sum, offset) = cpu_to_le32(nid);
		}

		if (nat_get_blkaddr(ne) == NULL_ADDR) {
			write_lock(&nm_i->nat_tree_lock);
			__del_from_nat_cache(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);

			/* We can reuse this freed nid at this point */
			add_free_nid(NM_I(sbi), nid);
		} else {
			write_lock(&nm_i->nat_tree_lock);
			__clear_nat_cache_dirty(nm_i, ne);
			ne->checkpointed = true;
			write_unlock(&nm_i->nat_tree_lock);
		}
	}
	if (!flushed)
		mutex_unlock(&curseg->curseg_mutex);
	f2fs_put_page(page, 1);

	/* 2) shrink nat caches if necessary */
	try_to_free_nats(sbi, nm_i->nat_cnt - NM_WOUT_THRESHOLD);
}

static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs, nat_blocks;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes the pair segments, so divide by 2 */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
	nm_i->fcnt = 0;
	nm_i->nat_cnt = 0;

	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->nat_entries);
	INIT_LIST_HEAD(&nm_i->dirty_nat_entries);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->free_nid_list_lock);
	rwlock_init(&nm_i->nat_tree_lock);

	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	nm_i->init_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);

	nm_i->nat_bitmap = kzalloc(nm_i->bitmap_size, GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	/* copy version bitmap */
	memcpy(nm_i->nat_bitmap, version_bitmap, nm_i->bitmap_size);
	return 0;
}

int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	build_free_nids(sbi);
	return 0;
}
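/*
 * Worked example for the geometry above (illustrative values: a 4KB block
 * size where NAT_ENTRY_PER_BLOCK is 455, a default 512-block segment, and
 * a hypothetical segment_count_nat of 60): nat_segs = 60 >> 1 = 30,
 * nat_blocks = 30 << 9 = 15360, so max_nid = 455 * 15360 = 6988800
 * addressable nodes.
 */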
void destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->free_nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		BUG_ON(i->state == NID_ALLOC);
		__del_from_free_nid_list(i);
		nm_i->fcnt--;
	}
	BUG_ON(nm_i->fcnt);
	spin_unlock(&nm_i->free_nid_list_lock);

	/* destroy nat cache */
	write_lock(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
			nid, NATVEC_SIZE, natvec))) {
		unsigned idx;
		for (idx = 0; idx < found; idx++) {
			struct nat_entry *e = natvec[idx];
			nid = nat_get_nid(e) + 1;
			__del_from_nat_cache(nm_i, e);
		}
	}
	BUG_ON(nm_i->nat_cnt);
	write_unlock(&nm_i->nat_tree_lock);

	kfree(nm_i->nat_bitmap);
	sbi->nm_info = NULL;
	kfree(nm_i);
}

int __init create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
			sizeof(struct nat_entry), NULL);
	if (!nat_entry_slab)
		return -ENOMEM;

	free_nid_slab = f2fs_kmem_cache_create("free_nid",
			sizeof(struct free_nid), NULL);
	if (!free_nid_slab) {
		kmem_cache_destroy(nat_entry_slab);
		return -ENOMEM;
	}
	return 0;
}

void destroy_node_manager_caches(void)
{
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}