/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;

static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return get_meta_page(sbi, index);
}
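/*
 * The NAT area keeps two copies of every NAT block, and one bit per
 * block in nm_i->nat_bitmap selects which copy is current.
 * get_next_nat_page() below copies the current block into the alternate
 * slot and flips that bit via set_to_next_nat(), so a checkpoint never
 * overwrites the last stable copy in place.
 */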
static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);

	/* Dirty src_page means that it is already the new target NAT page. */
	if (PageDirty(src_page))
		return src_page;

	dst_page = grab_meta_page(sbi, dst_off);

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

/*
 * Readahead NAT pages
 */
static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid)
{
	struct address_space *mapping = sbi->meta_inode->i_mapping;
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct page *page;
	pgoff_t index;
	int i;

	for (i = 0; i < FREE_NID_PAGES; i++, nid += NAT_ENTRY_PER_BLOCK) {
		if (nid >= nm_i->max_nid)
			nid = 0;
		index = current_nat_addr(sbi, nid);

		page = grab_cache_page(mapping, index);
		if (!page)
			continue;
		if (PageUptodate(page)) {
			f2fs_put_page(page, 1);
			continue;
		}
		if (f2fs_readpage(sbi, page, index, READ))
			continue;

		f2fs_put_page(page, 0);
	}
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}

int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	int is_cp = 1;

	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !e->checkpointed)
		is_cp = 0;
	read_unlock(&nm_i->nat_tree_lock);
	return is_cp;
}

static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct nat_entry *new;

	new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
	if (!new)
		return NULL;
	if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
		kmem_cache_free(nat_entry_slab, new);
		return NULL;
	}
	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}

static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(e, le32_to_cpu(ne->block_addr));
		nat_set_ino(e, le32_to_cpu(ne->ino));
		nat_set_version(e, ne->version);
		e->checkpointed = true;
	}
	write_unlock(&nm_i->nat_tree_lock);
}
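/*
 * grab_nat_entry() allocates with GFP_ATOMIC under the nat tree write
 * lock, so it can fail transiently under memory pressure; callers such
 * as cache_nat_entry() above and set_node_addr() below drop the lock
 * and retry rather than propagating the failure.
 */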
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		e->ni = *ni;
		e->checkpointed = true;
		BUG_ON(ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when a nid is reallocated, the previous nat entry
		 * can remain in the nat cache.
		 * So, reinitialize it with new information.
		 */
		e->ni = *ni;
		BUG_ON(ni->blk_addr != NULL_ADDR);
	}

	if (new_blkaddr == NEW_ADDR)
		e->checkpointed = false;

	/* sanity check */
	BUG_ON(nat_get_blkaddr(e) != ni->blk_addr);
	BUG_ON(nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	BUG_ON(nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	BUG_ON(nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment the version no. as the node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	__set_nat_cache_dirty(nm_i, e);
	write_unlock(&nm_i->nat_tree_lock);
}

static int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	if (nm_i->nat_cnt < 2 * NM_WOUT_THRESHOLD)
		return 0;

	write_lock(&nm_i->nat_tree_lock);
	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	write_unlock(&nm_i->nat_tree_lock);
	return nr_shrink;
}

/*
 * This function always returns success.
 */
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	int i;

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
	ni->nid = nid;

	/* Check nat cache */
	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
	}
	read_unlock(&nm_i->nat_tree_lock);
	if (e)
		return;

	/* Check current segment summary */
	mutex_lock(&curseg->curseg_mutex);
	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(sum, i);
		node_info_from_raw_nat(ni, &ne);
	}
	mutex_unlock(&curseg->curseg_mutex);
	if (i >= 0)
		goto cache;

	/* Fill node_info from nat page */
	page = get_current_nat_page(sbi, start_nid);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	cache_nat_entry(NM_I(sbi), nid, &ne);
}
/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
static int get_node_path(long block, int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE;
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		BUG();
	}
got:
	return level;
}
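/*
 * Worked example for get_node_path() (the values assume the common
 * 4KB-block layout, where ADDRS_PER_INODE == 923 and
 * ADDRS_PER_BLOCK == NIDS_PER_BLOCK == 1018):
 *
 *	block 0    -> level 0, offset = {0}            (inside the inode)
 *	block 923  -> level 1, offset = {NODE_DIR1_BLOCK, 0}
 *	block 1941 -> level 1, offset = {NODE_DIR2_BLOCK, 0}
 *	block 2959 -> level 2, offset = {NODE_IND1_BLOCK, 0, 0}
 *
 * noffset[] counts node blocks (not data blocks) along the same path,
 * and is used as the node page offset when new node pages are allocated.
 */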
/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a mutex by calling mutex_lock_op() and
 * mutex_unlock_op() only if mode is not set to RDONLY_NODE.
 * In the case of RDONLY_NODE, we don't need to care about the mutex.
 */
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *npage[4];
	struct page *parent;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i;
	int err = 0;

	level = get_node_path(index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = get_node_page(sbi, nids[0]);
	if (IS_ERR(npage[0]))
		return PTR_ERR(npage[0]);

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i]);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	return err;
}

static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct node_info ni;

	get_node_info(sbi, dn->nid, &ni);
	if (dn->inode->i_blocks == 0) {
		BUG_ON(ni.blk_addr != NULL_ADDR);
		goto invalidate;
	}
	BUG_ON(ni.blk_addr == NULL_ADDR);

	/* Deallocate node address */
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode, 1);
	set_node_addr(sbi, &ni, NULL_ADDR);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
	} else {
		sync_inode_page(dn);
	}
invalidate:
	clear_node_page_dirty(dn->node_page);
	F2FS_SET_SB_DIRT(sbi);

	f2fs_put_page(dn->node_page, 1);
	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
}
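/*
 * Illustrative usage of the dnode helpers above (a sketch, not a
 * verbatim caller):
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (err)
 *		return err;
 *	blkaddr = dn.data_blkaddr;	// block address of the data page
 *	f2fs_put_dnode(&dn);		// drops node_page/inode_page refs
 *
 * ALLOC_NODE additionally allocates any missing node pages on the path,
 * while LOOKUP_NODE_RA also readaheads sibling node pages.
 */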
static int truncate_dnode(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}

static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	rn = (struct f2fs_node *)page_address(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			set_nid(page, i, 0, false);
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				set_nid(page, i, 0, false);
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}

static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < depth - 1; i++) {
		/* reference count will be increased */
		pages[i] = get_node_page(sbi, nid[i]);
		if (IS_ERR(pages[i])) {
			depth = i + 1;
			err = PTR_ERR(pages[i]);
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[depth - 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		set_nid(pages[idx], i, 0, false);
	}

	if (offset[depth - 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[depth - 1] = 0;
fail:
	for (i = depth - 3; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}
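/*
 * Return-value convention of the truncation helpers above:
 * truncate_dnode() returns 1 (one direct node freed, or already gone),
 * and truncate_nodes() returns the number of node blocks freed in the
 * subtree. A fully freed indirect subtree reports NIDS_PER_BLOCK + 1,
 * which tells the caller that the child pointer can be cleared from the
 * parent block.
 */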
/*
 * All the block addresses of data and nodes should be nullified.
 */
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_node *rn;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(from, offset, noffset);

	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	rn = page_address(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, &rn->i, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, &rn->i, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			wait_on_page_writeback(page);
			rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}
/*
 * Caller should grab and release a mutex by calling mutex_lock_op() and
 * mutex_unlock_op().
 */
int remove_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	nid_t ino = inode->i_ino;
	struct dnode_of_data dn;

	page = get_node_page(sbi, ino);
	if (IS_ERR(page))
		return PTR_ERR(page);

	if (F2FS_I(inode)->i_xattr_nid) {
		nid_t nid = F2FS_I(inode)->i_xattr_nid;
		struct page *npage = get_node_page(sbi, nid);

		if (IS_ERR(npage))
			return PTR_ERR(npage);

		F2FS_I(inode)->i_xattr_nid = 0;
		set_new_dnode(&dn, inode, page, npage, nid);
		dn.inode_page_locked = 1;
		truncate_node(&dn);
	}

	/* 0 is possible, after f2fs_new_inode() has failed */
	BUG_ON(inode->i_blocks != 0 && inode->i_blocks != 1);
	set_new_dnode(&dn, inode, page, page, ino);
	truncate_node(&dn);
	return 0;
}

int new_inode_page(struct inode *inode, const struct qstr *name)
{
	struct page *page;
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	page = new_node_page(&dn, 0);
	if (IS_ERR(page))
		return PTR_ERR(page);
	init_dent_inode(name, page);
	f2fs_put_page(page, 1);
	return 0;
}

struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct node_info old_ni, new_ni;
	struct page *page;
	int err;

	if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
		return ERR_PTR(-EPERM);

	page = grab_cache_page(mapping, dn->nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	get_node_info(sbi, dn->nid, &old_ni);

	SetPageUptodate(page);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);

	/* Reinitialize old_ni with new node page */
	BUG_ON(old_ni.blk_addr != NULL_ADDR);
	new_ni = old_ni;
	new_ni.ino = dn->inode->i_ino;

	if (!inc_valid_node_count(sbi, dn->inode, 1)) {
		err = -ENOSPC;
		goto fail;
	}
	set_node_addr(sbi, &new_ni, NEW_ADDR);
	set_cold_node(dn->inode, page);

	dn->node_page = page;
	sync_inode_page(dn);
	set_page_dirty(page);
	if (ofs == 0)
		inc_valid_inode_count(sbi);

	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}
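/*
 * Note: new_node_page() marks the page up-to-date and dirty but does
 * not assign an on-disk block; the NAT entry is set to NEW_ADDR, and a
 * real block address is chosen only at writeback time (see
 * f2fs_write_node_page() below).
 */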
/*
 * Depending on the return value, the caller should do the following:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE: f2fs_put_page(page, 1)
 * error: nothing
 */
static int read_node_page(struct page *page, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	struct node_info ni;

	get_node_info(sbi, page->index, &ni);

	if (ni.blk_addr == NULL_ADDR) {
		f2fs_put_page(page, 1);
		return -ENOENT;
	}

	if (PageUptodate(page))
		return LOCKED_PAGE;

	return f2fs_readpage(sbi, page, ni.blk_addr, type);
}

/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct page *apage;
	int err;

	apage = find_get_page(mapping, nid);
	if (apage && PageUptodate(apage)) {
		f2fs_put_page(apage, 0);
		return;
	}
	f2fs_put_page(apage, 0);

	apage = grab_cache_page(mapping, nid);
	if (!apage)
		return;

	err = read_node_page(apage, READA);
	if (err == 0)
		f2fs_put_page(apage, 0);
	else if (err == LOCKED_PAGE)
		f2fs_put_page(apage, 1);
	return;
}

struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct page *page;
	int err;

	page = grab_cache_page(mapping, nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto got_it;

	lock_page(page);
	if (!PageUptodate(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
got_it:
	BUG_ON(nid != nid_of_node(page));
	mark_page_accessed(page);
	return page;
}
/*
 * Return a locked page for the desired node page.
 * And, readahead MAX_RA_NODE number of node pages.
 */
struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct page *page;
	int err, i, end;
	nid_t nid;

	/* First, try getting the desired direct node. */
	nid = get_nid(parent, start, false);
	if (!nid)
		return ERR_PTR(-ENOENT);

	page = grab_cache_page(mapping, nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto page_hit;

	/* Then, try readahead for siblings of the desired node */
	end = start + MAX_RA_NODE;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start + 1; i < end; i++) {
		nid = get_nid(parent, i, false);
		if (!nid)
			continue;
		ra_node_page(sbi, nid);
	}

	lock_page(page);

page_hit:
	if (!PageUptodate(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	mark_page_accessed(page);
	return page;
}

void sync_inode_page(struct dnode_of_data *dn)
{
	if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
		update_inode(dn->inode, dn->node_page);
	} else if (dn->inode_page) {
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		update_inode(dn->inode, dn->inode_page);
		if (!dn->inode_page_locked)
			unlock_page(dn->inode_page);
	} else {
		update_inode_page(dn->inode);
	}
}
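/*
 * sync_node_pages() below flushes dirty node pages in three passes:
 * step 0 writes indirect nodes, step 1 dentry dnodes, and step 2 file
 * dnodes. When called with a non-zero ino (the fsync path), it starts
 * directly at the last step, writes only that inode's dnodes, and tags
 * them with fsync/dentry marks for roll-forward recovery.
 */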
int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
					struct writeback_control *wbc)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	pgoff_t index, end;
	struct pagevec pvec;
	int step = ino ? 2 : 0;
	int nwritten = 0, wrote = 0;

	pagevec_init(&pvec, 0);

next_step:
	index = 0;
	end = LONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;

			/*
			 * In fsync mode, we should not skip writing
			 * node pages.
			 */
			if (ino && ino_of_node(page) == ino)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino && ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			/* called by fsync() */
			if (ino && IS_DNODE(page)) {
				int mark = !is_checkpointed_node(sbi, ino);
				set_fsync_mark(page, 1);
				if (IS_INODE(page))
					set_dentry_mark(page, mark);
				nwritten++;
			} else {
				set_fsync_mark(page, 0);
				set_dentry_mark(page, 0);
			}
			mapping->a_ops->writepage(page, wbc);
			wrote++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}

	if (wrote)
		f2fs_submit_bio(sbi, NODE, wbc->sync_mode == WB_SYNC_ALL);

	return nwritten;
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	nid_t nid;
	block_t new_addr;
	struct node_info ni;

	wait_on_page_writeback(page);

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	BUG_ON(page->index != nid);

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (ni.blk_addr == NULL_ADDR) {
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		unlock_page(page);
		return 0;
	}

	if (wbc->for_reclaim) {
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		wbc->pages_skipped++;
		set_page_dirty(page);
		return AOP_WRITEPAGE_ACTIVATE;
	}

	mutex_lock(&sbi->node_write);
	set_page_writeback(page);
	write_node_page(sbi, page, nid, ni.blk_addr, &new_addr);
	set_node_addr(sbi, &ni, new_addr);
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	mutex_unlock(&sbi->node_write);
	unlock_page(page);
	return 0;
}
/*
 * It is very important to gather dirty pages and write them at once, so
 * that we can submit a big bio without interfering with other data writes.
 * By default, 512 pages (2MB), one segment's worth, is quite reasonable.
 */
#define COLLECT_DIRTY_NODES	512
static int f2fs_write_node_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	struct block_device *bdev = sbi->sb->s_bdev;
	long nr_to_write = wbc->nr_to_write;

	/* First check balancing cached NAT entries */
	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
		f2fs_sync_fs(sbi->sb, true);
		return 0;
	}

	/* collect a number of dirty node pages and write together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES)
		return 0;

	/* if mounting failed, skip writing node pages */
	wbc->nr_to_write = bio_get_nr_vecs(bdev);
	sync_node_pages(sbi, 0, wbc);
	wbc->nr_to_write = nr_to_write -
		(bio_get_nr_vecs(bdev) - wbc->nr_to_write);
	return 0;
}

static int f2fs_set_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(sbi, F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		return 1;
	}
	return 0;
}

static void f2fs_invalidate_node_page(struct page *page, unsigned long offset)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (PageDirty(page))
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	ClearPagePrivate(page);
}

static int f2fs_release_node_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_node_page,
	.releasepage	= f2fs_release_node_page,
};

static struct free_nid *__lookup_free_nid_list(nid_t n, struct list_head *head)
{
	struct list_head *this;
	struct free_nid *i;
	list_for_each(this, head) {
		i = list_entry(this, struct free_nid, list);
		if (i->nid == n)
			return i;
	}
	return NULL;
}

static void __del_from_free_nid_list(struct free_nid *i)
{
	list_del(&i->list);
	kmem_cache_free(free_nid_slab, i);
}

static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;

	if (nm_i->fcnt > 2 * MAX_FREE_NIDS)
		return 0;
retry:
	i = kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	if (!i) {
		cond_resched();
		goto retry;
	}
	i->nid = nid;
	i->state = NID_NEW;

	spin_lock(&nm_i->free_nid_list_lock);
	if (__lookup_free_nid_list(nid, &nm_i->free_nid_list)) {
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}
	list_add_tail(&i->list, &nm_i->free_nid_list);
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
	return 1;
}

static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;
	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	if (i && i->state == NID_NEW) {
		__del_from_free_nid_list(i);
		nm_i->fcnt--;
	}
	spin_unlock(&nm_i->free_nid_list_lock);
}
static int scan_nat_page(struct f2fs_nm_info *nm_i,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	int fcnt = 0;
	int i;

	/* 0 nid should not be used */
	if (start_nid == 0)
		++start_nid;

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
		if (start_nid >= nm_i->max_nid)
			break;
		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		BUG_ON(blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR)
			fcnt += add_free_nid(nm_i, start_nid);
	}
	return fcnt;
}

static void build_free_nids(struct f2fs_sb_info *sbi)
{
	struct free_nid *fnid, *next_fnid;
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t nid = 0;
	bool is_cycled = false;
	int fcnt = 0;
	int i;

	nid = nm_i->next_scan_nid;
	nm_i->init_scan_nid = nid;

	ra_nat_pages(sbi, nid);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		fcnt += scan_nat_page(nm_i, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));

		if (nid >= nm_i->max_nid) {
			nid = 0;
			is_cycled = true;
		}
		if (fcnt > MAX_FREE_NIDS)
			break;
		if (is_cycled && nm_i->init_scan_nid <= nid)
			break;
	}

	/* go to the next nat page in order to reuse free nids first */
	nm_i->next_scan_nid = nm_i->init_scan_nid + NAT_ENTRY_PER_BLOCK;

	/* find free nids from current sum_pages */
	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(sum, i));
		if (addr == NULL_ADDR)
			add_free_nid(nm_i, nid);
		else
			remove_free_nid(nm_i, nid);
	}
	mutex_unlock(&curseg->curseg_mutex);

	/* remove the free nids from currently allocated nids */
	list_for_each_entry_safe(fnid, next_fnid, &nm_i->free_nid_list, list) {
		struct nat_entry *ne;

		read_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, fnid->nid);
		if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
			remove_free_nid(nm_i, fnid->nid);
		read_unlock(&nm_i->nat_tree_lock);
	}
}
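/*
 * Lifecycle of a free nid: build_free_nids() adds candidates in state
 * NID_NEW; alloc_nid() hands one out and moves it to NID_ALLOC;
 * alloc_nid_done() then removes it for good, while alloc_nid_failed()
 * returns it to NID_NEW for reuse.
 */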
/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid could be used as an ino as well as a nid when an
 * inode is created.
 */
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
	struct list_head *this;
retry:
	mutex_lock(&nm_i->build_lock);
	if (!nm_i->fcnt) {
		/* scan NAT in order to build free nid list */
		build_free_nids(sbi);
		if (!nm_i->fcnt) {
			mutex_unlock(&nm_i->build_lock);
			return false;
		}
	}
	mutex_unlock(&nm_i->build_lock);

	/*
	 * We check fcnt again since the previous check is racy, as
	 * we didn't hold free_nid_list_lock. So another thread
	 * could consume all of the free nids.
	 */
	spin_lock(&nm_i->free_nid_list_lock);
	if (!nm_i->fcnt) {
		spin_unlock(&nm_i->free_nid_list_lock);
		goto retry;
	}

	BUG_ON(list_empty(&nm_i->free_nid_list));
	list_for_each(this, &nm_i->free_nid_list) {
		i = list_entry(this, struct free_nid, list);
		if (i->state == NID_NEW)
			break;
	}

	BUG_ON(i->state != NID_NEW);
	*nid = i->nid;
	i->state = NID_ALLOC;
	nm_i->fcnt--;
	spin_unlock(&nm_i->free_nid_list_lock);
	return true;
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	BUG_ON(!i || i->state != NID_ALLOC);
	__del_from_free_nid_list(i);
	spin_unlock(&nm_i->free_nid_list_lock);
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	BUG_ON(!i || i->state != NID_ALLOC);
	i->state = NID_NEW;
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
}

void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
		struct f2fs_summary *sum, struct node_info *ni,
		block_t new_blkaddr)
{
	rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
	set_node_addr(sbi, ni, new_blkaddr);
	clear_node_page_dirty(page);
}

int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct f2fs_node *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;

	ipage = grab_cache_page(mapping, ino);
	if (!ipage)
		return -ENOMEM;

	/* Should not use this inode from the free nid list */
	remove_free_nid(NM_I(sbi), ino);

	get_node_info(sbi, ino, &old_ni);
	SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);

	src = (struct f2fs_node *)page_address(page);
	dst = (struct f2fs_node *)page_address(ipage);

	memcpy(dst, src, (unsigned long)&src->i.i_ext - (unsigned long)&src->i);
	dst->i.i_size = 0;
	dst->i.i_blocks = cpu_to_le64(1);
	dst->i.i_links = cpu_to_le32(1);
	dst->i.i_xattr_nid = 0;

	new_ni = old_ni;
	new_ni.ino = ino;

	set_node_addr(sbi, &new_ni, NEW_ADDR);
	inc_valid_inode_count(sbi);

	f2fs_put_page(ipage, 1);
	return 0;
}
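/*
 * restore_node_summary() below rebuilds the summary entries of a node
 * segment by reading every node block in the segment and taking the nid
 * from each block's footer. It is needed when the checkpoint does not
 * carry valid node summaries, e.g. after a sudden power-off.
 */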
int restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	struct page *page;
	block_t addr;
	int i, last_offset;

	/* allocate a temporary page to read node blocks */
	page = alloc_page(GFP_NOFS | __GFP_ZERO);
	if (!page)
		return -ENOMEM;
	lock_page(page);

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; i < last_offset; i++, sum_entry++) {
		/*
		 * In order to read the next node page,
		 * we must clear the PageUptodate flag.
		 */
		ClearPageUptodate(page);

		if (f2fs_readpage(sbi, page, addr, READ_SYNC))
			goto out;

		lock_page(page);
		rn = (struct f2fs_node *)page_address(page);
		sum_entry->nid = rn->footer.nid;
		sum_entry->version = 0;
		sum_entry->ofs_in_node = 0;
		addr++;
	}
	unlock_page(page);
out:
	__free_pages(page, 0);
	return 0;
}

static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	mutex_lock(&curseg->curseg_mutex);

	if (nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) {
		mutex_unlock(&curseg->curseg_mutex);
		return false;
	}

	for (i = 0; i < nats_in_cursum(sum); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(sum, i));

		raw_ne = nat_in_journal(sum, i);
retry:
		write_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne) {
			__set_nat_cache_dirty(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
			continue;
		}
		ne = grab_nat_entry(nm_i, nid);
		if (!ne) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(ne, le32_to_cpu(raw_ne.block_addr));
		nat_set_ino(ne, le32_to_cpu(raw_ne.ino));
		nat_set_version(ne, raw_ne.version);
		__set_nat_cache_dirty(nm_i, ne);
		write_unlock(&nm_i->nat_tree_lock);
	}
	update_nats_in_cursum(sum, -i);
	mutex_unlock(&curseg->curseg_mutex);
	return true;
}
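/*
 * Dirty NAT entries can be persisted in two places: compactly, as
 * journal entries in the hot data summary block, or in the NAT blocks
 * themselves. flush_nats_in_journal() above spills a full journal back
 * into the NAT cache; flush_nat_entries() below then writes each dirty
 * entry either to the journal (when there is room and no spill
 * happened) or to its NAT block.
 */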
/*
 * This function is called during the checkpointing process.
 */
void flush_nat_entries(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct list_head *cur, *n;
	struct page *page = NULL;
	struct f2fs_nat_block *nat_blk = NULL;
	nid_t start_nid = 0, end_nid = 0;
	bool flushed;

	flushed = flush_nats_in_journal(sbi);

	if (!flushed)
		mutex_lock(&curseg->curseg_mutex);

	/* 1) flush dirty nat caches */
	list_for_each_safe(cur, n, &nm_i->dirty_nat_entries) {
		struct nat_entry *ne;
		nid_t nid;
		struct f2fs_nat_entry raw_ne;
		int offset = -1;
		block_t new_blkaddr;

		ne = list_entry(cur, struct nat_entry, list);
		nid = nat_get_nid(ne);

		if (nat_get_blkaddr(ne) == NEW_ADDR)
			continue;
		if (flushed)
			goto to_nat_page;

		/* if there is room for nat entries in curseg->sumpage */
		offset = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 1);
		if (offset >= 0) {
			raw_ne = nat_in_journal(sum, offset);
			goto flush_now;
		}
to_nat_page:
		if (!page || (start_nid > nid || nid > end_nid)) {
			if (page) {
				f2fs_put_page(page, 1);
				page = NULL;
			}
			start_nid = START_NID(nid);
			end_nid = start_nid + NAT_ENTRY_PER_BLOCK - 1;

			/*
			 * get the nat block with dirty flag, increased
			 * reference count, mapped and locked
			 */
			page = get_next_nat_page(sbi, start_nid);
			nat_blk = page_address(page);
		}

		BUG_ON(!nat_blk);
		raw_ne = nat_blk->entries[nid - start_nid];
flush_now:
		new_blkaddr = nat_get_blkaddr(ne);

		raw_ne.ino = cpu_to_le32(nat_get_ino(ne));
		raw_ne.block_addr = cpu_to_le32(new_blkaddr);
		raw_ne.version = nat_get_version(ne);

		if (offset < 0) {
			nat_blk->entries[nid - start_nid] = raw_ne;
		} else {
			nat_in_journal(sum, offset) = raw_ne;
			nid_in_journal(sum, offset) = cpu_to_le32(nid);
		}

		if (nat_get_blkaddr(ne) == NULL_ADDR &&
				!add_free_nid(NM_I(sbi), nid)) {
			write_lock(&nm_i->nat_tree_lock);
			__del_from_nat_cache(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
		} else {
			write_lock(&nm_i->nat_tree_lock);
			__clear_nat_cache_dirty(nm_i, ne);
			ne->checkpointed = true;
			write_unlock(&nm_i->nat_tree_lock);
		}
	}
	if (!flushed)
		mutex_unlock(&curseg->curseg_mutex);
	f2fs_put_page(page, 1);

	/* 2) shrink nat caches if necessary */
	try_to_free_nats(sbi, nm_i->nat_cnt - NM_WOUT_THRESHOLD);
}
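/*
 * Sizing sketch for init_node_manager() below (illustrative numbers
 * only): with 4KB blocks, 512 blocks per segment and
 * NAT_ENTRY_PER_BLOCK == 455, a NAT area of 10 segments provides
 * 5 current segments (the other half being the alternate copies),
 * i.e. 5 * 512 = 2560 NAT blocks and a max_nid of 2560 * 455 = 1164800.
 */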
static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs, nat_blocks;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes the pair segments, so divide by 2 */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
	nm_i->fcnt = 0;
	nm_i->nat_cnt = 0;

	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->nat_entries);
	INIT_LIST_HEAD(&nm_i->dirty_nat_entries);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->free_nid_list_lock);
	rwlock_init(&nm_i->nat_tree_lock);

	nm_i->init_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	return 0;
}

int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	build_free_nids(sbi);
	return 0;
}

void destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->free_nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		BUG_ON(i->state == NID_ALLOC);
		__del_from_free_nid_list(i);
		nm_i->fcnt--;
	}
	BUG_ON(nm_i->fcnt);
	spin_unlock(&nm_i->free_nid_list_lock);

	/* destroy nat cache */
	write_lock(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
			nid, NATVEC_SIZE, natvec))) {
		unsigned idx;
		for (idx = 0; idx < found; idx++) {
			struct nat_entry *e = natvec[idx];
			nid = nat_get_nid(e) + 1;
			__del_from_nat_cache(nm_i, e);
		}
	}
	BUG_ON(nm_i->nat_cnt);
	write_unlock(&nm_i->nat_tree_lock);

	kfree(nm_i->nat_bitmap);
	sbi->nm_info = NULL;
	kfree(nm_i);
}

int __init create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
			sizeof(struct nat_entry), NULL);
	if (!nat_entry_slab)
		return -ENOMEM;

	free_nid_slab = f2fs_kmem_cache_create("free_nid",
			sizeof(struct free_nid), NULL);
	if (!free_nid_slab) {
		kmem_cache_destroy(nat_entry_slab);
		return -ENOMEM;
	}
	return 0;
}

void destroy_node_manager_caches(void)
{
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}