/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

#define on_build_free_nids(nm_i) mutex_is_locked(&nm_i->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;

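/*
 * A rough sketch of the arithmetic below, assuming 4KiB pages: mem_size
 * is the cache footprint in page-sized units ((count * object size)
 * >> 12), and the budget is ram_thresh percent of totalram.  Free nids
 * and nat entries may each consume a quarter of that budget (>> 2), and
 * dirty dentry pages half of it (>> 1); e.g. with ram_thresh = 10, each
 * of the first two caches is capped at 2.5% of system RAM.
 */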
bool available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct sysinfo val;
	unsigned long mem_size = 0;
	bool res = false;

	si_meminfo(&val);
	/* give 25%, 25%, 50% of memory to each component respectively */
	if (type == FREE_NIDS) {
		mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >> 12;
		res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >> 12;
		res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == DIRTY_DENTS) {
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 1);
	}
	return res;
}

static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return get_meta_page(sbi, index);
}

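/*
 * F2FS keeps two copies of each NAT block and alternates between them
 * across checkpoints: current_nat_addr() names the live copy, and
 * next_nat_addr() its shadow.  get_next_nat_page() below migrates the
 * live block into the shadow and flips the per-nid bit via
 * set_to_next_nat(), so the checkpoint never overwrites valid data in
 * place.
 */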
static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);

	/* Dirty src_page means that it is already the new target NAT page. */
	if (PageDirty(src_page))
		return src_page;

	dst_page = grab_meta_page(sbi, dst_off);

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}

int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	int is_cp = 1;

	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !e->checkpointed)
		is_cp = 0;
	read_unlock(&nm_i->nat_tree_lock);
	return is_cp;
}

bool fsync_mark_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool fsync_done = false;

	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e)
		fsync_done = e->fsync_done;
	read_unlock(&nm_i->nat_tree_lock);
	return fsync_done;
}

void fsync_mark_clear(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;

	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e)
		e->fsync_done = false;
	write_unlock(&nm_i->nat_tree_lock);
}

static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct nat_entry *new;

	new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
	if (!new)
		return NULL;
	if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
		kmem_cache_free(nat_entry_slab, new);
		return NULL;
	}
	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	new->checkpointed = true;
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}

static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		node_info_from_raw_nat(&e->ni, ne);
	}
	write_unlock(&nm_i->nat_tree_lock);
}

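/*
 * A node block address moves through the small life cycle that the
 * sanity checks below enforce: NULL_ADDR (free) -> NEW_ADDR (allocated
 * but not yet written back) -> an on-disk block address (written) ->
 * NULL_ADDR again (truncated).  The node version is bumped only on the
 * last transition, which lets recovery tell a reused nid from its stale
 * predecessor.
 */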
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		e->ni = *ni;
		f2fs_bug_on(ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when nid is reallocated,
		 * the previous nat entry can remain in the nat cache.
		 * So, reinitialize it with new information.
		 */
		e->ni = *ni;
		f2fs_bug_on(ni->blk_addr != NULL_ADDR);
	}

	/* sanity check */
	f2fs_bug_on(nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment the version no. as the node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	e = __lookup_nat_cache(nm_i, ni->ino);
	if (e)
		e->fsync_done = fsync_done;
	write_unlock(&nm_i->nat_tree_lock);
}

int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	if (available_free_memory(sbi, NAT_ENTRIES))
		return 0;

	write_lock(&nm_i->nat_tree_lock);
	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	write_unlock(&nm_i->nat_tree_lock);
	return nr_shrink;
}

/*
 * This function always succeeds: node info is taken from the nat cache,
 * from the nat journal in the current summary, or from the nat page,
 * in that order.
 */
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	int i;

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
	ni->nid = nid;

	/* Check nat cache */
	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
	}
	read_unlock(&nm_i->nat_tree_lock);
	if (e)
		return;

	/* Check current segment summary */
	mutex_lock(&curseg->curseg_mutex);
	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(sum, i);
		node_info_from_raw_nat(ni, &ne);
	}
	mutex_unlock(&curseg->curseg_mutex);
	if (i >= 0)
		goto cache;

	/* Fill node_info from nat page */
	page = get_current_nat_page(sbi, start_nid);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	cache_nat_entry(NM_I(sbi), nid, &ne);
}

/*
 * The maximum depth is four.
 * Offset[0] will have the raw inode offset.
 */
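/*
 * An illustrative walk, assuming the common on-disk geometry of 923
 * direct pointers in the inode and 1018 pointers per node block: for
 * block 5000 we pass the inode (5000 - 923 = 4077) and the two direct
 * node slots (4077 - 2 * 1018 = 2041), then land in the first indirect
 * tree, so level = 2 and offset[] becomes { NODE_IND1_BLOCK,
 * 2041 / 1018, 2041 % 1018 } = { NODE_IND1_BLOCK, 2, 5 }.
 */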
static int get_node_path(struct f2fs_inode_info *fi, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(fi);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			offset[n - 2] * (dptrs_per_blk + 1) +
			offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		BUG();
	}
got:
	return level;
}

/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if mode is set with ALLOC_NODE.
 * In the case of LOOKUP_NODE and LOOKUP_NODE_RA, no lock is needed.
 */
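/*
 * A typical lookup, sketched from the callers in data.c (error handling
 * elided):
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	... use dn.data_blkaddr and dn.ofs_in_node ...
 *	f2fs_put_dnode(&dn);
 */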
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *npage[4];
	struct page *parent;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i;
	int err = 0;

	level = get_node_path(F2FS_I(dn->inode), index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}
	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i], NULL);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	return err;
}

static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct node_info ni;

	get_node_info(sbi, dn->nid, &ni);
	if (dn->inode->i_blocks == 0) {
		f2fs_bug_on(ni.blk_addr != NULL_ADDR);
		goto invalidate;
	}
	f2fs_bug_on(ni.blk_addr == NULL_ADDR);

	/* Deallocate node address */
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
	} else {
		sync_inode_page(dn);
	}
invalidate:
	clear_node_page_dirty(dn->node_page);
	F2FS_SET_SB_DIRT(sbi);

	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			dn->node_page->index, dn->node_page->index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}

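/*
 * The return value of truncate_nodes() feeds the caller's running
 * node-offset (nofs) bookkeeping.  Roughly, a fully freed direct-node
 * slot is worth NIDS_PER_BLOCK + 1 node offsets, and an absent child
 * (nid == 0) is charged the same amount so that the offsets of the
 * surviving nodes still line up.
 */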
static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			set_nid(page, i, 0, false);
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				set_nid(page, i, 0, false);
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}

static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* reference count will be increased */
		pages[i] = get_node_page(sbi, nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		set_nid(pages[idx], i, 0, false);
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
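/*
 * Node offsets (noffset in get_node_path()) are laid out as follows:
 * the inode is 0, the two direct nodes are 1 and 2, the first indirect
 * tree starts at 3, the second at 4 + NIDS_PER_BLOCK, and the double
 * indirect tree at 5 + 2 * NIDS_PER_BLOCK, which is where the constants
 * in the switch below come from.
 */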
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(F2FS_I(inode), from, offset, noffset);
restart:
	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
				f2fs_put_page(page, 1);
				goto restart;
			}
			f2fs_wait_on_page_writeback(page, NODE);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}

int truncate_xattr_node(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;

	if (!nid)
		return 0;

	npage = get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	F2FS_I(inode)->i_xattr_nid = 0;

	/* need to do checkpoint during fsync */
	F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));

	set_new_dnode(&dn, inode, page, npage, nid);

	if (page)
		dn.inode_page_locked = true;
	truncate_node(&dn);
	return 0;
}

/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
void remove_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	nid_t ino = inode->i_ino;
	struct dnode_of_data dn;

	page = get_node_page(sbi, ino);
	if (IS_ERR(page))
		return;

	if (truncate_xattr_node(inode, page)) {
		f2fs_put_page(page, 1);
		return;
	}
	/* 0 is possible, after f2fs_new_inode() has failed */
	f2fs_bug_on(inode->i_blocks != 0 && inode->i_blocks != 1);
	set_new_dnode(&dn, inode, page, page, ino);
	truncate_node(&dn);
}

struct page *new_inode_page(struct inode *inode, const struct qstr *name)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return new_node_page(&dn, 0, NULL);
}

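/*
 * A new node only gets an in-memory page and a nat entry pointing at
 * NEW_ADDR here; the real block address is assigned when the page is
 * written back (see f2fs_write_node_page()).
 */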
struct page *new_node_page(struct dnode_of_data *dn,
				unsigned int ofs, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct node_info old_ni, new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = grab_cache_page(NODE_MAPPING(sbi), dn->nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely(!inc_valid_node_count(sbi, dn->inode))) {
		err = -ENOSPC;
		goto fail;
	}

	get_node_info(sbi, dn->nid, &old_ni);

	/* Reinitialize old_ni with new node page */
	f2fs_bug_on(old_ni.blk_addr != NULL_ADDR);
	new_ni = old_ni;
	new_ni.ino = dn->inode->i_ino;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(dn->inode, page);
	SetPageUptodate(page);
	set_page_dirty(page);

	if (f2fs_has_xattr_block(ofs))
		F2FS_I(dn->inode)->i_xattr_nid = dn->nid;

	dn->node_page = page;
	if (ipage)
		update_inode(dn->inode, ipage);
	else
		sync_inode_page(dn);
	if (ofs == 0)
		inc_valid_inode_count(sbi);

	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

/*
 * Caller should handle the page as follows after checking the return value:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE: f2fs_put_page(page, 1)
 * error: nothing
 */
static int read_node_page(struct page *page, int rw)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	struct node_info ni;

	get_node_info(sbi, page->index, &ni);

	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return -ENOENT;
	}

	if (PageUptodate(page))
		return LOCKED_PAGE;

	return f2fs_submit_page_bio(sbi, page, ni.blk_addr, rw);
}

/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	apage = find_get_page(NODE_MAPPING(sbi), nid);
	if (apage && PageUptodate(apage)) {
		f2fs_put_page(apage, 0);
		return;
	}
	f2fs_put_page(apage, 0);

	apage = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!apage)
		return;

	err = read_node_page(apage, READA);
	if (err == 0)
		f2fs_put_page(apage, 0);
	else if (err == LOCKED_PAGE)
		f2fs_put_page(apage, 1);
}

struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	struct page *page;
	int err;
repeat:
	page = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto got_it;

	lock_page(page);
	if (unlikely(!PageUptodate(page) || nid != nid_of_node(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
got_it:
	return page;
}

/*
 * Return a locked page for the desired node page.
 * And, readahead MAX_RA_NODE number of node pages.
 */
struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
	struct blk_plug plug;
	struct page *page;
	int err, i, end;
	nid_t nid;

	/* First, try getting the desired direct node. */
	nid = get_nid(parent, start, false);
	if (!nid)
		return ERR_PTR(-ENOENT);
repeat:
	page = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto page_hit;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + MAX_RA_NODE;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start + 1; i < end; i++) {
		nid = get_nid(parent, i, false);
		if (!nid)
			continue;
		ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);

	lock_page(page);
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
page_hit:
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}

void sync_inode_page(struct dnode_of_data *dn)
{
	if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
		update_inode(dn->inode, dn->node_page);
	} else if (dn->inode_page) {
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		update_inode(dn->inode, dn->inode_page);
		if (!dn->inode_page_locked)
			unlock_page(dn->inode_page);
	} else {
		update_inode_page(dn->inode);
	}
}

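/*
 * The three passes below write indirect nodes first, then dentry
 * dnodes, then file dnodes, following the flushing sequence noted in
 * the loop.  When a specific ino is given (i.e. called by fsync), step
 * starts at 2 so only that inode's dnodes are considered, and those
 * pages are locked unconditionally instead of with trylock.
 */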
int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
					struct writeback_control *wbc)
{
	pgoff_t index, end;
	struct pagevec pvec;
	int step = ino ? 2 : 0;
	int nwritten = 0, wrote = 0;

	pagevec_init(&pvec, 0);

next_step:
	index = 0;
	end = LONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;

			/*
			 * In fsync mode,
			 * we should not skip writing node pages.
			 */
			if (ino && ino_of_node(page) == ino)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino && ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			/* called by fsync() */
			if (ino && IS_DNODE(page)) {
				int mark = !is_checkpointed_node(sbi, ino);
				set_fsync_mark(page, 1);
				if (IS_INODE(page))
					set_dentry_mark(page, mark);
				nwritten++;
			} else {
				set_fsync_mark(page, 0);
				set_dentry_mark(page, 0);
			}
			NODE_MAPPING(sbi)->a_ops->writepage(page, wbc);
			wrote++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}

	if (wrote)
		f2fs_submit_merged_bio(sbi, NODE, WRITE);
	return nwritten;
}

int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index = 0, end = LONG_MAX;
	struct pagevec pvec;
	int ret2 = 0, ret = 0;

	pagevec_init(&pvec, 0);

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_WRITEBACK,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (unlikely(page->index > end))
				continue;

			if (ino && ino_of_node(page) == ino) {
				f2fs_wait_on_page_writeback(page, NODE);
				if (TestClearPageError(page))
					ret = -EIO;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (unlikely(test_and_clear_bit(AS_ENOSPC, &NODE_MAPPING(sbi)->flags)))
		ret2 = -ENOSPC;
	if (unlikely(test_and_clear_bit(AS_EIO, &NODE_MAPPING(sbi)->flags)))
		ret2 = -EIO;
	if (!ret)
		ret = ret2;
	return ret;
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	nid_t nid;
	block_t new_addr;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.type = NODE,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	trace_f2fs_writepage(page, NODE);

	if (unlikely(sbi->por_doing))
		goto redirty_out;

	f2fs_wait_on_page_writeback(page, NODE);

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(page->index != nid);

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		unlock_page(page);
		return 0;
	}

	if (wbc->for_reclaim)
		goto redirty_out;

	mutex_lock(&sbi->node_write);
	set_page_writeback(page);
	write_node_page(sbi, page, &fio, nid, ni.blk_addr, &new_addr);
	set_node_addr(sbi, &ni, new_addr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	mutex_unlock(&sbi->node_write);
	unlock_page(page);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int f2fs_write_node_pages(struct address_space *mapping,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, NODE);

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi);

	/* collect a number of dirty node pages and write together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
		goto skip_write;

	diff = nr_pages_to_write(sbi, NODE, wbc);
	wbc->sync_mode = WB_SYNC_NONE;
	sync_node_pages(sbi, 0, wbc);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
	return 0;
}

static int f2fs_set_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);

	trace_f2fs_set_page_dirty(page, NODE);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(sbi, F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		return 1;
	}
	return 0;
}

static void f2fs_invalidate_node_page(struct page *page, unsigned int offset,
				      unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (PageDirty(page))
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	ClearPagePrivate(page);
}

static int f2fs_release_node_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_node_page,
	.releasepage	= f2fs_release_node_page,
};

static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
						nid_t n)
{
	return radix_tree_lookup(&nm_i->free_nid_root, n);
}

static void __del_from_free_nid_list(struct f2fs_nm_info *nm_i,
						struct free_nid *i)
{
	list_del(&i->list);
	radix_tree_delete(&nm_i->free_nid_root, i->nid);
}

static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	struct nat_entry *ne;
	bool allocated = false;

	if (!available_free_memory(sbi, FREE_NIDS))
		return -1;

	/* 0 nid should not be used */
	if (unlikely(nid == 0))
		return 0;

	if (build) {
		/* do not add allocated nids */
		read_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne &&
			(!ne->checkpointed || nat_get_blkaddr(ne) != NULL_ADDR))
			allocated = true;
		read_unlock(&nm_i->nat_tree_lock);
		if (allocated)
			return 0;
	}

	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	i->nid = nid;
	i->state = NID_NEW;

	spin_lock(&nm_i->free_nid_list_lock);
	if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) {
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}
	list_add_tail(&i->list, &nm_i->free_nid_list);
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
	return 1;
}

static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;
	bool need_free = false;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	if (i && i->state == NID_NEW) {
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
		need_free = true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

static void scan_nat_page(struct f2fs_sb_info *sbi,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	int i;

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {

		if (unlikely(start_nid >= nm_i->max_nid))
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		f2fs_bug_on(blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR) {
			if (add_free_nid(sbi, start_nid, true) < 0)
				break;
		}
	}
}

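/*
 * Free nids are discovered in two places: by scanning FREE_NID_PAGES
 * nat blocks (read ahead in one batch) for entries whose block address
 * is NULL_ADDR, and by walking the nat journal in the hot data summary,
 * which overrides the on-disk view for nids changed since the last
 * checkpoint.
 */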
static void build_free_nids(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i = 0;
	nid_t nid = nm_i->next_scan_nid;

	/* Enough entries */
	if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
		return;

	/* readahead nat pages to be scanned */
	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, META_NAT);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		scan_nat_page(sbi, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;

		if (i++ == FREE_NID_PAGES)
			break;
	}

	/* go to the next nat page in order to find more free nids */
	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(sum, i));
		if (addr == NULL_ADDR)
			add_free_nid(sbi, nid, true);
		else
			remove_free_nid(nm_i, nid);
	}
	mutex_unlock(&curseg->curseg_mutex);
}

/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid can be used as an ino as well as a nid when an inode
 * is created.
 */
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
retry:
	if (unlikely(sbi->total_valid_node_count + 1 > nm_i->available_nids))
		return false;

	spin_lock(&nm_i->free_nid_list_lock);

	/* We should not use stale free nids created by build_free_nids */
	if (nm_i->fcnt && !on_build_free_nids(nm_i)) {
		f2fs_bug_on(list_empty(&nm_i->free_nid_list));
		list_for_each_entry(i, &nm_i->free_nid_list, list)
			if (i->state == NID_NEW)
				break;

		f2fs_bug_on(i->state != NID_NEW);
		*nid = i->nid;
		i->state = NID_ALLOC;
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		return true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	/* Let's scan nat pages and its caches to get free nids */
	mutex_lock(&nm_i->build_lock);
	build_free_nids(sbi);
	mutex_unlock(&nm_i->build_lock);
	goto retry;
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(!i || i->state != NID_ALLOC);
	__del_from_free_nid_list(nm_i, i);
	spin_unlock(&nm_i->free_nid_list_lock);

	kmem_cache_free(free_nid_slab, i);
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	if (!nid)
		return;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(!i || i->state != NID_ALLOC);
	if (!available_free_memory(sbi, FREE_NIDS)) {
		__del_from_free_nid_list(nm_i, i);
		need_free = true;
	} else {
		i->state = NID_NEW;
		nm_i->fcnt++;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
		struct f2fs_summary *sum, struct node_info *ni,
		block_t new_blkaddr)
{
	rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
	set_node_addr(sbi, ni, new_blkaddr, false);
	clear_node_page_dirty(page);
}

static void recover_inline_xattr(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	void *src_addr, *dst_addr;
	size_t inline_size;
	struct page *ipage;
	struct f2fs_inode *ri;

	if (!f2fs_has_inline_xattr(inode))
		return;

	if (!IS_INODE(page))
		return;

	ri = F2FS_INODE(page);
	if (!(ri->i_inline & F2FS_INLINE_XATTR))
		return;

	ipage = get_node_page(sbi, inode->i_ino);
	f2fs_bug_on(IS_ERR(ipage));

	dst_addr = inline_xattr_addr(ipage);
	src_addr = inline_xattr_addr(page);
	inline_size = inline_xattr_size(inode);

	f2fs_wait_on_page_writeback(ipage, NODE);
	memcpy(dst_addr, src_addr, inline_size);

	update_inode(inode, ipage);
	f2fs_put_page(ipage, 1);
}

bool recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
	nid_t new_xnid = nid_of_node(page);
	struct node_info ni;

	recover_inline_xattr(inode, page);

	if (!f2fs_has_xattr_block(ofs_of_node(page)))
		return false;

	/* 1: invalidate the previous xattr nid */
	if (!prev_xnid)
		goto recover_xnid;

	/* Deallocate node address */
	get_node_info(sbi, prev_xnid, &ni);
	f2fs_bug_on(ni.blk_addr == NULL_ADDR);
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

recover_xnid:
	/* 2: allocate new xattr nid */
	if (unlikely(!inc_valid_node_count(sbi, inode)))
		f2fs_bug_on(1);

	remove_free_nid(NM_I(sbi), new_xnid);
	get_node_info(sbi, new_xnid, &ni);
	ni.ino = inode->i_ino;
	set_node_addr(sbi, &ni, NEW_ADDR, false);
	F2FS_I(inode)->i_xattr_nid = new_xnid;

	/* 3: update xattr blkaddr */
	refresh_sit_entry(sbi, NEW_ADDR, blkaddr);
	set_node_addr(sbi, &ni, blkaddr, false);

	update_inode_page(inode);
	return true;
}

int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;

	get_node_info(sbi, ino, &old_ni);

	if (unlikely(old_ni.blk_addr != NULL_ADDR))
		return -EINVAL;

	ipage = grab_cache_page(NODE_MAPPING(sbi), ino);
	if (!ipage)
		return -ENOMEM;

	/* Should not use this inode from free nid list */
	remove_free_nid(NM_I(sbi), ino);

	SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);

	src = F2FS_INODE(page);
	dst = F2FS_INODE(ipage);

	memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
	dst->i_size = 0;
	dst->i_blocks = cpu_to_le64(1);
	dst->i_links = cpu_to_le32(1);
	dst->i_xattr_nid = 0;

	new_ni = old_ni;
	new_ni.ino = ino;

	if (unlikely(!inc_valid_node_count(sbi, NULL)))
		WARN_ON(1);
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
	inc_valid_inode_count(sbi);
	f2fs_put_page(ipage, 1);
	return 0;
}

/*
 * ra_sum_pages() merges contiguous pages into one bio and submits it.
 * These pre-read pages are allocated in bd_inode's mapping tree.
 */
static int ra_sum_pages(struct f2fs_sb_info *sbi, struct page **pages,
				int start, int nrpages)
{
	struct inode *inode = sbi->sb->s_bdev->bd_inode;
	struct address_space *mapping = inode->i_mapping;
	int i, page_idx = start;
	struct f2fs_io_info fio = {
		.type = META,
		.rw = READ_SYNC | REQ_META | REQ_PRIO
	};

	for (i = 0; page_idx < start + nrpages; page_idx++, i++) {
		/* alloc page in bd_inode for reading node summary info */
		pages[i] = grab_cache_page(mapping, page_idx);
		if (!pages[i])
			break;
		f2fs_submit_page_mbio(sbi, pages[i], page_idx, &fio);
	}

	f2fs_submit_merged_bio(sbi, META, READ);
	return i;
}

int restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	struct inode *inode = sbi->sb->s_bdev->bd_inode;
	block_t addr;
	int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
	struct page *pages[bio_blocks];
	int i, idx, last_offset, nrpages, err = 0;

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; !err && i < last_offset; i += nrpages, addr += nrpages) {
		nrpages = min(last_offset - i, bio_blocks);

		/* read ahead node pages */
		nrpages = ra_sum_pages(sbi, pages, addr, nrpages);
		if (!nrpages)
			return -ENOMEM;

		for (idx = 0; idx < nrpages; idx++) {
			if (err)
				goto skip;

			lock_page(pages[idx]);
			if (unlikely(!PageUptodate(pages[idx]))) {
				err = -EIO;
			} else {
				rn = F2FS_NODE(pages[idx]);
				sum_entry->nid = rn->footer.nid;
				sum_entry->version = 0;
				sum_entry->ofs_in_node = 0;
				sum_entry++;
			}
			unlock_page(pages[idx]);
skip:
			page_cache_release(pages[idx]);
		}

		invalidate_mapping_pages(inode->i_mapping, addr,
							addr + nrpages);
	}
	return err;
}

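/*
 * When the nat journal in the current summary block is full, its
 * entries are merged back into the in-memory nat cache as dirty
 * entries, so that the following checkpoint writes them to the proper
 * nat blocks instead.
 */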
static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	mutex_lock(&curseg->curseg_mutex);

	if (nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) {
		mutex_unlock(&curseg->curseg_mutex);
		return false;
	}

	for (i = 0; i < nats_in_cursum(sum); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(sum, i));

		raw_ne = nat_in_journal(sum, i);
retry:
		write_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne) {
			__set_nat_cache_dirty(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
			continue;
		}
		ne = grab_nat_entry(nm_i, nid);
		if (!ne) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		node_info_from_raw_nat(&ne->ni, &raw_ne);
		__set_nat_cache_dirty(nm_i, ne);
		write_unlock(&nm_i->nat_tree_lock);
	}
	update_nats_in_cursum(sum, -i);
	mutex_unlock(&curseg->curseg_mutex);
	return true;
}

/*
 * This function is called during the checkpointing process.
 */
void flush_nat_entries(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct nat_entry *ne, *cur;
	struct page *page = NULL;
	struct f2fs_nat_block *nat_blk = NULL;
	nid_t start_nid = 0, end_nid = 0;
	bool flushed;

	flushed = flush_nats_in_journal(sbi);

	if (!flushed)
		mutex_lock(&curseg->curseg_mutex);

	/* 1) flush dirty nat caches */
	list_for_each_entry_safe(ne, cur, &nm_i->dirty_nat_entries, list) {
		nid_t nid;
		struct f2fs_nat_entry raw_ne;
		int offset = -1;

		if (nat_get_blkaddr(ne) == NEW_ADDR)
			continue;

		nid = nat_get_nid(ne);

		if (flushed)
			goto to_nat_page;

		/* if there is room for nat entries in curseg->sum_page */
		offset = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 1);
		if (offset >= 0) {
			raw_ne = nat_in_journal(sum, offset);
			goto flush_now;
		}
to_nat_page:
		if (!page || (start_nid > nid || nid > end_nid)) {
			if (page) {
				f2fs_put_page(page, 1);
				page = NULL;
			}
			start_nid = START_NID(nid);
			end_nid = start_nid + NAT_ENTRY_PER_BLOCK - 1;

			/*
			 * get nat block with dirty flag, increased reference
			 * count, mapped and locked
			 */
			page = get_next_nat_page(sbi, start_nid);
			nat_blk = page_address(page);
		}

		f2fs_bug_on(!nat_blk);
		raw_ne = nat_blk->entries[nid - start_nid];
flush_now:
		raw_nat_from_node_info(&raw_ne, &ne->ni);

		if (offset < 0) {
			nat_blk->entries[nid - start_nid] = raw_ne;
		} else {
			nat_in_journal(sum, offset) = raw_ne;
			nid_in_journal(sum, offset) = cpu_to_le32(nid);
		}

		if (nat_get_blkaddr(ne) == NULL_ADDR &&
				add_free_nid(sbi, nid, false) <= 0) {
			write_lock(&nm_i->nat_tree_lock);
			__del_from_nat_cache(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
		} else {
			write_lock(&nm_i->nat_tree_lock);
			__clear_nat_cache_dirty(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
		}
	}
	if (!flushed)
		mutex_unlock(&curseg->curseg_mutex);
	f2fs_put_page(page, 1);
}

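/*
 * A sketch of the nid-space arithmetic below, assuming the default
 * geometry of 512 blocks per segment and 455 nat entries per 4KiB
 * block: each NAT segment pair then describes 512 * 455 = 232960 nids,
 * so max_nid scales directly with the NAT area size chosen at mkfs
 * time.
 */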
static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs, nat_blocks;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes pair segments, so divide by 2 */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);

	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;

	/* not used nids: 0, node, meta, (and root counted as valid node) */
	nm_i->available_nids = nm_i->max_nid - 3;
	nm_i->fcnt = 0;
	nm_i->nat_cnt = 0;
	nm_i->ram_thresh = DEF_RAM_THRESHOLD;

	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->nat_entries);
	INIT_LIST_HEAD(&nm_i->dirty_nat_entries);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->free_nid_list_lock);
	rwlock_init(&nm_i->nat_tree_lock);

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	return 0;
}

int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	build_free_nids(sbi);
	return 0;
}

void destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->free_nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		f2fs_bug_on(i->state == NID_ALLOC);
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		spin_lock(&nm_i->free_nid_list_lock);
	}
	f2fs_bug_on(nm_i->fcnt);
	spin_unlock(&nm_i->free_nid_list_lock);

	/* destroy nat cache */
	write_lock(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
			nid, NATVEC_SIZE, natvec))) {
		unsigned idx;
		nid = nat_get_nid(natvec[found - 1]) + 1;
		for (idx = 0; idx < found; idx++)
			__del_from_nat_cache(nm_i, natvec[idx]);
	}
	f2fs_bug_on(nm_i->nat_cnt);
	write_unlock(&nm_i->nat_tree_lock);

	kfree(nm_i->nat_bitmap);
	sbi->nm_info = NULL;
	kfree(nm_i);
}

int __init create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
			sizeof(struct nat_entry));
	if (!nat_entry_slab)
		return -ENOMEM;

	free_nid_slab = f2fs_kmem_cache_create("free_nid",
			sizeof(struct free_nid));
	if (!free_nid_slab) {
		kmem_cache_destroy(nat_entry_slab);
		return -ENOMEM;
	}
	return 0;
}

void destroy_node_manager_caches(void)
{
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}