// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi.
 *
 */

#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include <linux/fiemap.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"

/**
 * struct nilfs_iget_args - arguments used during comparison between inodes
 * @ino: inode number
 * @cno: checkpoint number
 * @root: pointer to the NILFS root object (mounted checkpoint)
 * @for_gc: inode for GC flag
 * @for_btnc: inode for B-tree node cache flag
 * @for_shadow: inode for shadowed page cache flag
 */
struct nilfs_iget_args {
        u64 ino;
        __u64 cno;
        struct nilfs_root *root;
        bool for_gc;
        bool for_btnc;
        bool for_shadow;
};

static int nilfs_iget_test(struct inode *inode, void *opaque);

void nilfs_inode_add_blocks(struct inode *inode, int n)
{
        struct nilfs_root *root = NILFS_I(inode)->i_root;

        inode_add_bytes(inode, i_blocksize(inode) * n);
        if (root)
                atomic64_add(n, &root->blocks_count);
}

void nilfs_inode_sub_blocks(struct inode *inode, int n)
{
        struct nilfs_root *root = NILFS_I(inode)->i_root;

        inode_sub_bytes(inode, i_blocksize(inode) * n);
        if (root)
                atomic64_sub(n, &root->blocks_count);
}

/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode: inode struct of the target file
 * @blkoff: file block number
 * @bh_result: buffer head to be mapped on
 * @create: indicate whether to allocate the block if it has not been
 *      allocated yet.
 *
 * This function does not itself issue a read request for the specified
 * data block; that is done by the VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
                    struct buffer_head *bh_result, int create)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
        __u64 blknum = 0;
        int err = 0, ret;
        unsigned int maxblocks = bh_result->b_size >> inode->i_blkbits;

        down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
        up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        if (ret >= 0) { /* found */
                map_bh(bh_result, inode->i_sb, blknum);
                if (ret > 0)
                        bh_result->b_size = (ret << inode->i_blkbits);
                goto out;
        }
        /* data block was not found */
        if (ret == -ENOENT && create) {
                struct nilfs_transaction_info ti;

                bh_result->b_blocknr = 0;
                err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
                if (unlikely(err))
                        goto out;
                err = nilfs_bmap_insert(ii->i_bmap, blkoff,
                                        (unsigned long)bh_result);
                if (unlikely(err != 0)) {
                        if (err == -EEXIST) {
                                /*
                                 * The get_block() function could be called
                                 * from multiple callers for an inode.
                                 * However, the page having this block must
                                 * be locked in this case.
                                 */
                                nilfs_warn(inode->i_sb,
                                           "%s (ino=%lu): a race condition while inserting a data block at offset=%llu",
                                           __func__, inode->i_ino,
                                           (unsigned long long)blkoff);
                                err = 0;
                        }
                        nilfs_transaction_abort(inode->i_sb);
                        goto out;
                }
                nilfs_mark_inode_dirty_sync(inode);
                nilfs_transaction_commit(inode->i_sb); /* never fails */
                /* Error handling should be detailed */
                set_buffer_new(bh_result);
                set_buffer_delay(bh_result);
                map_bh(bh_result, inode->i_sb, 0);
                /* Disk block number must be changed to proper value */

        } else if (ret == -ENOENT) {
                /*
                 * A missing block is not an error (e.g. a hole); return
                 * without setting the mapped state flag.
                 */
                ;
        } else {
                err = ret;
        }

 out:
        return err;
}

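/*
 * Editor's note (not in the original source): this implements the
 * standard get_block_t contract.  Callers such as mpage_read_folio()
 * preset bh_result->b_size to the amount they would like mapped, and
 * nilfs_bmap_lookup_contig() may map up to that many contiguous blocks
 * in one call.  A freshly inserted block comes back with the "new" and
 * "delay" bits set and a zero block number; the segment constructor is
 * expected to assign the real disk address when the log is written.
 */
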
/**
 * nilfs_read_folio() - implement the read_folio() method of the nilfs_aops
 * address_space_operations.
 * @file: file struct of the file to be read
 * @folio: the folio to be read
 */
static int nilfs_read_folio(struct file *file, struct folio *folio)
{
        return mpage_read_folio(folio, nilfs_get_block);
}

static void nilfs_readahead(struct readahead_control *rac)
{
        mpage_readahead(rac, nilfs_get_block);
}

static int nilfs_writepages(struct address_space *mapping,
                            struct writeback_control *wbc)
{
        struct inode *inode = mapping->host;
        int err = 0;

        if (sb_rdonly(inode->i_sb)) {
                nilfs_clear_dirty_pages(mapping, false);
                return -EROFS;
        }

        if (wbc->sync_mode == WB_SYNC_ALL)
                err = nilfs_construct_dsync_segment(inode->i_sb, inode,
                                                    wbc->range_start,
                                                    wbc->range_end);
        return err;
}

static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        int err;

        if (sb_rdonly(inode->i_sb)) {
                /*
                 * This means that the filesystem was remounted read-only
                 * because of an error or metadata corruption, but dirty
                 * pages are still being flushed in the background.  So
                 * simply discard this dirty page.
                 */
                nilfs_clear_dirty_page(page, false);
                unlock_page(page);
                return -EROFS;
        }

        redirty_page_for_writepage(wbc, page);
        unlock_page(page);

        if (wbc->sync_mode == WB_SYNC_ALL) {
                err = nilfs_construct_segment(inode->i_sb);
                if (unlikely(err))
                        return err;
        } else if (wbc->for_reclaim)
                nilfs_flush_segment(inode->i_sb, inode->i_ino);

        return 0;
}

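/*
 * Editor's note: NILFS2 is log structured, so ->writepage never writes
 * a page in place.  The page is redirtied and left to the segment
 * constructor, which copies dirty blocks into new log segments;
 * WB_SYNC_ALL writeback forces a segment to be constructed here, while
 * reclaim-driven writeback merely kicks the flusher for this inode.
 */
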
static bool nilfs_dirty_folio(struct address_space *mapping,
                              struct folio *folio)
{
        struct inode *inode = mapping->host;
        struct buffer_head *head;
        unsigned int nr_dirty = 0;
        bool ret = filemap_dirty_folio(mapping, folio);

        /*
         * The folio may not be locked, e.g. if called from
         * try_to_unmap_one().
         */
        spin_lock(&mapping->private_lock);
        head = folio_buffers(folio);
        if (head) {
                struct buffer_head *bh = head;

                do {
                        /* Do not mark hole blocks dirty */
                        if (buffer_dirty(bh) || !buffer_mapped(bh))
                                continue;

                        set_buffer_dirty(bh);
                        nr_dirty++;
                } while (bh = bh->b_this_page, bh != head);
        } else if (ret) {
                nr_dirty = 1 << (folio_shift(folio) - inode->i_blkbits);
        }
        spin_unlock(&mapping->private_lock);

        if (nr_dirty)
                nilfs_set_file_dirty(inode, nr_dirty);
        return ret;
}

void nilfs_write_failed(struct address_space *mapping, loff_t to)
{
        struct inode *inode = mapping->host;

        if (to > inode->i_size) {
                truncate_pagecache(inode, inode->i_size);
                nilfs_truncate(inode);
        }
}

static int nilfs_write_begin(struct file *file, struct address_space *mapping,
                             loff_t pos, unsigned len,
                             struct page **pagep, void **fsdata)
{
        struct inode *inode = mapping->host;
        int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

        if (unlikely(err))
                return err;

        err = block_write_begin(mapping, pos, len, pagep, nilfs_get_block);
        if (unlikely(err)) {
                nilfs_write_failed(mapping, pos + len);
                nilfs_transaction_abort(inode->i_sb);
        }
        return err;
}

static int nilfs_write_end(struct file *file, struct address_space *mapping,
                           loff_t pos, unsigned len, unsigned copied,
                           struct page *page, void *fsdata)
{
        struct inode *inode = mapping->host;
        unsigned int start = pos & (PAGE_SIZE - 1);
        unsigned int nr_dirty;
        int err;

        nr_dirty = nilfs_page_count_clean_buffers(page, start,
                                                  start + copied);
        copied = generic_write_end(file, mapping, pos, len, copied, page,
                                   fsdata);
        nilfs_set_file_dirty(inode, nr_dirty);
        err = nilfs_transaction_commit(inode->i_sb);
        return err ? : copied;
}

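/*
 * Editor's note: "err ? : copied" above is the GNU C conditional with
 * an omitted middle operand (widely used in the kernel); it evaluates
 * to err when err is nonzero and to copied otherwise.  Note also that
 * nr_dirty is counted with nilfs_page_count_clean_buffers() *before*
 * generic_write_end() marks the buffers dirty, so only buffers newly
 * dirtied by this write are charged via nilfs_set_file_dirty().
 */
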
static ssize_t
nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
        struct inode *inode = file_inode(iocb->ki_filp);

        if (iov_iter_rw(iter) == WRITE)
                return 0;

        /* Needs synchronization with the cleaner */
        return blockdev_direct_IO(iocb, inode, iter, nilfs_get_block);
}

const struct address_space_operations nilfs_aops = {
        .writepage              = nilfs_writepage,
        .read_folio             = nilfs_read_folio,
        .writepages             = nilfs_writepages,
        .dirty_folio            = nilfs_dirty_folio,
        .readahead              = nilfs_readahead,
        .write_begin            = nilfs_write_begin,
        .write_end              = nilfs_write_end,
        .invalidate_folio       = block_invalidate_folio,
        .direct_IO              = nilfs_direct_IO,
        .is_partially_uptodate  = block_is_partially_uptodate,
};

static int nilfs_insert_inode_locked(struct inode *inode,
                                     struct nilfs_root *root,
                                     unsigned long ino)
{
        struct nilfs_iget_args args = {
                .ino = ino, .root = root, .cno = 0, .for_gc = false,
                .for_btnc = false, .for_shadow = false
        };

        return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
        struct super_block *sb = dir->i_sb;
        struct the_nilfs *nilfs = sb->s_fs_info;
        struct inode *inode;
        struct nilfs_inode_info *ii;
        struct nilfs_root *root;
        struct buffer_head *bh;
        int err = -ENOMEM;
        ino_t ino;

        inode = new_inode(sb);
        if (unlikely(!inode))
                goto failed;

        mapping_set_gfp_mask(inode->i_mapping,
                             mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));

        root = NILFS_I(dir)->i_root;
        ii = NILFS_I(inode);
        ii->i_state = BIT(NILFS_I_NEW);
        ii->i_root = root;

        err = nilfs_ifile_create_inode(root->ifile, &ino, &bh);
        if (unlikely(err))
                goto failed_ifile_create_inode;
        /* reference count of i_bh inherits from nilfs_mdt_read_block() */

        if (unlikely(ino < NILFS_USER_INO)) {
                nilfs_warn(sb,
                           "inode bitmap is inconsistent for reserved inodes");
                do {
                        brelse(bh);
                        err = nilfs_ifile_create_inode(root->ifile, &ino, &bh);
                        if (unlikely(err))
                                goto failed_ifile_create_inode;
                } while (ino < NILFS_USER_INO);

                nilfs_info(sb, "repaired inode bitmap for reserved inodes");
        }
        ii->i_bh = bh;

        atomic64_inc(&root->inodes_count);
        inode_init_owner(&init_user_ns, inode, dir, mode);
        inode->i_ino = ino;
        inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);

        if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
                err = nilfs_bmap_read(ii->i_bmap, NULL);
                if (err < 0)
                        goto failed_after_creation;

                set_bit(NILFS_I_BMAP, &ii->i_state);
                /* No lock is needed; iget() ensures it. */
        }

        ii->i_flags = nilfs_mask_flags(
                mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

        /* ii->i_file_acl = 0; */
        /* ii->i_dir_acl = 0; */
        ii->i_dir_start_lookup = 0;
        nilfs_set_inode_flags(inode);
        spin_lock(&nilfs->ns_next_gen_lock);
        inode->i_generation = nilfs->ns_next_generation++;
        spin_unlock(&nilfs->ns_next_gen_lock);
        if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
                err = -EIO;
                goto failed_after_creation;
        }

        err = nilfs_init_acl(inode, dir);
        if (unlikely(err))
                /*
                 * Never occurs.  When nilfs_init_acl() is actually
                 * supported, proper cancellation of the jobs above must
                 * be considered.
                 */
                goto failed_after_creation;

        return inode;

 failed_after_creation:
        clear_nlink(inode);
        if (inode->i_state & I_NEW)
                unlock_new_inode(inode);
        iput(inode);  /*
                       * raw_inode will be deleted through
                       * nilfs_evict_inode().
                       */
        goto failed;

 failed_ifile_create_inode:
        make_bad_inode(inode);
        iput(inode);
 failed:
        return ERR_PTR(err);
}

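/*
 * Editor's sketch (assumed caller pattern, see namei.c for the real
 * code): a create-type operation is expected to look roughly like
 *
 *      inode = nilfs_new_inode(dir, mode);
 *      if (!IS_ERR(inode)) {
 *              inode->i_op = &nilfs_file_inode_operations;
 *              ...
 *              nilfs_mark_inode_dirty(inode);
 *      }
 *
 * with the whole sequence wrapped in a transaction begun by the caller.
 */
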
void nilfs_set_inode_flags(struct inode *inode)
{
        unsigned int flags = NILFS_I(inode)->i_flags;
        unsigned int new_fl = 0;

        if (flags & FS_SYNC_FL)
                new_fl |= S_SYNC;
        if (flags & FS_APPEND_FL)
                new_fl |= S_APPEND;
        if (flags & FS_IMMUTABLE_FL)
                new_fl |= S_IMMUTABLE;
        if (flags & FS_NOATIME_FL)
                new_fl |= S_NOATIME;
        if (flags & FS_DIRSYNC_FL)
                new_fl |= S_DIRSYNC;
        inode_set_flags(inode, new_fl, S_SYNC | S_APPEND | S_IMMUTABLE |
                        S_NOATIME | S_DIRSYNC);
}

int nilfs_read_inode_common(struct inode *inode,
                            struct nilfs_inode *raw_inode)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        int err;

        inode->i_mode = le16_to_cpu(raw_inode->i_mode);
        i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
        i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
        set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
        inode->i_size = le64_to_cpu(raw_inode->i_size);
        inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
        inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
        inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
        inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
        inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
        inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
        if (inode->i_nlink == 0)
                return -ESTALE; /* this inode is deleted */

        inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
        ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
        ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
        ii->i_dir_acl = S_ISREG(inode->i_mode) ?
                0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
        ii->i_dir_start_lookup = 0;
        inode->i_generation = le32_to_cpu(raw_inode->i_generation);

        if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
            S_ISLNK(inode->i_mode)) {
                err = nilfs_bmap_read(ii->i_bmap, raw_inode);
                if (err < 0)
                        return err;
                set_bit(NILFS_I_BMAP, &ii->i_state);
                /* No lock is needed; iget() ensures it. */
        }
        return 0;
}

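/*
 * Editor's note: i_atime is intentionally loaded from the on-disk
 * mtime fields above; the on-disk nilfs_inode does not appear to carry
 * a separate atime, so the access time is seeded from the modification
 * time when the inode is read in.
 */
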
static int __nilfs_read_inode(struct super_block *sb,
                              struct nilfs_root *root, unsigned long ino,
                              struct inode *inode)
{
        struct the_nilfs *nilfs = sb->s_fs_info;
        struct buffer_head *bh;
        struct nilfs_inode *raw_inode;
        int err;

        down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
        if (unlikely(err))
                goto bad_inode;

        raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

        err = nilfs_read_inode_common(inode, raw_inode);
        if (err)
                goto failed_unmap;

        if (S_ISREG(inode->i_mode)) {
                inode->i_op = &nilfs_file_inode_operations;
                inode->i_fop = &nilfs_file_operations;
                inode->i_mapping->a_ops = &nilfs_aops;
        } else if (S_ISDIR(inode->i_mode)) {
                inode->i_op = &nilfs_dir_inode_operations;
                inode->i_fop = &nilfs_dir_operations;
                inode->i_mapping->a_ops = &nilfs_aops;
        } else if (S_ISLNK(inode->i_mode)) {
                inode->i_op = &nilfs_symlink_inode_operations;
                inode_nohighmem(inode);
                inode->i_mapping->a_ops = &nilfs_aops;
        } else {
                inode->i_op = &nilfs_special_inode_operations;
                init_special_inode(
                        inode, inode->i_mode,
                        huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
        }
        nilfs_ifile_unmap_inode(root->ifile, ino, bh);
        brelse(bh);
        up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        nilfs_set_inode_flags(inode);
        mapping_set_gfp_mask(inode->i_mapping,
                             mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
        return 0;

 failed_unmap:
        nilfs_ifile_unmap_inode(root->ifile, ino, bh);
        brelse(bh);

 bad_inode:
        up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        return err;
}

static int nilfs_iget_test(struct inode *inode, void *opaque)
{
        struct nilfs_iget_args *args = opaque;
        struct nilfs_inode_info *ii;

        if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
                return 0;

        ii = NILFS_I(inode);
        if (test_bit(NILFS_I_BTNC, &ii->i_state)) {
                if (!args->for_btnc)
                        return 0;
        } else if (args->for_btnc) {
                return 0;
        }
        if (test_bit(NILFS_I_SHADOW, &ii->i_state)) {
                if (!args->for_shadow)
                        return 0;
        } else if (args->for_shadow) {
                return 0;
        }

        if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
                return !args->for_gc;

        return args->for_gc && args->cno == ii->i_cno;
}

static int nilfs_iget_set(struct inode *inode, void *opaque)
{
        struct nilfs_iget_args *args = opaque;

        inode->i_ino = args->ino;
        NILFS_I(inode)->i_cno = args->cno;
        NILFS_I(inode)->i_root = args->root;
        if (args->root && args->ino == NILFS_ROOT_INO)
                nilfs_get_root(args->root);

        if (args->for_gc)
                NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
        if (args->for_btnc)
                NILFS_I(inode)->i_state |= BIT(NILFS_I_BTNC);
        if (args->for_shadow)
                NILFS_I(inode)->i_state |= BIT(NILFS_I_SHADOW);
        return 0;
}

struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
                            unsigned long ino)
{
        struct nilfs_iget_args args = {
                .ino = ino, .root = root, .cno = 0, .for_gc = false,
                .for_btnc = false, .for_shadow = false
        };

        return ilookup5(sb, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
                                unsigned long ino)
{
        struct nilfs_iget_args args = {
                .ino = ino, .root = root, .cno = 0, .for_gc = false,
                .for_btnc = false, .for_shadow = false
        };

        return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}

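/*
 * Editor's note: several in-core inodes may share one inode number
 * (the regular inode, a GC copy for some checkpoint, a B-tree node
 * cache holder, and a shadow copy), so nilfs_iget_test() matches on
 * the whole nilfs_iget_args tuple rather than on the number alone.
 * Each lookup helper fills in the particular combination it wants.
 */
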
struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
                         unsigned long ino)
{
        struct inode *inode;
        int err;

        inode = nilfs_iget_locked(sb, root, ino);
        if (unlikely(!inode))
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;

        err = __nilfs_read_inode(sb, root, ino, inode);
        if (unlikely(err)) {
                iget_failed(inode);
                return ERR_PTR(err);
        }
        unlock_new_inode(inode);
        return inode;
}

struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
                                __u64 cno)
{
        struct nilfs_iget_args args = {
                .ino = ino, .root = NULL, .cno = cno, .for_gc = true,
                .for_btnc = false, .for_shadow = false
        };
        struct inode *inode;
        int err;

        inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
        if (unlikely(!inode))
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;

        err = nilfs_init_gcinode(inode);
        if (unlikely(err)) {
                iget_failed(inode);
                return ERR_PTR(err);
        }
        unlock_new_inode(inode);
        return inode;
}

/**
 * nilfs_attach_btree_node_cache - attach a B-tree node cache to the inode
 * @inode: inode object
 *
 * nilfs_attach_btree_node_cache() attaches a B-tree node cache to @inode,
 * or does nothing if the inode already has it.  This function allocates
 * an additional inode to maintain the page cache of B-tree nodes one-on-one.
 *
 * Return Value: On success, 0 is returned.  On errors, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_attach_btree_node_cache(struct inode *inode)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct inode *btnc_inode;
        struct nilfs_iget_args args;

        if (ii->i_assoc_inode)
                return 0;

        args.ino = inode->i_ino;
        args.root = ii->i_root;
        args.cno = ii->i_cno;
        args.for_gc = test_bit(NILFS_I_GCINODE, &ii->i_state) != 0;
        args.for_btnc = true;
        args.for_shadow = test_bit(NILFS_I_SHADOW, &ii->i_state) != 0;

        btnc_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
                                  nilfs_iget_set, &args);
        if (unlikely(!btnc_inode))
                return -ENOMEM;
        if (btnc_inode->i_state & I_NEW) {
                nilfs_init_btnc_inode(btnc_inode);
                unlock_new_inode(btnc_inode);
        }
        NILFS_I(btnc_inode)->i_assoc_inode = inode;
        NILFS_I(btnc_inode)->i_bmap = ii->i_bmap;
        ii->i_assoc_inode = btnc_inode;

        return 0;
}

/**
 * nilfs_detach_btree_node_cache - detach the B-tree node cache from the inode
 * @inode: inode object
 *
 * nilfs_detach_btree_node_cache() detaches the B-tree node cache and its
 * holder inode bound to @inode, or does nothing if @inode doesn't have it.
 */
void nilfs_detach_btree_node_cache(struct inode *inode)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct inode *btnc_inode = ii->i_assoc_inode;

        if (btnc_inode) {
                NILFS_I(btnc_inode)->i_assoc_inode = NULL;
                ii->i_assoc_inode = NULL;
                iput(btnc_inode);
        }
}

/**
 * nilfs_iget_for_shadow - obtain inode for shadow mapping
 * @inode: inode object that uses shadow mapping
 *
 * nilfs_iget_for_shadow() allocates a pair of inodes that hold page
 * caches for shadow mapping.  The page cache for data pages is set up
 * in one inode and the one for b-tree node pages is set up in the
 * other inode, which is attached to the former inode.
 *
 * Return Value: On success, a pointer to the inode for data pages is
 * returned.  On errors, one of the following negative error codes is
 * returned in a pointer type.
 *
 * %-ENOMEM - Insufficient memory available.
 */
struct inode *nilfs_iget_for_shadow(struct inode *inode)
{
        struct nilfs_iget_args args = {
                .ino = inode->i_ino, .root = NULL, .cno = 0, .for_gc = false,
                .for_btnc = false, .for_shadow = true
        };
        struct inode *s_inode;
        int err;

        s_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
                               nilfs_iget_set, &args);
        if (unlikely(!s_inode))
                return ERR_PTR(-ENOMEM);
        if (!(s_inode->i_state & I_NEW))
                return inode;

        NILFS_I(s_inode)->i_flags = 0;
        memset(NILFS_I(s_inode)->i_bmap, 0, sizeof(struct nilfs_bmap));
        mapping_set_gfp_mask(s_inode->i_mapping, GFP_NOFS);

        err = nilfs_attach_btree_node_cache(s_inode);
        if (unlikely(err)) {
                iget_failed(s_inode);
                return ERR_PTR(err);
        }
        unlock_new_inode(s_inode);
        return s_inode;
}

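/*
 * Editor's note (assumption based on the shadow-mapping code in
 * mdt.c): the inode pair obtained above backs shadow page caches into
 * which metadata file contents are copied, so that an earlier state
 * can be restored if a checkpoint write fails.  GFP_NOFS is set
 * because these caches are populated from writeback context.
 */
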
void nilfs_write_inode_common(struct inode *inode,
                              struct nilfs_inode *raw_inode, int has_bmap)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);

        raw_inode->i_mode = cpu_to_le16(inode->i_mode);
        raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
        raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
        raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
        raw_inode->i_size = cpu_to_le64(inode->i_size);
        raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
        raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
        raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
        raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
        raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

        raw_inode->i_flags = cpu_to_le32(ii->i_flags);
        raw_inode->i_generation = cpu_to_le32(inode->i_generation);

        if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
                struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

                /* zero-fill unused portion in the case of super root block */
                raw_inode->i_xattr = 0;
                raw_inode->i_pad = 0;
                memset((void *)raw_inode + sizeof(*raw_inode), 0,
                       nilfs->ns_inode_size - sizeof(*raw_inode));
        }

        if (has_bmap)
                nilfs_bmap_write(ii->i_bmap, raw_inode);
        else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
                raw_inode->i_device_code =
                        cpu_to_le64(huge_encode_dev(inode->i_rdev));
        /*
         * When extending the on-disk inode, nilfs->ns_inode_size should
         * be checked so that appended fields are handled correctly.
         */
}

void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags)
{
        ino_t ino = inode->i_ino;
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct inode *ifile = ii->i_root->ifile;
        struct nilfs_inode *raw_inode;

        raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

        if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
                memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
        if (flags & I_DIRTY_DATASYNC)
                set_bit(NILFS_I_INODE_SYNC, &ii->i_state);

        nilfs_write_inode_common(inode, raw_inode, 0);
        /*
         * XXX: call with has_bmap = 0 is a workaround to avoid
         * deadlock of bmap.  This delays update of i_bmap to just
         * before writing.
         */

        nilfs_ifile_unmap_inode(ifile, ino, ibh);
}

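/*
 * Editor's note: truncation below is done in bounded chunks of
 * NILFS_MAX_TRUNCATE_BLOCKS so that each nilfs_bmap_truncate() call
 * stays reasonably small and nilfs_relax_pressure_in_lock() can flush
 * segments between rounds when memory is tight.
 */
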
#define NILFS_MAX_TRUNCATE_BLOCKS       16384  /* 64MB for 4KB block */

static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
                                unsigned long from)
{
        __u64 b;
        int ret;

        if (!test_bit(NILFS_I_BMAP, &ii->i_state))
                return;
 repeat:
        ret = nilfs_bmap_last_key(ii->i_bmap, &b);
        if (ret == -ENOENT)
                return;
        else if (ret < 0)
                goto failed;

        if (b < from)
                return;

        b -= min_t(__u64, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
        ret = nilfs_bmap_truncate(ii->i_bmap, b);
        nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
        if (!ret || (ret == -ENOMEM &&
                     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
                goto repeat;

 failed:
        nilfs_warn(ii->vfs_inode.i_sb, "error %d truncating bmap (ino=%lu)",
                   ret, ii->vfs_inode.i_ino);
}

void nilfs_truncate(struct inode *inode)
{
        unsigned long blkoff;
        unsigned int blocksize;
        struct nilfs_transaction_info ti;
        struct super_block *sb = inode->i_sb;
        struct nilfs_inode_info *ii = NILFS_I(inode);

        if (!test_bit(NILFS_I_BMAP, &ii->i_state))
                return;
        if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
                return;

        blocksize = sb->s_blocksize;
        blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
        nilfs_transaction_begin(sb, &ti, 0); /* never fails */

        block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

        nilfs_truncate_bmap(ii, blkoff);

        inode->i_mtime = inode->i_ctime = current_time(inode);
        if (IS_SYNC(inode))
                nilfs_set_transaction_flag(NILFS_TI_SYNC);

        nilfs_mark_inode_dirty(inode);
        nilfs_set_file_dirty(inode, 0);
        nilfs_transaction_commit(sb);
        /*
         * May construct a logical segment and may fail in sync mode.
         * But truncate has no return value.
         */
}

static void nilfs_clear_inode(struct inode *inode)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);

        /*
         * Free the resources allocated in nilfs_read_inode() here.
         */
        BUG_ON(!list_empty(&ii->i_dirty));
        brelse(ii->i_bh);
        ii->i_bh = NULL;

        if (nilfs_is_metadata_file_inode(inode))
                nilfs_mdt_clear(inode);

        if (test_bit(NILFS_I_BMAP, &ii->i_state))
                nilfs_bmap_clear(ii->i_bmap);

        if (!test_bit(NILFS_I_BTNC, &ii->i_state))
                nilfs_detach_btree_node_cache(inode);

        if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
                nilfs_put_root(ii->i_root);
}

void nilfs_evict_inode(struct inode *inode)
{
        struct nilfs_transaction_info ti;
        struct super_block *sb = inode->i_sb;
        struct nilfs_inode_info *ii = NILFS_I(inode);
        int ret;

        if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
                truncate_inode_pages_final(&inode->i_data);
                clear_inode(inode);
                nilfs_clear_inode(inode);
                return;
        }
        nilfs_transaction_begin(sb, &ti, 0); /* never fails */

        truncate_inode_pages_final(&inode->i_data);

        /* TODO: some of the following operations may fail.  */
        nilfs_truncate_bmap(ii, 0);
        nilfs_mark_inode_dirty(inode);
        clear_inode(inode);

        ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
        if (!ret)
                atomic64_dec(&ii->i_root->inodes_count);

        nilfs_clear_inode(inode);

        if (IS_SYNC(inode))
                nilfs_set_transaction_flag(NILFS_TI_SYNC);
        nilfs_transaction_commit(sb);
        /*
         * May construct a logical segment and may fail in sync mode.
         * But delete_inode has no return value.
         */
}

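/*
 * Editor's note: the early return in nilfs_evict_inode() covers inodes
 * that need no on-disk deletion: still-linked inodes, inodes without a
 * root object (e.g. GC and shadow inodes), and bad inodes.  Only real,
 * unlinked file inodes take the transactional path that truncates the
 * bmap and frees the ifile entry.
 */
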
int nilfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
                  struct iattr *iattr)
{
        struct nilfs_transaction_info ti;
        struct inode *inode = d_inode(dentry);
        struct super_block *sb = inode->i_sb;
        int err;

        err = setattr_prepare(&init_user_ns, dentry, iattr);
        if (err)
                return err;

        err = nilfs_transaction_begin(sb, &ti, 0);
        if (unlikely(err))
                return err;

        if ((iattr->ia_valid & ATTR_SIZE) &&
            iattr->ia_size != i_size_read(inode)) {
                inode_dio_wait(inode);
                truncate_setsize(inode, iattr->ia_size);
                nilfs_truncate(inode);
        }

        setattr_copy(&init_user_ns, inode, iattr);
        mark_inode_dirty(inode);

        if (iattr->ia_valid & ATTR_MODE) {
                err = nilfs_acl_chmod(inode);
                if (unlikely(err))
                        goto out_err;
        }

        return nilfs_transaction_commit(sb);

 out_err:
        nilfs_transaction_abort(sb);
        return err;
}

int nilfs_permission(struct user_namespace *mnt_userns, struct inode *inode,
                     int mask)
{
        struct nilfs_root *root = NILFS_I(inode)->i_root;

        if ((mask & MAY_WRITE) && root &&
            root->cno != NILFS_CPTREE_CURRENT_CNO)
                return -EROFS; /* snapshot is not writable */

        return generic_permission(&init_user_ns, inode, mask);
}

int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
        struct nilfs_inode_info *ii = NILFS_I(inode);
        int err;

        spin_lock(&nilfs->ns_inode_lock);
        if (ii->i_bh == NULL) {
                spin_unlock(&nilfs->ns_inode_lock);
                err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
                                                  inode->i_ino, pbh);
                if (unlikely(err))
                        return err;
                spin_lock(&nilfs->ns_inode_lock);
                if (ii->i_bh == NULL)
                        ii->i_bh = *pbh;
                else {
                        brelse(*pbh);
                        *pbh = ii->i_bh;
                }
        } else
                *pbh = ii->i_bh;

        get_bh(*pbh);
        spin_unlock(&nilfs->ns_inode_lock);
        return 0;
}

int nilfs_inode_dirty(struct inode *inode)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
        int ret = 0;

        if (!list_empty(&ii->i_dirty)) {
                spin_lock(&nilfs->ns_inode_lock);
                ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
                        test_bit(NILFS_I_BUSY, &ii->i_state);
                spin_unlock(&nilfs->ns_inode_lock);
        }
        return ret;
}

int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

        atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);

        if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
                return 0;

        spin_lock(&nilfs->ns_inode_lock);
        if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
            !test_bit(NILFS_I_BUSY, &ii->i_state)) {
                /*
                 * Because this routine may race with nilfs_dispose_list(),
                 * we have to check NILFS_I_QUEUED here, too.
                 */
                if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
                        /*
                         * This will happen when somebody is freeing
                         * this inode.
                         */
                        nilfs_warn(inode->i_sb,
                                   "cannot set file dirty (ino=%lu): the file is being freed",
                                   inode->i_ino);
                        spin_unlock(&nilfs->ns_inode_lock);
                        return -EINVAL; /*
                                         * NILFS_I_DIRTY may remain for
                                         * freeing inode.
                                         */
                }
                list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
                set_bit(NILFS_I_QUEUED, &ii->i_state);
        }
        spin_unlock(&nilfs->ns_inode_lock);
        return 0;
}

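/*
 * Editor's note: nilfs_set_file_dirty() above accounts dirty *data*
 * blocks and queues the inode on ns_dirty_files for the segment
 * constructor, whereas __nilfs_mark_inode_dirty() below copies the
 * in-core inode into its ifile block so that the inode itself gets
 * written; nilfs_truncate(), for example, calls both.
 */
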
int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
{
        struct buffer_head *ibh;
        int err;

        err = nilfs_load_inode_block(inode, &ibh);
        if (unlikely(err)) {
                nilfs_warn(inode->i_sb,
                           "cannot mark inode dirty (ino=%lu): error %d loading inode block",
                           inode->i_ino, err);
                return err;
        }
        nilfs_update_inode(inode, ibh, flags);
        mark_buffer_dirty(ibh);
        nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
        brelse(ibh);
        return 0;
}

/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 * @flags: flags to determine the dirty state of the inode
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from a nilfs_inode to a corresponding inode
 * entry in the inode block.  This operation is excluded from the segment
 * construction.  This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode, int flags)
{
        struct nilfs_transaction_info ti;
        struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

        if (is_bad_inode(inode)) {
                nilfs_warn(inode->i_sb,
                           "tried to mark bad_inode dirty. ignored.");
                dump_stack();
                return;
        }
        if (mdi) {
                nilfs_mdt_mark_dirty(inode);
                return;
        }
        nilfs_transaction_begin(inode->i_sb, &ti, 0);
        __nilfs_mark_inode_dirty(inode, flags);
        nilfs_transaction_commit(inode->i_sb); /* never fails */
}

int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                 __u64 start, __u64 len)
{
        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
        __u64 logical = 0, phys = 0, size = 0;
        __u32 flags = 0;
        loff_t isize;
        sector_t blkoff, end_blkoff;
        sector_t delalloc_blkoff;
        unsigned long delalloc_blklen;
        unsigned int blkbits = inode->i_blkbits;
        int ret, n;

        ret = fiemap_prep(inode, fieinfo, start, &len, 0);
        if (ret)
                return ret;

        inode_lock(inode);

        isize = i_size_read(inode);

        blkoff = start >> blkbits;
        end_blkoff = (start + len - 1) >> blkbits;

        delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
                                                        &delalloc_blkoff);

        do {
                __u64 blkphy;
                unsigned int maxblocks;

                if (delalloc_blklen && blkoff == delalloc_blkoff) {
                        if (size) {
                                /* End of the current extent */
                                ret = fiemap_fill_next_extent(
                                        fieinfo, logical, phys, size, flags);
                                if (ret)
                                        break;
                        }
                        if (blkoff > end_blkoff)
                                break;

                        flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
                        logical = blkoff << blkbits;
                        phys = 0;
                        size = delalloc_blklen << blkbits;

                        blkoff = delalloc_blkoff + delalloc_blklen;
                        delalloc_blklen = nilfs_find_uncommitted_extent(
                                inode, blkoff, &delalloc_blkoff);
                        continue;
                }

                /*
                 * Limit the number of blocks that we look up so as
                 * not to get into the next delayed allocation extent.
                 */
                maxblocks = INT_MAX;
                if (delalloc_blklen)
                        maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
                                          maxblocks);
                blkphy = 0;

                down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
                n = nilfs_bmap_lookup_contig(
                        NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
                up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

                if (n < 0) {
                        int past_eof;

                        if (unlikely(n != -ENOENT))
                                break; /* error */

                        /* HOLE */
                        blkoff++;
                        past_eof = ((blkoff << blkbits) >= isize);

                        if (size) {
                                /* End of the current extent */

                                if (past_eof)
                                        flags |= FIEMAP_EXTENT_LAST;

                                ret = fiemap_fill_next_extent(
                                        fieinfo, logical, phys, size, flags);
                                if (ret)
                                        break;
                                size = 0;
                        }
                        if (blkoff > end_blkoff || past_eof)
                                break;
                } else {
                        if (size) {
                                if (phys && blkphy << blkbits == phys + size) {
                                        /* The current extent goes on */
                                        size += n << blkbits;
                                } else {
                                        /* Terminate the current extent */
                                        ret = fiemap_fill_next_extent(
                                                fieinfo, logical, phys, size,
                                                flags);
                                        if (ret || blkoff > end_blkoff)
                                                break;

                                        /* Start another extent */
                                        flags = FIEMAP_EXTENT_MERGED;
                                        logical = blkoff << blkbits;
                                        phys = blkphy << blkbits;
                                        size = n << blkbits;
                                }
                        } else {
                                /* Start a new extent */
                                flags = FIEMAP_EXTENT_MERGED;
                                logical = blkoff << blkbits;
                                phys = blkphy << blkbits;
                                size = n << blkbits;
                        }
                        blkoff += n;
                }
                cond_resched();
        } while (true);

        /* If ret is 1 then we just hit the end of the extent array */
        if (ret == 1)
                ret = 0;

        inode_unlock(inode);
        return ret;
}