// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi.
 *
 */

#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include <linux/fiemap.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"

/**
 * struct nilfs_iget_args - arguments used during comparison between inodes
 * @ino: inode number
 * @cno: checkpoint number
 * @root: pointer to NILFS root object (mounted checkpoint)
 * @for_gc: inode for GC flag
 * @for_btnc: inode for B-tree node cache flag
 */
struct nilfs_iget_args {
	u64 ino;
	__u64 cno;
	struct nilfs_root *root;
	bool for_gc;
	bool for_btnc;
};

static int nilfs_iget_test(struct inode *inode, void *opaque);

void nilfs_inode_add_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_add_bytes(inode, i_blocksize(inode) * n);
	if (root)
		atomic64_add(n, &root->blocks_count);
}

void nilfs_inode_sub_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_sub_bytes(inode, i_blocksize(inode) * n);
	if (root)
		atomic64_sub(n, &root->blocks_count);
}

/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode: inode struct of the target file
 * @blkoff: file block number
 * @bh_result: buffer head to be mapped on
 * @create: indicates whether to allocate the block if it has not been
 *	allocated yet.
 *
 * This function does not issue actual read requests for the specified data
 * blocks; that is done by the VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 blknum = 0;
	int err = 0, ret;
	unsigned int maxblocks = bh_result->b_size >> inode->i_blkbits;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	if (ret >= 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		if (ret > 0)
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * The get_block() function could be called
				 * from multiple callers for an inode.
				 * However, the page having this block must
				 * be locked in this case.
				 */
				nilfs_warn(inode->i_sb,
					   "%s (ino=%lu): a race condition while inserting a data block at offset=%llu",
					   __func__, inode->i_ino,
					   (unsigned long long)blkoff);
				err = 0;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_mark_inode_dirty_sync(inode);
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		set_buffer_delay(bh_result);
		map_bh(bh_result, inode->i_sb, 0);
		/* Disk block number must be changed to proper value */

	} else if (ret == -ENOENT) {
		/*
		 * Not found is not an error (e.g. a hole); the buffer must
		 * be returned without the mapped state flag.
		 */
		;
	} else {
		err = ret;
	}

 out:
	return err;
}

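/*
 * Commentary (added): nilfs_get_block() follows the generic get_block_t
 * contract.  A lookup miss on read (a hole) leaves bh_result unmapped;
 * a successful allocation maps the buffer with the "new" and "delay"
 * bits set and a placeholder block number of zero, because the final
 * disk address is only decided when the segment constructor writes the
 * block out.
 */
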
/**
 * nilfs_readpage() - implement the readpage() method of the nilfs_aops
 * address_space_operations.
 * @file: file struct of the file to be read
 * @page: the page to be read
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, nilfs_get_block);
}

static void nilfs_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, nilfs_get_block);
}

static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err = 0;

	if (sb_rdonly(inode->i_sb)) {
		nilfs_clear_dirty_pages(mapping, false);
		return -EROFS;
	}

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
						    wbc->range_start,
						    wbc->range_end);
	return err;
}

static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	if (sb_rdonly(inode->i_sb)) {
		/*
		 * This means the filesystem was remounted read-only
		 * because of an error or metadata corruption, while
		 * dirty pages still remain and are being flushed in the
		 * background.  Simply discard this dirty page here.
		 */
		nilfs_clear_dirty_page(page, false);
		unlock_page(page);
		return -EROFS;
	}

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}

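/*
 * Commentary (added): since NILFS2 is log-structured, neither callback
 * above writes pages directly.  nilfs_writepages() starts synchronous
 * construction of a data-sync segment only for WB_SYNC_ALL writeback;
 * nilfs_writepage() redirties the page and either constructs a segment
 * synchronously (WB_SYNC_ALL) or merely kicks the segment flusher when
 * called for memory reclaim.
 */
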
static bool nilfs_dirty_folio(struct address_space *mapping,
			      struct folio *folio)
{
	struct inode *inode = mapping->host;
	struct buffer_head *head;
	unsigned int nr_dirty = 0;
	bool ret = filemap_dirty_folio(mapping, folio);

	/*
	 * The page may not be locked, e.g. if called from
	 * try_to_unmap_one().
	 */
	spin_lock(&mapping->private_lock);
	head = folio_buffers(folio);
	if (head) {
		struct buffer_head *bh = head;

		do {
			/* Do not mark hole blocks dirty */
			if (buffer_dirty(bh) || !buffer_mapped(bh))
				continue;

			set_buffer_dirty(bh);
			nr_dirty++;
		} while (bh = bh->b_this_page, bh != head);
	} else if (ret) {
		nr_dirty = 1 << (folio_shift(folio) - inode->i_blkbits);
	}
	spin_unlock(&mapping->private_lock);

	if (nr_dirty)
		nilfs_set_file_dirty(inode, nr_dirty);
	return ret;
}

void nilfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		nilfs_truncate(inode);
	}
}

static int nilfs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

	if (unlikely(err))
		return err;

	err = block_write_begin(mapping, pos, len, flags, pagep,
				nilfs_get_block);
	if (unlikely(err)) {
		nilfs_write_failed(mapping, pos + len);
		nilfs_transaction_abort(inode->i_sb);
	}
	return err;
}

static int nilfs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned int start = pos & (PAGE_SIZE - 1);
	unsigned int nr_dirty;
	int err;

	nr_dirty = nilfs_page_count_clean_buffers(page, start,
						  start + copied);
	copied = generic_write_end(file, mapping, pos, len, copied, page,
				   fsdata);
	nilfs_set_file_dirty(inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
	return err ? : copied;
}

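/*
 * Commentary (added): ->write_begin opens a NILFS transaction and
 * ->write_end commits it, so every buffered write is bracketed by
 * nilfs_transaction_begin()/commit().  When block_write_begin() fails,
 * nilfs_write_failed() trims pages instantiated beyond i_size before
 * the transaction is aborted.
 */
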
static ssize_t
nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iov_iter_rw(iter) == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	return blockdev_direct_IO(iocb, inode, iter, nilfs_get_block);
}

const struct address_space_operations nilfs_aops = {
	.writepage		= nilfs_writepage,
	.readpage		= nilfs_readpage,
	.writepages		= nilfs_writepages,
	.dirty_folio		= nilfs_dirty_folio,
	.readahead		= nilfs_readahead,
	.write_begin		= nilfs_write_begin,
	.write_end		= nilfs_write_end,
	/* .releasepage		= nilfs_releasepage, */
	.invalidate_folio	= block_invalidate_folio,
	.direct_IO		= nilfs_direct_IO,
	.is_partially_uptodate	= block_is_partially_uptodate,
};

static int nilfs_insert_inode_locked(struct inode *inode,
				     struct nilfs_root *root,
				     unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = false,
		.for_btnc = false
	};

	return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = BIT(NILFS_I_NEW);
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic64_inc(&root->inodes_count);
	inode_init_owner(&init_user_ns, inode, dir, mode);
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_after_creation;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = nilfs_mask_flags(
		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&nilfs->ns_next_gen_lock);
	inode->i_generation = nilfs->ns_next_generation++;
	spin_unlock(&nilfs->ns_next_gen_lock);
	if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
		err = -EIO;
		goto failed_after_creation;
	}

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		/*
		 * This never happens; once nilfs_init_acl() is actually
		 * supported, proper cancellation of the jobs above must
		 * be considered.
		 */
		goto failed_after_creation;

	return inode;

 failed_after_creation:
	clear_nlink(inode);
	if (inode->i_state & I_NEW)
		unlock_new_inode(inode);
	iput(inode);  /*
		       * raw_inode will be deleted through
		       * nilfs_evict_inode().
		       */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);
 failed:
	return ERR_PTR(err);
}

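/*
 * Commentary (added, based on the callers in namei.c): on success
 * nilfs_new_inode() returns a hashed inode with I_NEW still set; the
 * caller fills in the type-specific fields and finishes with
 * d_instantiate_new() or unlock_new_inode(), or discards the inode
 * with iput() on failure.
 */
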
void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	inode_set_flags(inode, new_fl, S_SYNC | S_APPEND | S_IMMUTABLE |
			S_NOATIME | S_DIRSYNC);
}

int nilfs_read_inode_common(struct inode *inode,
			    struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
	i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
	inode->i_size = le64_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	if (inode->i_nlink == 0)
		return -ESTALE; /* this inode is deleted */

	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
		0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
	ii->i_dir_start_lookup = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
		if (err < 0)
			return err;
		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}
	return 0;
}

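/*
 * Commentary (added): the on-disk fields of struct nilfs_inode are
 * little endian, hence the le*_to_cpu() conversions above.  The raw
 * inode carries no access time of its own, so i_atime is initialized
 * from the on-disk i_mtime when the inode is read in.
 */
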
static int __nilfs_read_inode(struct super_block *sb,
			      struct nilfs_root *root, unsigned long ino,
			      struct inode *inode)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	nilfs_set_inode_flags(inode);
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
	return 0;

 failed_unmap:
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);

 bad_inode:
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	return err;
}

static int nilfs_iget_test(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;
	struct nilfs_inode_info *ii;

	if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
		return 0;

	ii = NILFS_I(inode);
	if (test_bit(NILFS_I_BTNC, &ii->i_state)) {
		if (!args->for_btnc)
			return 0;
	} else if (args->for_btnc) {
		return 0;
	}

	if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
		return !args->for_gc;

	return args->for_gc && args->cno == ii->i_cno;
}

static int nilfs_iget_set(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;

	inode->i_ino = args->ino;
	NILFS_I(inode)->i_cno = args->cno;
	NILFS_I(inode)->i_root = args->root;
	if (args->root && args->ino == NILFS_ROOT_INO)
		nilfs_get_root(args->root);

	if (args->for_gc)
		NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
	if (args->for_btnc)
		NILFS_I(inode)->i_state |= BIT(NILFS_I_BTNC);
	return 0;
}

struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
			    unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = false,
		.for_btnc = false
	};

	return ilookup5(sb, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
				unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = false,
		.for_btnc = false
	};

	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}

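/*
 * Commentary (added): an inode number alone is not a unique key here;
 * the same ino may be instantiated once per mounted checkpoint (root),
 * again for the garbage collector, and again as the holder of a B-tree
 * node cache.  The iget5 variants above therefore match on the whole
 * nilfs_iget_args tuple rather than on plain ino.  A minimal lookup
 * sketch using the helper defined above (illustrative only):
 *
 *	struct inode *inode = nilfs_ilookup(sb, root, ino);
 *
 *	if (inode)
 *		iput(inode);	(drop the reference taken by ilookup5())
 */
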
struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
			 unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = nilfs_iget_locked(sb, root, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = __nilfs_read_inode(sb, root, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
				__u64 cno)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = NULL, .cno = cno, .for_gc = true,
		.for_btnc = false
	};
	struct inode *inode;
	int err;

	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = nilfs_init_gcinode(inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

/**
 * nilfs_attach_btree_node_cache - attach a B-tree node cache to the inode
 * @inode: inode object
 *
 * nilfs_attach_btree_node_cache() attaches a B-tree node cache to @inode,
 * or does nothing if the inode already has it.  This function allocates
 * an additional inode to maintain the page cache of B-tree nodes
 * one-on-one.
 *
 * Return Value: On success, 0 is returned.  On errors, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_attach_btree_node_cache(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *btnc_inode;
	struct nilfs_iget_args args;

	if (ii->i_assoc_inode)
		return 0;

	args.ino = inode->i_ino;
	args.root = ii->i_root;
	args.cno = ii->i_cno;
	args.for_gc = test_bit(NILFS_I_GCINODE, &ii->i_state) != 0;
	args.for_btnc = true;

	btnc_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
				  nilfs_iget_set, &args);
	if (unlikely(!btnc_inode))
		return -ENOMEM;
	if (btnc_inode->i_state & I_NEW) {
		nilfs_init_btnc_inode(btnc_inode);
		unlock_new_inode(btnc_inode);
	}
	NILFS_I(btnc_inode)->i_assoc_inode = inode;
	NILFS_I(btnc_inode)->i_bmap = ii->i_bmap;
	ii->i_assoc_inode = btnc_inode;

	return 0;
}

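/*
 * Commentary (added): the "btnc" companion inode created above exists
 * only to provide a separate page cache for B-tree node blocks; it
 * shares the bmap of the original inode, and i_assoc_inode links the
 * pair in both directions.  nilfs_iget_test() prevents these shadow
 * inodes from being returned by ordinary lookups.
 */
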
/**
 * nilfs_detach_btree_node_cache - detach the B-tree node cache from the inode
 * @inode: inode object
 *
 * nilfs_detach_btree_node_cache() detaches the B-tree node cache and its
 * holder inode bound to @inode, or does nothing if @inode doesn't have it.
 */
void nilfs_detach_btree_node_cache(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *btnc_inode = ii->i_assoc_inode;

	if (btnc_inode) {
		NILFS_I(btnc_inode)->i_assoc_inode = NULL;
		ii->i_assoc_inode = NULL;
		iput(btnc_inode);
	}
}

void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode, int has_bmap)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
	raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
		struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

		/* zero-fill unused portion in the case of super root block */
		raw_inode->i_xattr = 0;
		raw_inode->i_pad = 0;
		memset((void *)raw_inode + sizeof(*raw_inode), 0,
		       nilfs->ns_inode_size - sizeof(*raw_inode));
	}

	if (has_bmap)
		nilfs_bmap_write(ii->i_bmap, raw_inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(huge_encode_dev(inode->i_rdev));
	/*
	 * When extending the inode, nilfs->ns_inode_size should be checked
	 * for substitutions of appended fields.
	 */
}

void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *ifile = ii->i_root->ifile;
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
	if (flags & I_DIRTY_DATASYNC)
		set_bit(NILFS_I_INODE_SYNC, &ii->i_state);

	nilfs_write_inode_common(inode, raw_inode, 0);
	/*
	 * XXX: call with has_bmap = 0 is a workaround to avoid
	 * deadlock of bmap.  This delays update of i_bmap to just
	 * before writing.
	 */

	nilfs_ifile_unmap_inode(ifile, ino, ibh);
}

#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */

static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	__u64 b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
 repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	b -= min_t(__u64, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

 failed:
	nilfs_warn(ii->vfs_inode.i_sb, "error %d truncating bmap (ino=%lu)",
		   ret, ii->vfs_inode.i_ino);
}

void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode->i_mtime = inode->i_ctime = current_time(inode);
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_mark_inode_dirty(inode);
	nilfs_set_file_dirty(inode, 0);
	nilfs_transaction_commit(sb);
	/*
	 * May construct a logical segment and may fail in sync mode.
	 * But truncate has no return value.
	 */
}

static void nilfs_clear_inode(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	/*
	 * Free the resources allocated in nilfs_read_inode() here.
	 */
	BUG_ON(!list_empty(&ii->i_dirty));
	brelse(ii->i_bh);
	ii->i_bh = NULL;

	if (nilfs_is_metadata_file_inode(inode))
		nilfs_mdt_clear(inode);

	if (test_bit(NILFS_I_BMAP, &ii->i_state))
		nilfs_bmap_clear(ii->i_bmap);

	if (!test_bit(NILFS_I_BTNC, &ii->i_state))
		nilfs_detach_btree_node_cache(inode);

	if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
		nilfs_put_root(ii->i_root);
}

void nilfs_evict_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int ret;

	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
		nilfs_clear_inode(inode);
		return;
	}
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	truncate_inode_pages_final(&inode->i_data);

	/* TODO: some of the following operations may fail. */
	nilfs_truncate_bmap(ii, 0);
	nilfs_mark_inode_dirty(inode);
	clear_inode(inode);

	ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
	if (!ret)
		atomic64_dec(&ii->i_root->inodes_count);

	nilfs_clear_inode(inode);

	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	nilfs_transaction_commit(sb);
	/*
	 * May construct a logical segment and may fail in sync mode.
	 * But delete_inode has no return value.
	 */
}

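/*
 * Commentary (added): evicting an unlinked inode truncates its bmap,
 * deletes the ifile entry, decrements the root's inode count, and
 * commits a transaction, so the deletion itself is logged like any
 * ordinary change.
 */
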
int nilfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
		  struct iattr *iattr)
{
	struct nilfs_transaction_info ti;
	struct inode *inode = d_inode(dentry);
	struct super_block *sb = inode->i_sb;
	int err;

	err = setattr_prepare(&init_user_ns, dentry, iattr);
	if (err)
		return err;

	err = nilfs_transaction_begin(sb, &ti, 0);
	if (unlikely(err))
		return err;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		inode_dio_wait(inode);
		truncate_setsize(inode, iattr->ia_size);
		nilfs_truncate(inode);
	}

	setattr_copy(&init_user_ns, inode, iattr);
	mark_inode_dirty(inode);

	if (iattr->ia_valid & ATTR_MODE) {
		err = nilfs_acl_chmod(inode);
		if (unlikely(err))
			goto out_err;
	}

	return nilfs_transaction_commit(sb);

 out_err:
	nilfs_transaction_abort(sb);
	return err;
}

int nilfs_permission(struct user_namespace *mnt_userns, struct inode *inode,
		     int mask)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	if ((mask & MAY_WRITE) && root &&
	    root->cno != NILFS_CPTREE_CURRENT_CNO)
		return -EROFS; /* snapshot is not writable */

	return generic_permission(&init_user_ns, inode, mask);
}

int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	spin_lock(&nilfs->ns_inode_lock);
	if (ii->i_bh == NULL) {
		spin_unlock(&nilfs->ns_inode_lock);
		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
						  inode->i_ino, pbh);
		if (unlikely(err))
			return err;
		spin_lock(&nilfs->ns_inode_lock);
		if (ii->i_bh == NULL)
			ii->i_bh = *pbh;
		else {
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else
		*pbh = ii->i_bh;

	get_bh(*pbh);
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

int nilfs_inode_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	int ret = 0;

	if (!list_empty(&ii->i_dirty)) {
		spin_lock(&nilfs->ns_inode_lock);
		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
			test_bit(NILFS_I_BUSY, &ii->i_state);
		spin_unlock(&nilfs->ns_inode_lock);
	}
	return ret;
}

int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

	atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);

	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/*
		 * Because this routine may race with nilfs_dispose_list(),
		 * we have to check NILFS_I_QUEUED here, too.
		 */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/*
			 * This will happen when somebody is freeing
			 * this inode.
			 */
			nilfs_warn(inode->i_sb,
				   "cannot set file dirty (ino=%lu): the file is being freed",
				   inode->i_ino);
			spin_unlock(&nilfs->ns_inode_lock);
			return -EINVAL; /*
					 * NILFS_I_DIRTY may remain for
					 * freeing inode.
					 */
		}
		list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

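/*
 * Commentary (added): nilfs_set_file_dirty() accounts the newly
 * dirtied blocks and queues the inode on ns_dirty_files so the segment
 * constructor will pick it up.  The igrab() call guards against racing
 * with inode teardown: if the inode is already being freed, it cannot
 * be queued and -EINVAL is returned.
 */
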
int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
{
	struct buffer_head *ibh;
	int err;

	err = nilfs_load_inode_block(inode, &ibh);
	if (unlikely(err)) {
		nilfs_warn(inode->i_sb,
			   "cannot mark inode dirty (ino=%lu): error %d loading inode block",
			   inode->i_ino, err);
		return err;
	}
	nilfs_update_inode(inode, ibh, flags);
	mark_buffer_dirty(ibh);
	nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
	brelse(ibh);
	return 0;
}

/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 * @flags: flags to determine the dirty state of the inode
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from a nilfs_inode to a corresponding inode
 * entry in the inode block.  This operation is excluded from the segment
 * construction.  This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode, int flags)
{
	struct nilfs_transaction_info ti;
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	if (is_bad_inode(inode)) {
		nilfs_warn(inode->i_sb,
			   "tried to mark bad_inode dirty. ignored.");
		dump_stack();
		return;
	}
	if (mdi) {
		nilfs_mdt_mark_dirty(inode);
		return;
	}
	nilfs_transaction_begin(inode->i_sb, &ti, 0);
	__nilfs_mark_inode_dirty(inode, flags);
	nilfs_transaction_commit(inode->i_sb); /* never fails */
}

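/*
 * Commentary (added): nilfs_fiemap() below merges two sources of
 * mapping information: committed extents found through the bmap, and
 * uncommitted delayed-allocation extents still sitting in the page
 * cache, which are reported with FIEMAP_EXTENT_DELALLOC and a physical
 * address of zero.
 */
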
int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 __u64 start, __u64 len)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 logical = 0, phys = 0, size = 0;
	__u32 flags = 0;
	loff_t isize;
	sector_t blkoff, end_blkoff;
	sector_t delalloc_blkoff;
	unsigned long delalloc_blklen;
	unsigned int blkbits = inode->i_blkbits;
	int ret, n;

	ret = fiemap_prep(inode, fieinfo, start, &len, 0);
	if (ret)
		return ret;

	inode_lock(inode);

	isize = i_size_read(inode);

	blkoff = start >> blkbits;
	end_blkoff = (start + len - 1) >> blkbits;

	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
							&delalloc_blkoff);

	do {
		__u64 blkphy;
		unsigned int maxblocks;

		if (delalloc_blklen && blkoff == delalloc_blkoff) {
			if (size) {
				/* End of the current extent */
				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
			}
			if (blkoff > end_blkoff)
				break;

			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
			logical = blkoff << blkbits;
			phys = 0;
			size = delalloc_blklen << blkbits;

			blkoff = delalloc_blkoff + delalloc_blklen;
			delalloc_blklen = nilfs_find_uncommitted_extent(
				inode, blkoff, &delalloc_blkoff);
			continue;
		}

		/*
		 * Limit the number of blocks that we look up so as
		 * not to get into the next delayed allocation extent.
		 */
		maxblocks = INT_MAX;
		if (delalloc_blklen)
			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
					  maxblocks);
		blkphy = 0;

		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
		n = nilfs_bmap_lookup_contig(
			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

		if (n < 0) {
			int past_eof;

			if (unlikely(n != -ENOENT))
				break; /* error */

			/* HOLE */
			blkoff++;
			past_eof = ((blkoff << blkbits) >= isize);

			if (size) {
				/* End of the current extent */

				if (past_eof)
					flags |= FIEMAP_EXTENT_LAST;

				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
				size = 0;
			}
			if (blkoff > end_blkoff || past_eof)
				break;
		} else {
			if (size) {
				if (phys && blkphy << blkbits == phys + size) {
					/* The current extent goes on */
					size += n << blkbits;
				} else {
					/* Terminate the current extent */
					ret = fiemap_fill_next_extent(
						fieinfo, logical, phys, size,
						flags);
					if (ret || blkoff > end_blkoff)
						break;

					/* Start another extent */
					flags = FIEMAP_EXTENT_MERGED;
					logical = blkoff << blkbits;
					phys = blkphy << blkbits;
					size = n << blkbits;
				}
			} else {
				/* Start a new extent */
				flags = FIEMAP_EXTENT_MERGED;
				logical = blkoff << blkbits;
				phys = blkphy << blkbits;
				size = n << blkbits;
			}
			blkoff += n;
		}
		cond_resched();
	} while (true);

	/* If ret is 1 then we just hit the end of the extent array */
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}