// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi.
 *
 */

#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include <linux/fiemap.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"

/**
 * struct nilfs_iget_args - arguments used during comparison between inodes
 * @ino: inode number
 * @cno: checkpoint number
 * @root: pointer to NILFS root object (mounted checkpoint)
 * @for_gc: inode for GC flag
 * @for_btnc: inode for B-tree node cache flag
 * @for_shadow: inode for shadowed page cache flag
 */
struct nilfs_iget_args {
	u64 ino;
	__u64 cno;
	struct nilfs_root *root;
	bool for_gc;
	bool for_btnc;
	bool for_shadow;
};

static int nilfs_iget_test(struct inode *inode, void *opaque);

void nilfs_inode_add_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_add_bytes(inode, i_blocksize(inode) * n);
	if (root)
		atomic64_add(n, &root->blocks_count);
}

void nilfs_inode_sub_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_sub_bytes(inode, i_blocksize(inode) * n);
	if (root)
		atomic64_sub(n, &root->blocks_count);
}

/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode: inode struct of the target file
 * @blkoff: file block number
 * @bh_result: buffer head to be mapped on
 * @create: indicate whether to allocate the block if it has not been
 *	allocated yet.
 *
 * This function does not issue an actual read request for the specified
 * data block; that is done by the VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 blknum = 0;
	int err = 0, ret;
	unsigned int maxblocks = bh_result->b_size >> inode->i_blkbits;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	if (ret >= 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		if (ret > 0)
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * The get_block() function could be called
				 * from multiple callers for an inode.
				 * However, the page having this block must
				 * be locked in this case.
				 */
				nilfs_warn(inode->i_sb,
					   "%s (ino=%lu): a race condition while inserting a data block at offset=%llu",
					   __func__, inode->i_ino,
					   (unsigned long long)blkoff);
				err = 0;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_mark_inode_dirty_sync(inode);
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		set_buffer_delay(bh_result);
		map_bh(bh_result, inode->i_sb, 0);
		/* Disk block number must be changed to proper value */

	} else if (ret == -ENOENT) {
		/*
		 * Not found is not an error (e.g. a hole); we must return
		 * without setting the mapped state flag.
		 */
		;
	} else {
		err = ret;
	}

 out:
	return err;
}

/**
 * nilfs_read_folio() - implement read_folio() method of nilfs_aops {}
 * address_space_operations.
 * @file: file struct of the file to be read
 * @folio: the folio to be read
 */
static int nilfs_read_folio(struct file *file, struct folio *folio)
{
	return mpage_read_folio(folio, nilfs_get_block);
}

static void nilfs_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, nilfs_get_block);
}

static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err = 0;

	if (sb_rdonly(inode->i_sb)) {
		nilfs_clear_dirty_pages(mapping, false);
		return -EROFS;
	}

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
						    wbc->range_start,
						    wbc->range_end);
	return err;
}

static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	if (sb_rdonly(inode->i_sb)) {
		/*
		 * This means the filesystem was remounted read-only
		 * because of an error or metadata corruption, but we
		 * still have dirty pages trying to be flushed in the
		 * background.  Simply discard this dirty page here.
		 */
		nilfs_clear_dirty_page(page, false);
		unlock_page(page);
		return -EROFS;
	}

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}

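/*
 * nilfs_dirty_folio - implement the dirty_folio() address space operation.
 *
 * In addition to the generic dirtying done by filemap_dirty_folio(),
 * mark the individual buffers of the folio dirty (skipping buffers that
 * are already dirty and unmapped hole blocks), and account the newly
 * dirtied blocks to the inode with nilfs_set_file_dirty() so that the
 * segment constructor will write them out later.
 */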
static bool nilfs_dirty_folio(struct address_space *mapping,
		struct folio *folio)
{
	struct inode *inode = mapping->host;
	struct buffer_head *head;
	unsigned int nr_dirty = 0;
	bool ret = filemap_dirty_folio(mapping, folio);

	/*
	 * The page may not be locked, eg if called from try_to_unmap_one()
	 */
	spin_lock(&mapping->private_lock);
	head = folio_buffers(folio);
	if (head) {
		struct buffer_head *bh = head;

		do {
			/* Do not mark hole blocks dirty */
			if (buffer_dirty(bh) || !buffer_mapped(bh))
				continue;

			set_buffer_dirty(bh);
			nr_dirty++;
		} while (bh = bh->b_this_page, bh != head);
	} else if (ret) {
		nr_dirty = 1 << (folio_shift(folio) - inode->i_blkbits);
	}
	spin_unlock(&mapping->private_lock);

	if (nr_dirty)
		nilfs_set_file_dirty(inode, nr_dirty);
	return ret;
}

void nilfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		nilfs_truncate(inode);
	}
}

static int nilfs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len,
			     struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

	if (unlikely(err))
		return err;

	err = block_write_begin(mapping, pos, len, pagep, nilfs_get_block);
	if (unlikely(err)) {
		nilfs_write_failed(mapping, pos + len);
		nilfs_transaction_abort(inode->i_sb);
	}
	return err;
}

static int nilfs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned int start = pos & (PAGE_SIZE - 1);
	unsigned int nr_dirty;
	int err;

	nr_dirty = nilfs_page_count_clean_buffers(page, start,
						  start + copied);
	copied = generic_write_end(file, mapping, pos, len, copied, page,
				   fsdata);
	nilfs_set_file_dirty(inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
	return err ? : copied;
}

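/*
 * nilfs_direct_IO - implement the direct_IO() address space operation.
 *
 * Only direct reads are handled here.  For direct writes, zero is
 * returned so that the caller falls back to buffered writes, since
 * such writes would need synchronization with the cleaner.
 */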
static ssize_t
nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iov_iter_rw(iter) == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	return blockdev_direct_IO(iocb, inode, iter, nilfs_get_block);
}

const struct address_space_operations nilfs_aops = {
	.writepage		= nilfs_writepage,
	.read_folio		= nilfs_read_folio,
	.writepages		= nilfs_writepages,
	.dirty_folio		= nilfs_dirty_folio,
	.readahead		= nilfs_readahead,
	.write_begin		= nilfs_write_begin,
	.write_end		= nilfs_write_end,
	.invalidate_folio	= block_invalidate_folio,
	.direct_IO		= nilfs_direct_IO,
	.is_partially_uptodate	= block_is_partially_uptodate,
};

static int nilfs_insert_inode_locked(struct inode *inode,
				     struct nilfs_root *root,
				     unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = false,
		.for_btnc = false, .for_shadow = false
	};

	return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			   mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = BIT(NILFS_I_NEW);
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic64_inc(&root->inodes_count);
	inode_init_owner(&init_user_ns, inode, dir, mode);
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_after_creation;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = nilfs_mask_flags(
		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&nilfs->ns_next_gen_lock);
	inode->i_generation = nilfs->ns_next_generation++;
	spin_unlock(&nilfs->ns_next_gen_lock);
	if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
		err = -EIO;
		goto failed_after_creation;
	}

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		/*
		 * Never occurs.  When nilfs_init_acl() is supported,
		 * proper cancellation of the above jobs should be considered.
		 */
		goto failed_after_creation;

	return inode;

 failed_after_creation:
	clear_nlink(inode);
	if (inode->i_state & I_NEW)
		unlock_new_inode(inode);
	iput(inode);  /*
		       * raw_inode will be deleted through
		       * nilfs_evict_inode().
		       */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);
 failed:
	return ERR_PTR(err);
}

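/*
 * nilfs_set_inode_flags - propagate NILFS on-disk inode flags (FS_*_FL)
 * to the corresponding in-core VFS inode flags (S_*).
 */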
void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	inode_set_flags(inode, new_fl, S_SYNC | S_APPEND | S_IMMUTABLE |
			S_NOATIME | S_DIRSYNC);
}

int nilfs_read_inode_common(struct inode *inode,
			    struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
	i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
	inode->i_size = le64_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	if (inode->i_nlink == 0)
		return -ESTALE; /* this inode is deleted */

	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
		0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
	ii->i_dir_start_lookup = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
		if (err < 0)
			return err;
		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}
	return 0;
}

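/*
 * __nilfs_read_inode - read an inode from the ifile and initialize it.
 *
 * Reads the on-disk inode for @ino from the ifile of @root, fills in
 * the common fields with nilfs_read_inode_common(), and sets up the
 * type-specific inode, file and address space operations.
 */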
static int __nilfs_read_inode(struct super_block *sb,
			      struct nilfs_root *root, unsigned long ino,
			      struct inode *inode)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	nilfs_set_inode_flags(inode);
	mapping_set_gfp_mask(inode->i_mapping,
			   mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
	return 0;

 failed_unmap:
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);

 bad_inode:
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	return err;
}

static int nilfs_iget_test(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;
	struct nilfs_inode_info *ii;

	if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
		return 0;

	ii = NILFS_I(inode);
	if (test_bit(NILFS_I_BTNC, &ii->i_state)) {
		if (!args->for_btnc)
			return 0;
	} else if (args->for_btnc) {
		return 0;
	}
	if (test_bit(NILFS_I_SHADOW, &ii->i_state)) {
		if (!args->for_shadow)
			return 0;
	} else if (args->for_shadow) {
		return 0;
	}

	if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
		return !args->for_gc;

	return args->for_gc && args->cno == ii->i_cno;
}

static int nilfs_iget_set(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;

	inode->i_ino = args->ino;
	NILFS_I(inode)->i_cno = args->cno;
	NILFS_I(inode)->i_root = args->root;
	if (args->root && args->ino == NILFS_ROOT_INO)
		nilfs_get_root(args->root);

	if (args->for_gc)
		NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
	if (args->for_btnc)
		NILFS_I(inode)->i_state |= BIT(NILFS_I_BTNC);
	if (args->for_shadow)
		NILFS_I(inode)->i_state |= BIT(NILFS_I_SHADOW);
	return 0;
}

struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
			    unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = false,
		.for_btnc = false, .for_shadow = false
	};

	return ilookup5(sb, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
				unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = false,
		.for_btnc = false, .for_shadow = false
	};

	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}

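/*
 * nilfs_iget - obtain an inode on the checkpoint bound to @root.
 *
 * Looks the inode up in the inode cache and, if it is not cached yet,
 * reads it in from the ifile of @root.  Returns the inode on success,
 * or an ERR_PTR() value on failure.
 */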
struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
			 unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = nilfs_iget_locked(sb, root, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = __nilfs_read_inode(sb, root, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
				__u64 cno)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = NULL, .cno = cno, .for_gc = true,
		.for_btnc = false, .for_shadow = false
	};
	struct inode *inode;
	int err;

	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = nilfs_init_gcinode(inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

/**
 * nilfs_attach_btree_node_cache - attach a B-tree node cache to the inode
 * @inode: inode object
 *
 * nilfs_attach_btree_node_cache() attaches a B-tree node cache to @inode,
 * or does nothing if the inode already has it.  This function allocates
 * an additional inode to maintain the page cache of B-tree nodes one-on-one.
 *
 * Return Value: On success, 0 is returned.  On errors, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_attach_btree_node_cache(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *btnc_inode;
	struct nilfs_iget_args args;

	if (ii->i_assoc_inode)
		return 0;

	args.ino = inode->i_ino;
	args.root = ii->i_root;
	args.cno = ii->i_cno;
	args.for_gc = test_bit(NILFS_I_GCINODE, &ii->i_state) != 0;
	args.for_btnc = true;
	args.for_shadow = test_bit(NILFS_I_SHADOW, &ii->i_state) != 0;

	btnc_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
				  nilfs_iget_set, &args);
	if (unlikely(!btnc_inode))
		return -ENOMEM;
	if (btnc_inode->i_state & I_NEW) {
		nilfs_init_btnc_inode(btnc_inode);
		unlock_new_inode(btnc_inode);
	}
	NILFS_I(btnc_inode)->i_assoc_inode = inode;
	NILFS_I(btnc_inode)->i_bmap = ii->i_bmap;
	ii->i_assoc_inode = btnc_inode;

	return 0;
}

/**
 * nilfs_detach_btree_node_cache - detach the B-tree node cache from the inode
 * @inode: inode object
 *
 * nilfs_detach_btree_node_cache() detaches the B-tree node cache and its
 * holder inode bound to @inode, or does nothing if @inode doesn't have it.
 */
void nilfs_detach_btree_node_cache(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *btnc_inode = ii->i_assoc_inode;

	if (btnc_inode) {
		NILFS_I(btnc_inode)->i_assoc_inode = NULL;
		ii->i_assoc_inode = NULL;
		iput(btnc_inode);
	}
}

/**
 * nilfs_iget_for_shadow - obtain inode for shadow mapping
 * @inode: inode object that uses shadow mapping
 *
 * nilfs_iget_for_shadow() allocates a pair of inodes that holds page
 * caches for shadow mapping.  The page cache for data pages is set up
 * in one inode and the one for b-tree node pages is set up in the
 * other inode, which is attached to the former inode.
 *
 * Return Value: On success, a pointer to the inode for data pages is
 * returned.  On errors, one of the following negative error codes is
 * returned in a pointer type.
 *
 * %-ENOMEM - Insufficient memory available.
 */
struct inode *nilfs_iget_for_shadow(struct inode *inode)
{
	struct nilfs_iget_args args = {
		.ino = inode->i_ino, .root = NULL, .cno = 0, .for_gc = false,
		.for_btnc = false, .for_shadow = true
	};
	struct inode *s_inode;
	int err;

	s_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
			       nilfs_iget_set, &args);
	if (unlikely(!s_inode))
		return ERR_PTR(-ENOMEM);
	if (!(s_inode->i_state & I_NEW))
		return inode;

	NILFS_I(s_inode)->i_flags = 0;
	memset(NILFS_I(s_inode)->i_bmap, 0, sizeof(struct nilfs_bmap));
	mapping_set_gfp_mask(s_inode->i_mapping, GFP_NOFS);

	err = nilfs_attach_btree_node_cache(s_inode);
	if (unlikely(err)) {
		iget_failed(s_inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(s_inode);
	return s_inode;
}

void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode, int has_bmap)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
	raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
		struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

		/* zero-fill unused portion in the case of super root block */
		raw_inode->i_xattr = 0;
		raw_inode->i_pad = 0;
		memset((void *)raw_inode + sizeof(*raw_inode), 0,
		       nilfs->ns_inode_size - sizeof(*raw_inode));
	}

	if (has_bmap)
		nilfs_bmap_write(ii->i_bmap, raw_inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(huge_encode_dev(inode->i_rdev));
	/*
	 * When extending the inode, nilfs->ns_inode_size should be checked
	 * for substitutions of appended fields.
	 */
}

void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *ifile = ii->i_root->ifile;
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
	if (flags & I_DIRTY_DATASYNC)
		set_bit(NILFS_I_INODE_SYNC, &ii->i_state);

	nilfs_write_inode_common(inode, raw_inode, 0);
	/*
	 * XXX: call with has_bmap = 0 is a workaround to avoid
	 * deadlock of bmap.  This delays update of i_bmap to just
	 * before writing.
	 */

	nilfs_ifile_unmap_inode(ifile, ino, ibh);
}

#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */

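/*
 * nilfs_truncate_bmap - truncate the block mapping of an inode so that
 * no block remains at or beyond file block @from.  Truncation proceeds
 * in chunks of at most NILFS_MAX_TRUNCATE_BLOCKS blocks, calling
 * nilfs_relax_pressure_in_lock() between chunks.
 */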
static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	__u64 b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	b -= min_t(__u64, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

failed:
	nilfs_warn(ii->vfs_inode.i_sb, "error %d truncating bmap (ino=%lu)",
		   ret, ii->vfs_inode.i_ino);
}

void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode->i_mtime = inode->i_ctime = current_time(inode);
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_mark_inode_dirty(inode);
	nilfs_set_file_dirty(inode, 0);
	nilfs_transaction_commit(sb);
	/*
	 * May construct a logical segment and may fail in sync mode.
	 * But truncate has no return value.
	 */
}

static void nilfs_clear_inode(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	/*
	 * Free resources allocated in nilfs_read_inode(), here.
	 */
	BUG_ON(!list_empty(&ii->i_dirty));
	brelse(ii->i_bh);
	ii->i_bh = NULL;

	if (nilfs_is_metadata_file_inode(inode))
		nilfs_mdt_clear(inode);

	if (test_bit(NILFS_I_BMAP, &ii->i_state))
		nilfs_bmap_clear(ii->i_bmap);

	if (!test_bit(NILFS_I_BTNC, &ii->i_state))
		nilfs_detach_btree_node_cache(inode);

	if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
		nilfs_put_root(ii->i_root);
}

void nilfs_evict_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int ret;

	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
		nilfs_clear_inode(inode);
		return;
	}
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	truncate_inode_pages_final(&inode->i_data);

	/* TODO: some of the following operations may fail.  */
	nilfs_truncate_bmap(ii, 0);
	nilfs_mark_inode_dirty(inode);
	clear_inode(inode);

	ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
	if (!ret)
		atomic64_dec(&ii->i_root->inodes_count);

	nilfs_clear_inode(inode);

	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	nilfs_transaction_commit(sb);
	/*
	 * May construct a logical segment and may fail in sync mode.
	 * But delete_inode has no return value.
	 */
}

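/*
 * nilfs_setattr - change attributes of an inode.  A size change goes
 * through truncate_setsize() and nilfs_truncate(); the whole update is
 * wrapped in a NILFS transaction.
 */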
int nilfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
		  struct iattr *iattr)
{
	struct nilfs_transaction_info ti;
	struct inode *inode = d_inode(dentry);
	struct super_block *sb = inode->i_sb;
	int err;

	err = setattr_prepare(&init_user_ns, dentry, iattr);
	if (err)
		return err;

	err = nilfs_transaction_begin(sb, &ti, 0);
	if (unlikely(err))
		return err;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		inode_dio_wait(inode);
		truncate_setsize(inode, iattr->ia_size);
		nilfs_truncate(inode);
	}

	setattr_copy(&init_user_ns, inode, iattr);
	mark_inode_dirty(inode);

	if (iattr->ia_valid & ATTR_MODE) {
		err = nilfs_acl_chmod(inode);
		if (unlikely(err))
			goto out_err;
	}

	return nilfs_transaction_commit(sb);

out_err:
	nilfs_transaction_abort(sb);
	return err;
}

int nilfs_permission(struct user_namespace *mnt_userns, struct inode *inode,
		     int mask)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	if ((mask & MAY_WRITE) && root &&
	    root->cno != NILFS_CPTREE_CURRENT_CNO)
		return -EROFS; /* snapshot is not writable */

	return generic_permission(&init_user_ns, inode, mask);
}

int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	spin_lock(&nilfs->ns_inode_lock);
	if (ii->i_bh == NULL) {
		spin_unlock(&nilfs->ns_inode_lock);
		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
						  inode->i_ino, pbh);
		if (unlikely(err))
			return err;
		spin_lock(&nilfs->ns_inode_lock);
		if (ii->i_bh == NULL)
			ii->i_bh = *pbh;
		else {
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else
		*pbh = ii->i_bh;

	get_bh(*pbh);
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

int nilfs_inode_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	int ret = 0;

	if (!list_empty(&ii->i_dirty)) {
		spin_lock(&nilfs->ns_inode_lock);
		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
			test_bit(NILFS_I_BUSY, &ii->i_state);
		spin_unlock(&nilfs->ns_inode_lock);
	}
	return ret;
}

int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

	atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);

	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/*
		 * Because this routine may race with nilfs_dispose_list(),
		 * we have to check NILFS_I_QUEUED here, too.
		 */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/*
			 * This will happen when somebody is freeing
			 * this inode.
			 */
			nilfs_warn(inode->i_sb,
				   "cannot set file dirty (ino=%lu): the file is being freed",
				   inode->i_ino);
			spin_unlock(&nilfs->ns_inode_lock);
			return -EINVAL; /*
					 * NILFS_I_DIRTY may remain for
					 * freeing inode.
					 */
		}
		list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

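/*
 * __nilfs_mark_inode_dirty - copy the in-core inode into its ifile block
 * and mark the block and the ifile dirty.  Unlike nilfs_dirty_inode(),
 * the caller is responsible for providing the enclosing transaction.
 */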
int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
{
	struct buffer_head *ibh;
	int err;

	err = nilfs_load_inode_block(inode, &ibh);
	if (unlikely(err)) {
		nilfs_warn(inode->i_sb,
			   "cannot mark inode dirty (ino=%lu): error %d loading inode block",
			   inode->i_ino, err);
		return err;
	}
	nilfs_update_inode(inode, ibh, flags);
	mark_buffer_dirty(ibh);
	nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
	brelse(ibh);
	return 0;
}

/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 * @flags: flags to determine the dirty state of the inode
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from a nilfs_inode to a corresponding inode
 * entry in the inode block.  This operation is excluded from the segment
 * construction.  This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode, int flags)
{
	struct nilfs_transaction_info ti;
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	if (is_bad_inode(inode)) {
		nilfs_warn(inode->i_sb,
			   "tried to mark bad_inode dirty. ignored.");
		dump_stack();
		return;
	}
	if (mdi) {
		nilfs_mdt_mark_dirty(inode);
		return;
	}
	nilfs_transaction_begin(inode->i_sb, &ti, 0);
	__nilfs_mark_inode_dirty(inode, flags);
	nilfs_transaction_commit(inode->i_sb); /* never fails */
}

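/*
 * nilfs_fiemap - implement the fiemap ioctl for NILFS.
 *
 * Extents are assembled from two sources: uncommitted (delayed
 * allocation) extents found with nilfs_find_uncommitted_extent(), and
 * committed extents looked up through the bmap.  Physically contiguous
 * blocks are merged into a single extent where possible.
 */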
int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 __u64 start, __u64 len)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 logical = 0, phys = 0, size = 0;
	__u32 flags = 0;
	loff_t isize;
	sector_t blkoff, end_blkoff;
	sector_t delalloc_blkoff;
	unsigned long delalloc_blklen;
	unsigned int blkbits = inode->i_blkbits;
	int ret, n;

	ret = fiemap_prep(inode, fieinfo, start, &len, 0);
	if (ret)
		return ret;

	inode_lock(inode);

	isize = i_size_read(inode);

	blkoff = start >> blkbits;
	end_blkoff = (start + len - 1) >> blkbits;

	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
							&delalloc_blkoff);

	do {
		__u64 blkphy;
		unsigned int maxblocks;

		if (delalloc_blklen && blkoff == delalloc_blkoff) {
			if (size) {
				/* End of the current extent */
				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
			}
			if (blkoff > end_blkoff)
				break;

			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
			logical = blkoff << blkbits;
			phys = 0;
			size = delalloc_blklen << blkbits;

			blkoff = delalloc_blkoff + delalloc_blklen;
			delalloc_blklen = nilfs_find_uncommitted_extent(
				inode, blkoff, &delalloc_blkoff);
			continue;
		}

		/*
		 * Limit the number of blocks that we look up so as
		 * not to get into the next delayed allocation extent.
		 */
		maxblocks = INT_MAX;
		if (delalloc_blklen)
			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
					  maxblocks);
		blkphy = 0;

		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
		n = nilfs_bmap_lookup_contig(
			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

		if (n < 0) {
			int past_eof;

			if (unlikely(n != -ENOENT))
				break; /* error */

			/* HOLE */
			blkoff++;
			past_eof = ((blkoff << blkbits) >= isize);

			if (size) {
				/* End of the current extent */

				if (past_eof)
					flags |= FIEMAP_EXTENT_LAST;

				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
				size = 0;
			}
			if (blkoff > end_blkoff || past_eof)
				break;
		} else {
			if (size) {
				if (phys && blkphy << blkbits == phys + size) {
					/* The current extent goes on */
					size += n << blkbits;
				} else {
					/* Terminate the current extent */
					ret = fiemap_fill_next_extent(
						fieinfo, logical, phys, size,
						flags);
					if (ret || blkoff > end_blkoff)
						break;

					/* Start another extent */
					flags = FIEMAP_EXTENT_MERGED;
					logical = blkoff << blkbits;
					phys = blkphy << blkbits;
					size = n << blkbits;
				}
			} else {
				/* Start a new extent */
				flags = FIEMAP_EXTENT_MERGED;
				logical = blkoff << blkbits;
				phys = blkphy << blkbits;
				size = n << blkbits;
			}
			blkoff += n;
		}
		cond_resched();
	} while (true);

	/* If ret is 1 then we just hit the end of the extent array */
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}