/*
 * inode.c - NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"

/**
 * struct nilfs_iget_args - arguments used during comparison between inodes
 * @ino: inode number
 * @cno: checkpoint number
 * @root: pointer on NILFS root object (mounted checkpoint)
 * @for_gc: inode for GC flag
 */
struct nilfs_iget_args {
	u64 ino;
	__u64 cno;
	struct nilfs_root *root;
	int for_gc;
};

static int nilfs_iget_test(struct inode *inode, void *opaque);

void nilfs_inode_add_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_add_bytes(inode, (1 << inode->i_blkbits) * n);
	if (root)
		atomic64_add(n, &root->blocks_count);
}

void nilfs_inode_sub_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_sub_bytes(inode, (1 << inode->i_blkbits) * n);
	if (root)
		atomic64_sub(n, &root->blocks_count);
}

/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode - inode struct of the target file
 * @blkoff - file block number
 * @bh_result - buffer head to be mapped on
 * @create - indicate whether allocating the block or not when it has not
 *	been allocated yet.
 *
 * This function does not issue actual read request of the specified data
 * block. It is done by VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 blknum = 0;
	int err = 0, ret;
	unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	if (ret >= 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		if (ret > 0)
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * The get_block() function could be called
				 * from multiple callers for an inode.
				 * However, the page having this block must
				 * be locked in this case.
				 */
				printk(KERN_WARNING
				       "nilfs_get_block: a race condition "
				       "while inserting a data block. "
				       "(inode number=%lu, file block "
				       "offset=%llu)\n",
				       inode->i_ino,
				       (unsigned long long)blkoff);
				err = 0;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_mark_inode_dirty_sync(inode);
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		set_buffer_delay(bh_result);
		map_bh(bh_result, inode->i_sb, 0); /* dbn must be changed
						      to proper value */
	} else if (ret == -ENOENT) {
		/* not found is not error (e.g. hole); must return without
		   the mapped state flag. */
		;
	} else {
		err = ret;
	}

 out:
	return err;
}

/**
 * nilfs_readpage() - implement readpage() method of nilfs_aops {}
 * address_space_operations.
 * @file - file struct of the file to be read
 * @page - the page to be read
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, nilfs_get_block);
}

/**
 * nilfs_readpages() - implement readpages() method of nilfs_aops {}
 * address_space_operations.
 * @file - file struct of the file to be read
 * @mapping - address_space struct used for reading multiple pages
 * @pages - the pages to be read
 * @nr_pages - number of pages to be read
 */
static int nilfs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
}

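/*
 * nilfs_writepages() - writepages() method of nilfs_aops.
 *
 * On a filesystem that has become read-only (e.g. after an error remount)
 * the remaining dirty pages are simply discarded.  For WB_SYNC_ALL
 * writeback, the data is flushed by constructing a data-sync segment
 * covering the requested byte range; other writeback is left to the
 * segment constructor.
 */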
static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err = 0;

	if (inode->i_sb->s_flags & MS_RDONLY) {
		nilfs_clear_dirty_pages(mapping, false);
		return -EROFS;
	}

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
						    wbc->range_start,
						    wbc->range_end);
	return err;
}

static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	if (inode->i_sb->s_flags & MS_RDONLY) {
		/*
		 * It means that filesystem was remounted in read-only
		 * mode because of error or metadata corruption. But we
		 * have dirty pages that try to be flushed in background.
		 * So, here we simply discard this dirty page.
		 */
		nilfs_clear_dirty_page(page, false);
		unlock_page(page);
		return -EROFS;
	}

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}

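/*
 * nilfs_set_page_dirty() - set_page_dirty() method of nilfs_aops.
 *
 * Besides marking the page dirty, the number of newly dirtied blocks is
 * registered with nilfs_set_file_dirty() so that the inode gets queued
 * for segment construction.
 */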
static int nilfs_set_page_dirty(struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = __set_page_dirty_nobuffers(page);

	if (page_has_buffers(page)) {
		unsigned nr_dirty = 0;
		struct buffer_head *bh, *head;

		/*
		 * This page is locked by callers, and no other thread
		 * concurrently marks its buffers dirty since they are
		 * only dirtied through routines in fs/buffer.c in
		 * which call sites of mark_buffer_dirty are protected
		 * by page lock.
		 */
		bh = head = page_buffers(page);
		do {
			/* Do not mark hole blocks dirty */
			if (buffer_dirty(bh) || !buffer_mapped(bh))
				continue;

			set_buffer_dirty(bh);
			nr_dirty++;
		} while (bh = bh->b_this_page, bh != head);

		if (nr_dirty)
			nilfs_set_file_dirty(inode, nr_dirty);
	} else if (ret) {
		unsigned nr_dirty = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);

		nilfs_set_file_dirty(inode, nr_dirty);
	}
	return ret;
}

void nilfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		nilfs_truncate(inode);
	}
}

static int nilfs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

	if (unlikely(err))
		return err;

	err = block_write_begin(mapping, pos, len, flags, pagep,
				nilfs_get_block);
	if (unlikely(err)) {
		nilfs_write_failed(mapping, pos + len);
		nilfs_transaction_abort(inode->i_sb);
	}
	return err;
}

static int nilfs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
	unsigned nr_dirty;
	int err;

	nr_dirty = nilfs_page_count_clean_buffers(page, start,
						  start + copied);
	copied = generic_write_end(file, mapping, pos, len, copied, page,
				   fsdata);
	nilfs_set_file_dirty(inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
	return err ? : copied;
}

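/*
 * nilfs_direct_IO() - direct_IO() method of nilfs_aops.
 *
 * Direct writes are declined (zero is returned) so that they fall back
 * to buffered writes; direct reads are handed to blockdev_direct_IO().
 */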
static ssize_t
nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = file->f_mapping->host;
	size_t count = iov_iter_count(iter);
	ssize_t size;

	if (iov_iter_rw(iter) == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	size = blockdev_direct_IO(iocb, inode, iter, offset, nilfs_get_block);

	/*
	 * In case of error extending write may have instantiated a few
	 * blocks outside i_size. Trim these off again.
	 */
	if (unlikely(iov_iter_rw(iter) == WRITE && size < 0)) {
		loff_t isize = i_size_read(inode);
		loff_t end = offset + count;

		if (end > isize)
			nilfs_write_failed(mapping, end);
	}

	return size;
}

const struct address_space_operations nilfs_aops = {
	.writepage		= nilfs_writepage,
	.readpage		= nilfs_readpage,
	.writepages		= nilfs_writepages,
	.set_page_dirty		= nilfs_set_page_dirty,
	.readpages		= nilfs_readpages,
	.write_begin		= nilfs_write_begin,
	.write_end		= nilfs_write_end,
	/* .releasepage		= nilfs_releasepage, */
	.invalidatepage		= block_invalidatepage,
	.direct_IO		= nilfs_direct_IO,
	.is_partially_uptodate	= block_is_partially_uptodate,
};

static int nilfs_insert_inode_locked(struct inode *inode,
				     struct nilfs_root *root,
				     unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
}

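/*
 * nilfs_new_inode() - allocate a new inode under the directory @dir.
 *
 * An on-disk inode is allocated in the ifile of the root checkpoint that
 * @dir belongs to, and the in-core inode is initialized from @mode and
 * @dir.  Returns the new inode on success or an ERR_PTR() value on error.
 */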
struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = 1 << NILFS_I_NEW;
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic64_inc(&root->inodes_count);
	inode_init_owner(inode, dir, mode);
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_after_creation;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = nilfs_mask_flags(
		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&nilfs->ns_next_gen_lock);
	inode->i_generation = nilfs->ns_next_generation++;
	spin_unlock(&nilfs->ns_next_gen_lock);
	if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
		err = -EIO;
		goto failed_after_creation;
	}

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		goto failed_after_creation; /* never occur. When supporting
					       nilfs_init_acl(), proper
					       cancellation of above jobs
					       should be considered */

	return inode;

 failed_after_creation:
	clear_nlink(inode);
	unlock_new_inode(inode);
	iput(inode);  /* raw_inode will be deleted through
			 nilfs_evict_inode() */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
			 called */
 failed:
	return ERR_PTR(err);
}

void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;

	inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
			    S_DIRSYNC);
	if (flags & FS_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & FS_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & FS_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
	if (flags & FS_NOATIME_FL)
		inode->i_flags |= S_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
}

int nilfs_read_inode_common(struct inode *inode,
			    struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
	i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
	inode->i_size = le64_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	if (inode->i_nlink == 0)
		return -ESTALE; /* this inode is deleted */

	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
		0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
	ii->i_dir_start_lookup = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
		if (err < 0)
			return err;
		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}
	return 0;
}

static int __nilfs_read_inode(struct super_block *sb,
			      struct nilfs_root *root, unsigned long ino,
			      struct inode *inode)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	nilfs_set_inode_flags(inode);
	return 0;

 failed_unmap:
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);

 bad_inode:
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	return err;
}

static int nilfs_iget_test(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;
	struct nilfs_inode_info *ii;

	if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
		return 0;

	ii = NILFS_I(inode);
	if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
		return !args->for_gc;

	return args->for_gc && args->cno == ii->i_cno;
}

static int nilfs_iget_set(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;

	inode->i_ino = args->ino;
	if (args->for_gc) {
		NILFS_I(inode)->i_state = 1 << NILFS_I_GCINODE;
		NILFS_I(inode)->i_cno = args->cno;
		NILFS_I(inode)->i_root = NULL;
	} else {
		if (args->root && args->ino == NILFS_ROOT_INO)
			nilfs_get_root(args->root);
		NILFS_I(inode)->i_root = args->root;
	}
	return 0;
}

struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
			    unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return ilookup5(sb, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
				unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}

struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
			 unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = nilfs_iget_locked(sb, root, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = __nilfs_read_inode(sb, root, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

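/*
 * nilfs_iget_for_gc() - obtain an isolated GC inode.
 *
 * The returned inode is not attached to any nilfs_root; it is keyed by
 * inode number and checkpoint number @cno and is used by the cleaner to
 * read blocks of the checkpoint being collected.
 */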
struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
				__u64 cno)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = NULL, .cno = cno, .for_gc = 1
	};
	struct inode *inode;
	int err;

	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = nilfs_init_gcinode(inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode, int has_bmap)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
	raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
		struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

		/* zero-fill unused portion in the case of super root block */
		raw_inode->i_xattr = 0;
		raw_inode->i_pad = 0;
		memset((void *)raw_inode + sizeof(*raw_inode), 0,
		       nilfs->ns_inode_size - sizeof(*raw_inode));
	}

	if (has_bmap)
		nilfs_bmap_write(ii->i_bmap, raw_inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(huge_encode_dev(inode->i_rdev));
	/* When extending inode, nilfs->ns_inode_size should be checked
	   for substitutions of appended fields */
}

void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *ifile = ii->i_root->ifile;
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
	if (flags & I_DIRTY_DATASYNC)
		set_bit(NILFS_I_INODE_SYNC, &ii->i_state);

	nilfs_write_inode_common(inode, raw_inode, 0);
		/* XXX: call with has_bmap = 0 is a workaround to avoid
		   deadlock of bmap. This delays update of i_bmap to just
		   before writing */
	nilfs_ifile_unmap_inode(ifile, ino, ibh);
}

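/*
 * A truncation is split into passes of at most NILFS_MAX_TRUNCATE_BLOCKS
 * blocks per nilfs_bmap_truncate() call, with nilfs_relax_pressure_in_lock()
 * run between passes, which appears intended to bound the amount of work
 * queued up inside the ongoing transaction.
 */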
#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */

static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	unsigned long b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

failed:
	nilfs_warning(ii->vfs_inode.i_sb, __func__,
		      "failed to truncate bmap (ino=%lu, err=%d)",
		      ii->vfs_inode.i_ino, ret);
}

void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_mark_inode_dirty(inode);
	nilfs_set_file_dirty(inode, 0);
	nilfs_transaction_commit(sb);
	/* May construct a logical segment and may fail in sync mode.
	   But truncate has no return value. */
}

static void nilfs_clear_inode(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	/*
	 * Free resources allocated in nilfs_read_inode(), here.
	 */
	BUG_ON(!list_empty(&ii->i_dirty));
	brelse(ii->i_bh);
	ii->i_bh = NULL;

	if (mdi && mdi->mi_palloc_cache)
		nilfs_palloc_destroy_cache(inode);

	if (test_bit(NILFS_I_BMAP, &ii->i_state))
		nilfs_bmap_clear(ii->i_bmap);

	nilfs_btnode_cache_clear(&ii->i_btnode_cache);

	if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
		nilfs_put_root(ii->i_root);
}

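/*
 * nilfs_evict_inode() - final cleanup when an inode is evicted from memory.
 *
 * For inodes that still have links, have no root attached, or are bad,
 * only the page cache and in-core state are torn down.  Unlinked inodes
 * additionally get their bmap truncated and their ifile entry deleted,
 * all inside a transaction.
 */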
void nilfs_evict_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int ret;

	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
		nilfs_clear_inode(inode);
		return;
	}
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	truncate_inode_pages_final(&inode->i_data);

	/* TODO: some of the following operations may fail. */
	nilfs_truncate_bmap(ii, 0);
	nilfs_mark_inode_dirty(inode);
	clear_inode(inode);

	ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
	if (!ret)
		atomic64_dec(&ii->i_root->inodes_count);

	nilfs_clear_inode(inode);

	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	nilfs_transaction_commit(sb);
	/* May construct a logical segment and may fail in sync mode.
	   But delete_inode has no return value. */
}

int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct nilfs_transaction_info ti;
	struct inode *inode = dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	int err;

	err = inode_change_ok(inode, iattr);
	if (err)
		return err;

	err = nilfs_transaction_begin(sb, &ti, 0);
	if (unlikely(err))
		return err;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		inode_dio_wait(inode);
		truncate_setsize(inode, iattr->ia_size);
		nilfs_truncate(inode);
	}

	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);

	if (iattr->ia_valid & ATTR_MODE) {
		err = nilfs_acl_chmod(inode);
		if (unlikely(err))
			goto out_err;
	}

	return nilfs_transaction_commit(sb);

out_err:
	nilfs_transaction_abort(sb);
	return err;
}

int nilfs_permission(struct inode *inode, int mask)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;
	if ((mask & MAY_WRITE) && root &&
	    root->cno != NILFS_CPTREE_CURRENT_CNO)
		return -EROFS; /* snapshot is not writable */

	return generic_permission(inode, mask);
}

int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	spin_lock(&nilfs->ns_inode_lock);
	if (ii->i_bh == NULL) {
		spin_unlock(&nilfs->ns_inode_lock);
		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
						  inode->i_ino, pbh);
		if (unlikely(err))
			return err;
		spin_lock(&nilfs->ns_inode_lock);
		if (ii->i_bh == NULL)
			ii->i_bh = *pbh;
		else {
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else
		*pbh = ii->i_bh;

	get_bh(*pbh);
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

int nilfs_inode_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	int ret = 0;

	if (!list_empty(&ii->i_dirty)) {
		spin_lock(&nilfs->ns_inode_lock);
		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
			test_bit(NILFS_I_BUSY, &ii->i_state);
		spin_unlock(&nilfs->ns_inode_lock);
	}
	return ret;
}

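/*
 * nilfs_set_file_dirty() - account dirty blocks and queue the inode.
 *
 * @nr_dirty newly dirtied blocks are added to ns_ndirtyblks, and the inode
 * is moved onto the ns_dirty_files list (taking a reference via igrab())
 * if it is not already registered with the segment constructor.
 */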
int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

	atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);

	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/* Because this routine may race with nilfs_dispose_list(),
		   we have to check NILFS_I_QUEUED here, too. */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/* This will happen when somebody is freeing
			   this inode. */
			nilfs_warning(inode->i_sb, __func__,
				      "cannot get inode (ino=%lu)\n",
				      inode->i_ino);
			spin_unlock(&nilfs->ns_inode_lock);
			return -EINVAL; /* NILFS_I_DIRTY may remain for
					   freeing inode */
		}
		list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
{
	struct buffer_head *ibh;
	int err;

	err = nilfs_load_inode_block(inode, &ibh);
	if (unlikely(err)) {
		nilfs_warning(inode->i_sb, __func__,
			      "failed to reget inode block.\n");
		return err;
	}
	nilfs_update_inode(inode, ibh, flags);
	mark_buffer_dirty(ibh);
	nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
	brelse(ibh);
	return 0;
}

/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from a nilfs_inode to a corresponding inode
 * entry in the inode block. This operation is excluded from the segment
 * construction. This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode, int flags)
{
	struct nilfs_transaction_info ti;
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	if (is_bad_inode(inode)) {
		nilfs_warning(inode->i_sb, __func__,
			      "tried to mark bad_inode dirty. ignored.\n");
		dump_stack();
		return;
	}
	if (mdi) {
		nilfs_mdt_mark_dirty(inode);
		return;
	}
	nilfs_transaction_begin(inode->i_sb, &ti, 0);
	__nilfs_mark_inode_dirty(inode, flags);
	nilfs_transaction_commit(inode->i_sb); /* never fails */
}

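/*
 * nilfs_fiemap() - report file extents for the FIEMAP ioctl.
 *
 * Extents come from two sources: uncommitted (delayed allocation) extents
 * found with nilfs_find_uncommitted_extent(), and on-disk extents looked
 * up through the bmap.  Contiguous on-disk blocks are merged before being
 * handed to fiemap_fill_next_extent().
 */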
int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 __u64 start, __u64 len)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 logical = 0, phys = 0, size = 0;
	__u32 flags = 0;
	loff_t isize;
	sector_t blkoff, end_blkoff;
	sector_t delalloc_blkoff;
	unsigned long delalloc_blklen;
	unsigned int blkbits = inode->i_blkbits;
	int ret, n;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);

	blkoff = start >> blkbits;
	end_blkoff = (start + len - 1) >> blkbits;

	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
							&delalloc_blkoff);

	do {
		__u64 blkphy;
		unsigned int maxblocks;

		if (delalloc_blklen && blkoff == delalloc_blkoff) {
			if (size) {
				/* End of the current extent */
				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
			}
			if (blkoff > end_blkoff)
				break;

			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
			logical = blkoff << blkbits;
			phys = 0;
			size = delalloc_blklen << blkbits;

			blkoff = delalloc_blkoff + delalloc_blklen;
			delalloc_blklen = nilfs_find_uncommitted_extent(
				inode, blkoff, &delalloc_blkoff);
			continue;
		}

		/*
		 * Limit the number of blocks that we look up so as
		 * not to get into the next delayed allocation extent.
		 */
		maxblocks = INT_MAX;
		if (delalloc_blklen)
			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
					  maxblocks);
		blkphy = 0;

		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
		n = nilfs_bmap_lookup_contig(
			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

		if (n < 0) {
			int past_eof;

			if (unlikely(n != -ENOENT))
				break; /* error */

			/* HOLE */
			blkoff++;
			past_eof = ((blkoff << blkbits) >= isize);

			if (size) {
				/* End of the current extent */

				if (past_eof)
					flags |= FIEMAP_EXTENT_LAST;

				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
				size = 0;
			}
			if (blkoff > end_blkoff || past_eof)
				break;
		} else {
			if (size) {
				if (phys && blkphy << blkbits == phys + size) {
					/* The current extent goes on */
					size += n << blkbits;
				} else {
					/* Terminate the current extent */
					ret = fiemap_fill_next_extent(
						fieinfo, logical, phys, size,
						flags);
					if (ret || blkoff > end_blkoff)
						break;

					/* Start another extent */
					flags = FIEMAP_EXTENT_MERGED;
					logical = blkoff << blkbits;
					phys = blkphy << blkbits;
					size = n << blkbits;
				}
			} else {
				/* Start a new extent */
				flags = FIEMAP_EXTENT_MERGED;
				logical = blkoff << blkbits;
				phys = blkphy << blkbits;
				size = n << blkbits;
			}
			blkoff += n;
		}
		cond_resched();
	} while (true);

	/* If ret is 1 then we just hit the end of the extent array */
	if (ret == 1)
		ret = 0;

	mutex_unlock(&inode->i_mutex);
	return ret;
}