/*
 * inode.c - NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include "nilfs.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"


/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode: inode struct of the target file
 * @blkoff: file block number
 * @bh_result: buffer head to be mapped on
 * @create: indicate whether to allocate the block when it has not been
 *	allocated yet.
 *
 * This function does not issue an actual read request for the specified
 * data block; reading is done by the VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	__u64 blknum = 0;
	int err = 0, ret;
	struct inode *dat = nilfs_dat_inode(NILFS_I_NILFS(inode));
	unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;

	down_read(&NILFS_MDT(dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(dat)->mi_sem);
	if (ret >= 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		if (ret > 0)
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * The get_block() function could be called
				 * from multiple callers for an inode.
				 * However, the page having this block must
				 * be locked in this case.
				 */
				printk(KERN_WARNING
				       "nilfs_get_block: a race condition "
				       "while inserting a data block. "
				       "(inode number=%lu, file block "
				       "offset=%llu)\n",
				       inode->i_ino,
				       (unsigned long long)blkoff);
				err = 0;
			} else if (err == -EINVAL) {
				nilfs_error(inode->i_sb, __func__,
					    "broken bmap (inode=%lu)\n",
					    inode->i_ino);
				err = -EIO;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb, 0); /* dbn must be changed
						      to proper value */
	} else if (ret == -ENOENT) {
		/* not found is not an error (e.g. a hole); must return
		   without the mapped state flag. */
		;
	} else {
		err = ret;
	}

 out:
	return err;
}

/**
 * nilfs_readpage() - implement readpage() method of the nilfs_aops
 * address_space_operations.
 * @file: file struct of the file to be read
 * @page: the page to be read
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, nilfs_get_block);
}

/**
 * nilfs_readpages() - implement readpages() method of the nilfs_aops
 * address_space_operations.
 * @file: file struct of the file to be read
 * @mapping: address_space struct used for reading multiple pages
 * @pages: the pages to be read
 * @nr_pages: number of pages to be read
 */
static int nilfs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
}
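/*
 * NILFS normally writes dirty pages out through its own segment
 * constructor rather than through generic block-device writeback, so
 * nilfs_writepages() only has work to do for synchronous (WB_SYNC_ALL)
 * requests, for which it constructs a data-sync segment covering the
 * requested range.
 */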
static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err = 0;

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
						    wbc->range_start,
						    wbc->range_end);
	return err;
}

static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}

static int nilfs_set_page_dirty(struct page *page)
{
	int ret = __set_page_dirty_buffers(page);

	if (ret) {
		struct inode *inode = page->mapping->host;
		struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
		unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);

		nilfs_set_file_dirty(sbi, inode, nr_dirty);
	}
	return ret;
}

static int nilfs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

	if (unlikely(err))
		return err;

	*pagep = NULL;
	err = block_write_begin(file, mapping, pos, len, flags, pagep,
				fsdata, nilfs_get_block);
	if (unlikely(err))
		nilfs_transaction_abort(inode->i_sb);
	return err;
}

static int nilfs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
	unsigned nr_dirty;
	int err;

	nr_dirty = nilfs_page_count_clean_buffers(page, start,
						  start + copied);
	copied = generic_write_end(file, mapping, pos, len, copied, page,
				   fsdata);
	nilfs_set_file_dirty(NILFS_SB(inode->i_sb), inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
	return err ? : copied;
}
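/*
 * Direct I/O is supported for reads only: a write request simply
 * returns 0, leaving the data to the buffered write path, while reads
 * are passed to blockdev_direct_IO() with nilfs_get_block() as the
 * block mapper.
 */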
static ssize_t
nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
		loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t size;

	if (rw == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	size = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				  offset, nr_segs, nilfs_get_block, NULL);
	return size;
}

struct address_space_operations nilfs_aops = {
	.writepage		= nilfs_writepage,
	.readpage		= nilfs_readpage,
	.sync_page		= block_sync_page,
	.writepages		= nilfs_writepages,
	.set_page_dirty		= nilfs_set_page_dirty,
	.readpages		= nilfs_readpages,
	.write_begin		= nilfs_write_begin,
	.write_end		= nilfs_write_end,
	/* .releasepage		= nilfs_releasepage, */
	.invalidatepage		= block_invalidatepage,
	.direct_IO		= nilfs_direct_IO,
	.is_partially_uptodate	= block_is_partially_uptodate,
};
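/*
 * nilfs_new_inode() creates a fresh in-core inode under @dir: it
 * allocates an inode number and an on-disk inode block from the ifile,
 * inherits flags from the parent directory, and attaches an empty bmap
 * for regular files, directories, and symlinks.  Returns the new inode
 * marked dirty, or an ERR_PTR value on failure.
 */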
struct inode *nilfs_new_inode(struct inode *dir, int mode)
{
	struct super_block *sb = dir->i_sb;
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct inode *inode;
	struct nilfs_inode_info *ii;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

	ii = NILFS_I(inode);
	ii->i_state = 1 << NILFS_I_NEW;

	err = nilfs_ifile_create_inode(sbi->s_ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic_inc(&sbi->s_inodes_count);

	inode->i_uid = current_fsuid();
	if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current_fsgid();

	inode->i_mode = mode;
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_bmap;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = NILFS_I(dir)->i_flags;
	if (S_ISLNK(mode))
		ii->i_flags &= ~(NILFS_IMMUTABLE_FL | NILFS_APPEND_FL);
	if (!S_ISDIR(mode))
		ii->i_flags &= ~NILFS_DIRSYNC_FL;

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	ii->i_cno = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);
	insert_inode_hash(inode);

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		goto failed_acl; /* never occurs. When nilfs_init_acl() is
				    supported, proper cancellation of the
				    jobs above should be considered */

	mark_inode_dirty(inode);
	return inode;

 failed_acl:
 failed_bmap:
	inode->i_nlink = 0;
	iput(inode);  /* raw_inode will be deleted through
			 generic_delete_inode() */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
			 called */
 failed:
	return ERR_PTR(err);
}

void nilfs_free_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct nilfs_sb_info *sbi = NILFS_SB(sb);

	clear_inode(inode);
	/* XXX: check error code? Is there anything I can do? */
	(void) nilfs_ifile_delete_inode(sbi->s_ifile, inode->i_ino);
	atomic_dec(&sbi->s_inodes_count);
}

void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;

	inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
			    S_DIRSYNC);
	if (flags & NILFS_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & NILFS_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & NILFS_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
#ifndef NILFS_ATIME_DISABLE
	if (flags & NILFS_NOATIME_FL)
#endif
		inode->i_flags |= S_NOATIME;
	if (flags & NILFS_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
}
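/*
 * nilfs_read_inode_common() fills the VFS inode in from an on-disk
 * nilfs_inode.  Note that i_atime is taken from the on-disk mtime
 * fields; the on-disk inode keeps no separate atime.
 */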
int nilfs_read_inode_common(struct inode *inode,
			    struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	inode->i_uid = (uid_t)le32_to_cpu(raw_inode->i_uid);
	inode->i_gid = (gid_t)le32_to_cpu(raw_inode->i_gid);
	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
	inode->i_size = le64_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	if (inode->i_nlink == 0 && inode->i_mode == 0)
		return -EINVAL; /* this inode is deleted */

	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
		0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
	ii->i_cno = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
		if (err < 0)
			return err;
		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}
	return 0;
}

static int __nilfs_read_inode(struct super_block *sb, unsigned long ino,
			      struct inode *inode)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct inode *dat = nilfs_dat_inode(sbi->s_nilfs);
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
	err = nilfs_ifile_get_inode_block(sbi->s_ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, bh);

	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			new_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(sbi->s_ifile, ino, bh);
	brelse(bh);
	up_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
	nilfs_set_inode_flags(inode);
	return 0;

 failed_unmap:
	nilfs_ifile_unmap_inode(sbi->s_ifile, ino, bh);
	brelse(bh);

 bad_inode:
	up_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
	return err;
}

struct inode *nilfs_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = iget_locked(sb, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = __nilfs_read_inode(sb, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode, int has_bmap)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(inode->i_uid);
	raw_inode->i_gid = cpu_to_le32(inode->i_gid);
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	if (has_bmap)
		nilfs_bmap_write(ii->i_bmap, raw_inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(new_encode_dev(inode->i_rdev));
	/* When extending the inode, nilfs->ns_inode_size should be checked
	   for substitutions of appended fields */
}
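/*
 * nilfs_update_inode() copies the in-core inode into its on-disk inode
 * entry mapped from the ifile buffer @ibh.  The caller must hold the
 * buffer lock; an entry for a freshly created inode is zero-filled
 * before being written.
 */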
void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, ibh);

	/* The buffer is guarded with lock_buffer() by the caller */
	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(sbi->s_ifile)->mi_entry_size);
	set_bit(NILFS_I_INODE_DIRTY, &ii->i_state);

	nilfs_write_inode_common(inode, raw_inode, 0);
		/* XXX: call with has_bmap = 0 is a workaround to avoid
		   deadlock of bmap. This delays update of i_bmap to just
		   before writing */
	nilfs_ifile_unmap_inode(sbi->s_ifile, ino, ibh);
}

#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */

static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	unsigned long b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
 repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

 failed:
	if (ret == -EINVAL)
		nilfs_error(ii->vfs_inode.i_sb, __func__,
			    "bmap is broken (ino=%lu)", ii->vfs_inode.i_ino);
	else
		nilfs_warning(ii->vfs_inode.i_sb, __func__,
			      "failed to truncate bmap (ino=%lu, err=%d)",
			      ii->vfs_inode.i_ino, ret);
}

void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_set_file_dirty(NILFS_SB(sb), inode, 0);
	nilfs_transaction_commit(sb);
	/* May construct a logical segment and may fail in sync mode.
	   But truncate has no return value. */
}

void nilfs_delete_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (unlikely(is_bad_inode(inode))) {
		if (inode->i_data.nrpages)
			truncate_inode_pages(&inode->i_data, 0);
		clear_inode(inode);
		return;
	}
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	if (inode->i_data.nrpages)
		truncate_inode_pages(&inode->i_data, 0);

	nilfs_truncate_bmap(ii, 0);
	nilfs_free_inode(inode);
	/* nilfs_free_inode() marks inode buffer dirty */
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	nilfs_transaction_commit(sb);
	/* May construct a logical segment and may fail in sync mode.
	   But delete_inode has no return value. */
}
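/*
 * nilfs_setattr() applies attribute changes inside a NILFS transaction
 * so that the inode update is picked up by the segment constructor; the
 * transaction is aborted if inode_setattr() or the ACL update fails.
 */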
int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct nilfs_transaction_info ti;
	struct inode *inode = dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	int err;

	err = inode_change_ok(inode, iattr);
	if (err)
		return err;

	err = nilfs_transaction_begin(sb, &ti, 0);
	if (unlikely(err))
		return err;
	err = inode_setattr(inode, iattr);
	if (!err && (iattr->ia_valid & ATTR_MODE))
		err = nilfs_acl_chmod(inode);
	if (likely(!err))
		err = nilfs_transaction_commit(sb);
	else
		nilfs_transaction_abort(sb);

	return err;
}

int nilfs_load_inode_block(struct nilfs_sb_info *sbi, struct inode *inode,
			   struct buffer_head **pbh)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	/* ii->i_bh is looked up and cached under s_inode_lock */
	spin_lock(&sbi->s_inode_lock);
	if (ii->i_bh == NULL) {
		spin_unlock(&sbi->s_inode_lock);
		err = nilfs_ifile_get_inode_block(sbi->s_ifile, inode->i_ino,
						  pbh);
		if (unlikely(err))
			return err;
		spin_lock(&sbi->s_inode_lock);
		if (ii->i_bh == NULL)
			ii->i_bh = *pbh;
		else {
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else
		*pbh = ii->i_bh;

	get_bh(*pbh);
	spin_unlock(&sbi->s_inode_lock);
	return 0;
}

int nilfs_inode_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
	int ret = 0;

	if (!list_empty(&ii->i_dirty)) {
		spin_lock(&sbi->s_inode_lock);
		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
			test_bit(NILFS_I_BUSY, &ii->i_state);
		spin_unlock(&sbi->s_inode_lock);
	}
	return ret;
}

int nilfs_set_file_dirty(struct nilfs_sb_info *sbi, struct inode *inode,
			 unsigned nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	atomic_add(nr_dirty, &sbi->s_nilfs->ns_ndirtyblks);

	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&sbi->s_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/* Because this routine may race with nilfs_dispose_list(),
		   we have to check NILFS_I_QUEUED here, too. */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/* This will happen when somebody is freeing
			   this inode. */
			nilfs_warning(sbi->s_super, __func__,
				      "cannot get inode (ino=%lu)\n",
				      inode->i_ino);
			spin_unlock(&sbi->s_inode_lock);
			return -EINVAL; /* NILFS_I_DIRTY may remain for
					   freeing inode */
		}
		list_del(&ii->i_dirty);
		list_add_tail(&ii->i_dirty, &sbi->s_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&sbi->s_inode_lock);
	return 0;
}
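/*
 * nilfs_mark_inode_dirty() writes the in-core inode back to its ifile
 * block and marks both that block and the ifile dirty, so that the
 * change is reflected by the next segment construction.
 */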
760 * 761 * nilfs_dirty_inode() loads a inode block containing the specified 762 * @inode and copies data from a nilfs_inode to a corresponding inode 763 * entry in the inode block. This operation is excluded from the segment 764 * construction. This function can be called both as a single operation 765 * and as a part of indivisible file operations. 766 */ 767 void nilfs_dirty_inode(struct inode *inode) 768 { 769 struct nilfs_transaction_info ti; 770 771 if (is_bad_inode(inode)) { 772 nilfs_warning(inode->i_sb, __func__, 773 "tried to mark bad_inode dirty. ignored.\n"); 774 dump_stack(); 775 return; 776 } 777 nilfs_transaction_begin(inode->i_sb, &ti, 0); 778 nilfs_mark_inode_dirty(inode); 779 nilfs_transaction_commit(inode->i_sb); /* never fails */ 780 } 781