/*
 * inode.c - NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include "nilfs.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"


/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode: inode struct of the target file
 * @blkoff: file block number
 * @bh_result: buffer head to be mapped on
 * @create: whether to allocate the block if it has not been allocated yet
 *
 * This function does not issue an actual read request for the specified
 * data block; that is done by the VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	__u64 blknum = 0;
	int err = 0, ret;
	struct inode *dat = nilfs_dat_inode(NILFS_I_NILFS(inode));
	unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;

	down_read(&NILFS_MDT(dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(dat)->mi_sem);
	if (ret >= 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		if (ret > 0)
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * The get_block() function could be called
				 * from multiple callers for an inode.
				 * However, the page having this block must
				 * be locked in this case.
				 */
				printk(KERN_WARNING
				       "nilfs_get_block: a race condition "
				       "while inserting a data block. "
				       "(inode number=%lu, file block "
				       "offset=%llu)\n",
				       inode->i_ino,
				       (unsigned long long)blkoff);
				err = 0;
			} else if (err == -EINVAL) {
				nilfs_error(inode->i_sb, __func__,
					    "broken bmap (inode=%lu)\n",
					    inode->i_ino);
				err = -EIO;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb, 0); /* dbn must be changed
						      to proper value */
	} else if (ret == -ENOENT) {
		/* not found is not an error (e.g. a hole); must return
		   without the mapped state flag set. */
		;
	} else {
		err = ret;
	}

 out:
	return err;
}
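/*
 * Usage sketch: nilfs_get_block() is not called directly but passed as
 * a get_block_t callback to generic helpers, as in the read and write
 * paths below:
 *
 *	mpage_readpage(page, nilfs_get_block);
 *	block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
 *			  nilfs_get_block);
 *
 * On a hit, nilfs_bmap_lookup_contig() returns the number of contiguous
 * blocks found, so map_bh() is followed by trimming bh_result->b_size
 * to the mapped extent (at most @maxblocks blocks, derived from the
 * b_size set by the caller).
 */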
/**
 * nilfs_readpage() - implement the readpage() method of the nilfs_aops
 * address_space_operations.
 * @file: file struct of the file to be read
 * @page: the page to be read
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, nilfs_get_block);
}

/**
 * nilfs_readpages() - implement the readpages() method of the nilfs_aops
 * address_space_operations.
 * @file: file struct of the file to be read
 * @mapping: address_space struct used for reading multiple pages
 * @pages: the pages to be read
 * @nr_pages: number of pages to be read
 */
static int nilfs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
}

static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err = 0;

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
						    wbc->range_start,
						    wbc->range_end);
	return err;
}

static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}

static int nilfs_set_page_dirty(struct page *page)
{
	int ret = __set_page_dirty_buffers(page);

	if (ret) {
		struct inode *inode = page->mapping->host;
		struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
		unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);

		nilfs_set_file_dirty(sbi, inode, nr_dirty);
	}
	return ret;
}

static int nilfs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

	if (unlikely(err))
		return err;

	*pagep = NULL;
	err = block_write_begin(file, mapping, pos, len, flags, pagep,
				fsdata, nilfs_get_block);
	if (unlikely(err))
		nilfs_transaction_abort(inode->i_sb);
	return err;
}

static int nilfs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
	unsigned nr_dirty;
	int err;

	nr_dirty = nilfs_page_count_clean_buffers(page, start,
						  start + copied);
	copied = generic_write_end(file, mapping, pos, len, copied, page,
				   fsdata);
	nilfs_set_file_dirty(NILFS_SB(inode->i_sb), inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
	return err ? : copied;
}
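/*
 * Note: a buffered write is bracketed by a NILFS transaction, roughly:
 *
 *	nilfs_write_begin()
 *		nilfs_transaction_begin()
 *		block_write_begin()	(aborts the transaction on error)
 *	... caller copies data into the page ...
 *	nilfs_write_end()
 *		generic_write_end()
 *		nilfs_set_file_dirty()
 *		nilfs_transaction_commit()
 *
 * so no half-prepared state survives a failed write_begin().
 */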
static ssize_t
nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
		loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t size;

	if (rw == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	size = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				  offset, nr_segs, nilfs_get_block, NULL);
	return size;
}

const struct address_space_operations nilfs_aops = {
	.writepage		= nilfs_writepage,
	.readpage		= nilfs_readpage,
	.sync_page		= block_sync_page,
	.writepages		= nilfs_writepages,
	.set_page_dirty		= nilfs_set_page_dirty,
	.readpages		= nilfs_readpages,
	.write_begin		= nilfs_write_begin,
	.write_end		= nilfs_write_end,
	/* .releasepage		= nilfs_releasepage, */
	.invalidatepage		= block_invalidatepage,
	.direct_IO		= nilfs_direct_IO,
	.is_partially_uptodate	= block_is_partially_uptodate,
};
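/*
 * Note: nilfs_direct_IO() rejects writes (returns 0) so that the
 * generic O_DIRECT write path falls back to buffered writing; direct
 * writes would need synchronization with the cleaner, as noted above.
 * Direct reads go through blockdev_direct_IO() with nilfs_get_block()
 * doing the mapping.
 */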
struct inode *nilfs_new_inode(struct inode *dir, int mode)
{
	struct super_block *sb = dir->i_sb;
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct inode *inode;
	struct nilfs_inode_info *ii;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

	ii = NILFS_I(inode);
	ii->i_state = 1 << NILFS_I_NEW;

	err = nilfs_ifile_create_inode(sbi->s_ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic_inc(&sbi->s_inodes_count);

	inode->i_uid = current_fsuid();
	if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current_fsgid();

	inode->i_mode = mode;
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_bmap;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = NILFS_I(dir)->i_flags;
	if (S_ISLNK(mode))
		ii->i_flags &= ~(NILFS_IMMUTABLE_FL | NILFS_APPEND_FL);
	if (!S_ISDIR(mode))
		ii->i_flags &= ~NILFS_DIRSYNC_FL;

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	ii->i_cno = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);
	insert_inode_hash(inode);

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		goto failed_acl; /* should never occur.  When nilfs_init_acl()
				    is actually supported, proper rollback of
				    the above steps must be considered. */

	mark_inode_dirty(inode);
	return inode;

 failed_acl:
 failed_bmap:
	inode->i_nlink = 0;
	iput(inode);  /* raw_inode will be deleted through
			 generic_delete_inode() */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
			 called */
 failed:
	return ERR_PTR(err);
}
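/*
 * Caller sketch (roughly as in nilfs_create() and nilfs_mkdir() in
 * namei.c):
 *
 *	inode = nilfs_new_inode(dir, mode);
 *	err = PTR_ERR(inode);
 *	if (!IS_ERR(inode)) {
 *		inode->i_op = &nilfs_file_inode_operations;
 *		...
 *	}
 *
 * On failure an ERR_PTR-encoded error is returned; the half-built
 * inode has already been released with iput() on the error paths
 * above.
 */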
void nilfs_free_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct nilfs_sb_info *sbi = NILFS_SB(sb);

	clear_inode(inode);
	/* XXX: check error code? Is there anything I can do? */
	(void) nilfs_ifile_delete_inode(sbi->s_ifile, inode->i_ino);
	atomic_dec(&sbi->s_inodes_count);
}

void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;

	inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
			    S_DIRSYNC);
	if (flags & NILFS_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & NILFS_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & NILFS_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
#ifndef NILFS_ATIME_DISABLE
	if (flags & NILFS_NOATIME_FL)
#endif
		inode->i_flags |= S_NOATIME;
	if (flags & NILFS_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
}

int nilfs_read_inode_common(struct inode *inode,
			    struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	inode->i_uid = (uid_t)le32_to_cpu(raw_inode->i_uid);
	inode->i_gid = (gid_t)le32_to_cpu(raw_inode->i_gid);
	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
	inode->i_size = le64_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	if (inode->i_nlink == 0 && inode->i_mode == 0)
		return -EINVAL; /* this inode is deleted */

	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
		0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
	ii->i_dir_start_lookup = 0;
	ii->i_cno = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
		if (err < 0)
			return err;
		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}
	return 0;
}
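/*
 * Note: nilfs_read_inode_common() and nilfs_write_inode_common()
 * (later in this file) are inverses of each other; every le*_to_cpu()
 * conversion on the read side has a matching cpu_to_le*() on the write
 * side, e.g. for the mode field:
 *
 *	read:   inode->i_mode     = le16_to_cpu(raw_inode->i_mode);
 *	write:  raw_inode->i_mode = cpu_to_le16(inode->i_mode);
 *
 * atime is the exception: it is not stored on disk, so it is filled
 * from the on-disk mtime at read time and never written back.
 */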
static int __nilfs_read_inode(struct super_block *sb, unsigned long ino,
			      struct inode *inode)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct inode *dat = nilfs_dat_inode(sbi->s_nilfs);
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
	err = nilfs_ifile_get_inode_block(sbi->s_ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, bh);

	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			new_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(sbi->s_ifile, ino, bh);
	brelse(bh);
	up_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
	nilfs_set_inode_flags(inode);
	return 0;

 failed_unmap:
	nilfs_ifile_unmap_inode(sbi->s_ifile, ino, bh);
	brelse(bh);

 bad_inode:
	up_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
	return err;
}

struct inode *nilfs_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = iget_locked(sb, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = __nilfs_read_inode(sb, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode, int has_bmap)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(inode->i_uid);
	raw_inode->i_gid = cpu_to_le32(inode->i_gid);
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	if (has_bmap)
		nilfs_bmap_write(ii->i_bmap, raw_inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(new_encode_dev(inode->i_rdev));
	/* When extending the inode, nilfs->ns_inode_size should be checked
	   for substitutions of appended fields */
}
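/*
 * Note: every update of an on-disk inode follows the same
 * map/modify/unmap bracket, with the buffer lock held by the caller:
 *
 *	raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, ibh);
 *	... modify the raw fields ...
 *	nilfs_ifile_unmap_inode(sbi->s_ifile, ino, ibh);
 *
 * nilfs_update_inode() below is the canonical instance; see also
 * nilfs_mark_inode_dirty() near the end of this file.
 */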
void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, ibh);

	/* The buffer is guarded with lock_buffer() by the caller */
	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(sbi->s_ifile)->mi_entry_size);
	set_bit(NILFS_I_INODE_DIRTY, &ii->i_state);

	nilfs_write_inode_common(inode, raw_inode, 0);
	/* XXX: calling with has_bmap = 0 is a workaround to avoid
	   deadlock of bmap.  This delays the update of i_bmap to just
	   before writing */
	nilfs_ifile_unmap_inode(sbi->s_ifile, ino, ibh);
}

#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */

static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	unsigned long b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
 repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

 failed:
	if (ret == -EINVAL)
		nilfs_error(ii->vfs_inode.i_sb, __func__,
			    "bmap is broken (ino=%lu)", ii->vfs_inode.i_ino);
	else
		nilfs_warning(ii->vfs_inode.i_sb, __func__,
			      "failed to truncate bmap (ino=%lu, err=%d)",
			      ii->vfs_inode.i_ino, ret);
}

void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_set_file_dirty(NILFS_SB(sb), inode, 0);
	nilfs_transaction_commit(sb);
	/* May construct a logical segment and may fail in sync mode.
	   But truncate has no return value. */
}

void nilfs_delete_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (unlikely(is_bad_inode(inode))) {
		if (inode->i_data.nrpages)
			truncate_inode_pages(&inode->i_data, 0);
		clear_inode(inode);
		return;
	}
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	if (inode->i_data.nrpages)
		truncate_inode_pages(&inode->i_data, 0);

	nilfs_truncate_bmap(ii, 0);
	nilfs_free_inode(inode);
	/* nilfs_free_inode() marks inode buffer dirty */
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	nilfs_transaction_commit(sb);
	/* May construct a logical segment and may fail in sync mode.
	   But delete_inode has no return value. */
}
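/*
 * Note on NILFS_MAX_TRUNCATE_BLOCKS above: nilfs_truncate_bmap() walks
 * backwards from the last mapped key in chunks of at most 16384 blocks
 * per nilfs_bmap_truncate() call, i.e. 16384 * 4KB = 64MB with 4KB
 * blocks, calling nilfs_relax_pressure_in_lock() between chunks so
 * that dirty-block pressure can be relieved during a large truncation.
 */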
int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct nilfs_transaction_info ti;
	struct inode *inode = dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	int err;

	err = inode_change_ok(inode, iattr);
	if (err)
		return err;

	err = nilfs_transaction_begin(sb, &ti, 0);
	if (unlikely(err))
		return err;
	err = inode_setattr(inode, iattr);
	if (!err && (iattr->ia_valid & ATTR_MODE))
		err = nilfs_acl_chmod(inode);
	if (likely(!err))
		err = nilfs_transaction_commit(sb);
	else
		nilfs_transaction_abort(sb);

	return err;
}

int nilfs_load_inode_block(struct nilfs_sb_info *sbi, struct inode *inode,
			   struct buffer_head **pbh)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	spin_lock(&sbi->s_inode_lock);
	/* Caller of this function MUST lock s_inode_lock */
	if (ii->i_bh == NULL) {
		spin_unlock(&sbi->s_inode_lock);
		err = nilfs_ifile_get_inode_block(sbi->s_ifile, inode->i_ino,
						  pbh);
		if (unlikely(err))
			return err;
		spin_lock(&sbi->s_inode_lock);
		if (ii->i_bh == NULL)
			ii->i_bh = *pbh;
		else {
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else
		*pbh = ii->i_bh;

	get_bh(*pbh);
	spin_unlock(&sbi->s_inode_lock);
	return 0;
}

int nilfs_inode_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
	int ret = 0;

	if (!list_empty(&ii->i_dirty)) {
		spin_lock(&sbi->s_inode_lock);
		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
			test_bit(NILFS_I_BUSY, &ii->i_state);
		spin_unlock(&sbi->s_inode_lock);
	}
	return ret;
}

int nilfs_set_file_dirty(struct nilfs_sb_info *sbi, struct inode *inode,
			 unsigned nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	atomic_add(nr_dirty, &sbi->s_nilfs->ns_ndirtyblks);

	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&sbi->s_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/* Because this routine may race with nilfs_dispose_list(),
		   we have to check NILFS_I_QUEUED here, too. */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/* This will happen when somebody is freeing
			   this inode. */
			nilfs_warning(sbi->s_super, __func__,
				      "cannot get inode (ino=%lu)\n",
				      inode->i_ino);
			spin_unlock(&sbi->s_inode_lock);
			return -EINVAL; /* NILFS_I_DIRTY may remain set
					   for the inode being freed */
		}
		list_del(&ii->i_dirty);
		list_add_tail(&ii->i_dirty, &sbi->s_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&sbi->s_inode_lock);
	return 0;
}

int nilfs_mark_inode_dirty(struct inode *inode)
{
	struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
	struct buffer_head *ibh;
	int err;

	err = nilfs_load_inode_block(sbi, inode, &ibh);
	if (unlikely(err)) {
		nilfs_warning(inode->i_sb, __func__,
			      "failed to reget inode block.\n");
		return err;
	}
	lock_buffer(ibh);
	nilfs_update_inode(inode, ibh);
	unlock_buffer(ibh);
	nilfs_mdt_mark_buffer_dirty(ibh);
	nilfs_mdt_mark_dirty(sbi->s_ifile);
	brelse(ibh);
	return 0;
}

/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies the in-memory inode data into the corresponding
 * inode entry in the inode block.  This operation is excluded from the
 * segment construction.  This function can be called both as a single
 * operation and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;

	if (is_bad_inode(inode)) {
		nilfs_warning(inode->i_sb, __func__,
			      "tried to mark bad_inode dirty. ignored.\n");
		dump_stack();
		return;
	}
	nilfs_transaction_begin(inode->i_sb, &ti, 0);
	nilfs_mark_inode_dirty(inode);
	nilfs_transaction_commit(inode->i_sb); /* never fails */
}
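/*
 * Note: nilfs_dirty_inode() is reached via the ->dirty_inode method of
 * the superblock operations (see nilfs_sops in super.c), i.e. from
 * __mark_inode_dirty() whenever the VFS dirties a NILFS inode; the
 * transaction bracket keeps the inode-block update consistent with
 * segment construction.
 */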