/*
 * inode.c - NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include "nilfs.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"


/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode: inode struct of the target file
 * @blkoff: file block number
 * @bh_result: buffer head to be mapped on
 * @create: whether to allocate the block if it has not been allocated yet
 *
 * This function does not issue an actual read request for the specified
 * data block; that is done by the VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	__u64 blknum = 0;
	int err = 0, ret;
	struct inode *dat = nilfs_dat_inode(NILFS_I_NILFS(inode));
	unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;

	down_read(&NILFS_MDT(dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(dat)->mi_sem);
	if (ret >= 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		if (ret > 0)
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * The get_block() function could be called
				 * from multiple callers for an inode.
				 * However, the page having this block must
				 * be locked in this case.
				 */
				printk(KERN_WARNING
				       "nilfs_get_block: a race condition "
				       "while inserting a data block. "
				       "(inode number=%lu, file block "
				       "offset=%llu)\n",
				       inode->i_ino,
				       (unsigned long long)blkoff);
				err = 0;
			} else if (err == -EINVAL) {
				nilfs_error(inode->i_sb, __func__,
					    "broken bmap (inode=%lu)\n",
					    inode->i_ino);
				err = -EIO;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_mark_inode_dirty(inode);
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb, 0); /* dbn must be changed
						      to proper value */
	} else if (ret == -ENOENT) {
		/* A hole is not an error; return without setting the
		   mapped state flag. */
		;
	} else {
		err = ret;
	}

 out:
	return err;
}

/**
 * nilfs_readpage() - implement the readpage() method of nilfs_aops
 * @file: file struct of the file to be read
 * @page: the page to be read
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, nilfs_get_block);
}

/**
 * nilfs_readpages() - implement the readpages() method of nilfs_aops
 * @file: file struct of the file to be read
 * @mapping: address_space struct used for reading multiple pages
 * @pages: the pages to be read
 * @nr_pages: number of pages to be read
 */
static int nilfs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
}

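/**
 * nilfs_writepages() - implement the writepages() method of nilfs_aops
 * @mapping: address_space struct of the file to be written back
 * @wbc: writeback control descriptor
 *
 * For data-integrity writeback (WB_SYNC_ALL), a data-sync segment is
 * constructed for the requested byte range.  Otherwise nothing is done
 * here; dirty pages are written out by the segment constructor.
 */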
static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err = 0;

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
						    wbc->range_start,
						    wbc->range_end);
	return err;
}

static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}

static int nilfs_set_page_dirty(struct page *page)
{
	int ret = __set_page_dirty_buffers(page);

	if (ret) {
		struct inode *inode = page->mapping->host;
		struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
		unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);

		nilfs_set_file_dirty(sbi, inode, nr_dirty);
	}
	return ret;
}

static int nilfs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

	if (unlikely(err))
		return err;

	*pagep = NULL;
	err = block_write_begin(file, mapping, pos, len, flags, pagep,
				fsdata, nilfs_get_block);
	if (unlikely(err))
		nilfs_transaction_abort(inode->i_sb);
	return err;
}

static int nilfs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
	unsigned nr_dirty;
	int err;

	nr_dirty = nilfs_page_count_clean_buffers(page, start,
						  start + copied);
	copied = generic_write_end(file, mapping, pos, len, copied, page,
				   fsdata);
	nilfs_set_file_dirty(NILFS_SB(inode->i_sb), inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
	return err ? : copied;
}

static ssize_t
nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
		loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t size;

	if (rw == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	size = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				  offset, nr_segs, nilfs_get_block, NULL);
	return size;
}

const struct address_space_operations nilfs_aops = {
	.writepage		= nilfs_writepage,
	.readpage		= nilfs_readpage,
	.sync_page		= block_sync_page,
	.writepages		= nilfs_writepages,
	.set_page_dirty		= nilfs_set_page_dirty,
	.readpages		= nilfs_readpages,
	.write_begin		= nilfs_write_begin,
	.write_end		= nilfs_write_end,
	/* .releasepage		= nilfs_releasepage, */
	.invalidatepage		= block_invalidatepage,
	.direct_IO		= nilfs_direct_IO,
	.is_partially_uptodate	= block_is_partially_uptodate,
};

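/**
 * nilfs_new_inode() - allocate a new inode
 * @dir: inode of the parent directory
 * @mode: file mode of the new inode
 *
 * An entry is created in the ifile, and the in-core inode is initialized
 * with its owner, timestamps, flags, and generation number.  A bmap is
 * attached for regular files, directories, and symlinks.  Returns the new
 * inode on success, or an error pointer on failure.
 */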
struct inode *nilfs_new_inode(struct inode *dir, int mode)
{
	struct super_block *sb = dir->i_sb;
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct inode *inode;
	struct nilfs_inode_info *ii;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

	ii = NILFS_I(inode);
	ii->i_state = 1 << NILFS_I_NEW;

	err = nilfs_ifile_create_inode(sbi->s_ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic_inc(&sbi->s_inodes_count);

	inode->i_uid = current_fsuid();
	if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current_fsgid();

	inode->i_mode = mode;
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_bmap;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = NILFS_I(dir)->i_flags;
	if (S_ISLNK(mode))
		ii->i_flags &= ~(NILFS_IMMUTABLE_FL | NILFS_APPEND_FL);
	if (!S_ISDIR(mode))
		ii->i_flags &= ~NILFS_DIRSYNC_FL;

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	ii->i_cno = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);
	insert_inode_hash(inode);

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		goto failed_acl; /* never occurs.  When supporting
				    nilfs_init_acl(), proper cancellation of
				    above jobs should be considered */

	return inode;

 failed_acl:
 failed_bmap:
	inode->i_nlink = 0;
	iput(inode);  /* raw_inode will be deleted through
			 generic_delete_inode() */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
			 called */
 failed:
	return ERR_PTR(err);
}

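/**
 * nilfs_free_inode() - release an on-disk inode
 * @inode: inode to be freed
 *
 * The corresponding entry in the ifile is deleted and the in-use inode
 * counter is decremented.  Invoked from nilfs_delete_inode().
 */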
void nilfs_free_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct nilfs_sb_info *sbi = NILFS_SB(sb);

	clear_inode(inode);
	/* XXX: check error code? Is there anything I can do? */
	(void) nilfs_ifile_delete_inode(sbi->s_ifile, inode->i_ino);
	atomic_dec(&sbi->s_inodes_count);
}

void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;

	inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
			    S_DIRSYNC);
	if (flags & NILFS_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & NILFS_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & NILFS_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
#ifndef NILFS_ATIME_DISABLE
	if (flags & NILFS_NOATIME_FL)
#endif
		inode->i_flags |= S_NOATIME;
	if (flags & NILFS_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
}

int nilfs_read_inode_common(struct inode *inode,
			    struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	inode->i_uid = (uid_t)le32_to_cpu(raw_inode->i_uid);
	inode->i_gid = (gid_t)le32_to_cpu(raw_inode->i_gid);
	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
	inode->i_size = le64_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	if (inode->i_nlink == 0 && inode->i_mode == 0)
		return -EINVAL; /* this inode is deleted */

	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
		0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
	ii->i_dir_start_lookup = 0;
	ii->i_cno = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
		if (err < 0)
			return err;
		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}
	return 0;
}

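/**
 * __nilfs_read_inode() - read an on-disk inode into an in-core inode
 * @sb: super block instance
 * @ino: inode number
 * @inode: in-core inode to be filled in
 *
 * The inode block is looked up in the ifile and its fields are copied in
 * with nilfs_read_inode_common(); inode and address space operations are
 * then assigned according to the file type.
 */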
static int __nilfs_read_inode(struct super_block *sb, unsigned long ino,
			      struct inode *inode)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct inode *dat = nilfs_dat_inode(sbi->s_nilfs);
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
	err = nilfs_ifile_get_inode_block(sbi->s_ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, bh);

	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			new_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(sbi->s_ifile, ino, bh);
	brelse(bh);
	up_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
	nilfs_set_inode_flags(inode);
	return 0;

 failed_unmap:
	nilfs_ifile_unmap_inode(sbi->s_ifile, ino, bh);
	brelse(bh);

 bad_inode:
	up_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
	return err;
}

struct inode *nilfs_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = iget_locked(sb, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = __nilfs_read_inode(sb, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode, int has_bmap)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(inode->i_uid);
	raw_inode->i_gid = cpu_to_le32(inode->i_gid);
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	if (has_bmap)
		nilfs_bmap_write(ii->i_bmap, raw_inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(new_encode_dev(inode->i_rdev));
	/* When extending inode, nilfs->ns_inode_size should be checked
	   for substitutions of appended fields */
}

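/**
 * nilfs_update_inode() - copy an in-core inode into its inode block
 * @inode: inode to be written out
 * @ibh: buffer head of the block containing the on-disk inode
 */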
void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, ibh);

	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(sbi->s_ifile)->mi_entry_size);
	set_bit(NILFS_I_INODE_DIRTY, &ii->i_state);

	nilfs_write_inode_common(inode, raw_inode, 0);
		/* XXX: call with has_bmap = 0 is a workaround to avoid
		   deadlock of bmap.  This delays update of i_bmap to just
		   before writing */
	nilfs_ifile_unmap_inode(sbi->s_ifile, ino, ibh);
}

#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */

static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	unsigned long b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
 repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

 failed:
	if (ret == -EINVAL)
		nilfs_error(ii->vfs_inode.i_sb, __func__,
			    "bmap is broken (ino=%lu)", ii->vfs_inode.i_ino);
	else
		nilfs_warning(ii->vfs_inode.i_sb, __func__,
			      "failed to truncate bmap (ino=%lu, err=%d)",
			      ii->vfs_inode.i_ino, ret);
}

void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_mark_inode_dirty(inode);
	nilfs_set_file_dirty(NILFS_SB(sb), inode, 0);
	nilfs_transaction_commit(sb);
	/* May construct a logical segment and may fail in sync mode.
	   But truncate has no return value. */
}

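/**
 * nilfs_delete_inode() - delete_inode super operation of NILFS
 * @inode: inode to be deleted (its link count has reached zero)
 *
 * All pages and blocks of the file are discarded, and the on-disk inode
 * is released through nilfs_free_inode(), within a single transaction.
 */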
void nilfs_delete_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (unlikely(is_bad_inode(inode))) {
		if (inode->i_data.nrpages)
			truncate_inode_pages(&inode->i_data, 0);
		clear_inode(inode);
		return;
	}
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	if (inode->i_data.nrpages)
		truncate_inode_pages(&inode->i_data, 0);

	nilfs_truncate_bmap(ii, 0);
	nilfs_mark_inode_dirty(inode);
	nilfs_free_inode(inode);
	/* nilfs_free_inode() marks inode buffer dirty */
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	nilfs_transaction_commit(sb);
	/* May construct a logical segment and may fail in sync mode.
	   But delete_inode has no return value. */
}

int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct nilfs_transaction_info ti;
	struct inode *inode = dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	int err;

	err = inode_change_ok(inode, iattr);
	if (err)
		return err;

	err = nilfs_transaction_begin(sb, &ti, 0);
	if (unlikely(err))
		return err;
	err = inode_setattr(inode, iattr);
	if (!err && (iattr->ia_valid & ATTR_MODE))
		err = nilfs_acl_chmod(inode);
	if (likely(!err))
		err = nilfs_transaction_commit(sb);
	else
		nilfs_transaction_abort(sb);

	return err;
}

int nilfs_load_inode_block(struct nilfs_sb_info *sbi, struct inode *inode,
			   struct buffer_head **pbh)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	spin_lock(&sbi->s_inode_lock);
	if (ii->i_bh == NULL) {
		spin_unlock(&sbi->s_inode_lock);
		err = nilfs_ifile_get_inode_block(sbi->s_ifile, inode->i_ino,
						  pbh);
		if (unlikely(err))
			return err;
		spin_lock(&sbi->s_inode_lock);
		if (ii->i_bh == NULL)
			ii->i_bh = *pbh;
		else {
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else
		*pbh = ii->i_bh;

	get_bh(*pbh);
	spin_unlock(&sbi->s_inode_lock);
	return 0;
}

int nilfs_inode_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
	int ret = 0;

	if (!list_empty(&ii->i_dirty)) {
		spin_lock(&sbi->s_inode_lock);
		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
			test_bit(NILFS_I_BUSY, &ii->i_state);
		spin_unlock(&sbi->s_inode_lock);
	}
	return ret;
}

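/**
 * nilfs_set_file_dirty() - register a file on the dirty file list
 * @sbi: nilfs_sb_info instance
 * @inode: inode of the file
 * @nr_dirty: number of newly dirtied blocks
 *
 * The dirty block counter is raised by @nr_dirty, and the inode is queued
 * on the per-superblock dirty file list unless it is already queued or
 * in use by the segment constructor.
 */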
int nilfs_set_file_dirty(struct nilfs_sb_info *sbi, struct inode *inode,
			 unsigned nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	atomic_add(nr_dirty, &sbi->s_nilfs->ns_ndirtyblks);

	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&sbi->s_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/* Because this routine may race with nilfs_dispose_list(),
		   we have to check NILFS_I_QUEUED here, too. */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/* This will happen when somebody is freeing
			   this inode. */
			nilfs_warning(sbi->s_super, __func__,
				      "cannot get inode (ino=%lu)\n",
				      inode->i_ino);
			spin_unlock(&sbi->s_inode_lock);
			return -EINVAL; /* NILFS_I_DIRTY may remain for
					   freeing inode */
		}
		list_del(&ii->i_dirty);
		list_add_tail(&ii->i_dirty, &sbi->s_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&sbi->s_inode_lock);
	return 0;
}

int nilfs_mark_inode_dirty(struct inode *inode)
{
	struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
	struct buffer_head *ibh;
	int err;

	err = nilfs_load_inode_block(sbi, inode, &ibh);
	if (unlikely(err)) {
		nilfs_warning(inode->i_sb, __func__,
			      "failed to reget inode block.\n");
		return err;
	}
	nilfs_update_inode(inode, ibh);
	nilfs_mdt_mark_buffer_dirty(ibh);
	nilfs_mdt_mark_dirty(sbi->s_ifile);
	brelse(ibh);
	return 0;
}

/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies the in-core inode data to the corresponding inode
 * entry in the inode block.  This operation is excluded from the segment
 * construction.  This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;

	if (is_bad_inode(inode)) {
		nilfs_warning(inode->i_sb, __func__,
			      "tried to mark bad_inode dirty. ignored.\n");
		dump_stack();
		return;
	}
	nilfs_transaction_begin(inode->i_sb, &ti, 0);
	nilfs_mark_inode_dirty(inode);
	nilfs_transaction_commit(inode->i_sb); /* never fails */
}