1 /* 2 * inode.c - NILFS inode operations. 3 * 4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, write to the Free Software 18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 19 * 20 * Written by Ryusuke Konishi <ryusuke@osrg.net> 21 * 22 */ 23 24 #include <linux/buffer_head.h> 25 #include <linux/gfp.h> 26 #include <linux/mpage.h> 27 #include <linux/writeback.h> 28 #include <linux/uio.h> 29 #include "nilfs.h" 30 #include "segment.h" 31 #include "page.h" 32 #include "mdt.h" 33 #include "cpfile.h" 34 #include "ifile.h" 35 36 37 /** 38 * nilfs_get_block() - get a file block on the filesystem (callback function) 39 * @inode - inode struct of the target file 40 * @blkoff - file block number 41 * @bh_result - buffer head to be mapped on 42 * @create - indicate whether allocating the block or not when it has not 43 * been allocated yet. 44 * 45 * This function does not issue actual read request of the specified data 46 * block. It is done by VFS. 47 */ 48 int nilfs_get_block(struct inode *inode, sector_t blkoff, 49 struct buffer_head *bh_result, int create) 50 { 51 struct nilfs_inode_info *ii = NILFS_I(inode); 52 __u64 blknum = 0; 53 int err = 0, ret; 54 struct inode *dat = nilfs_dat_inode(NILFS_I_NILFS(inode)); 55 unsigned maxblocks = bh_result->b_size >> inode->i_blkbits; 56 57 down_read(&NILFS_MDT(dat)->mi_sem); 58 ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks); 59 up_read(&NILFS_MDT(dat)->mi_sem); 60 if (ret >= 0) { /* found */ 61 map_bh(bh_result, inode->i_sb, blknum); 62 if (ret > 0) 63 bh_result->b_size = (ret << inode->i_blkbits); 64 goto out; 65 } 66 /* data block was not found */ 67 if (ret == -ENOENT && create) { 68 struct nilfs_transaction_info ti; 69 70 bh_result->b_blocknr = 0; 71 err = nilfs_transaction_begin(inode->i_sb, &ti, 1); 72 if (unlikely(err)) 73 goto out; 74 err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff, 75 (unsigned long)bh_result); 76 if (unlikely(err != 0)) { 77 if (err == -EEXIST) { 78 /* 79 * The get_block() function could be called 80 * from multiple callers for an inode. 81 * However, the page having this block must 82 * be locked in this case. 83 */ 84 printk(KERN_WARNING 85 "nilfs_get_block: a race condition " 86 "while inserting a data block. " 87 "(inode number=%lu, file block " 88 "offset=%llu)\n", 89 inode->i_ino, 90 (unsigned long long)blkoff); 91 err = 0; 92 } else if (err == -EINVAL) { 93 nilfs_error(inode->i_sb, __func__, 94 "broken bmap (inode=%lu)\n", 95 inode->i_ino); 96 err = -EIO; 97 } 98 nilfs_transaction_abort(inode->i_sb); 99 goto out; 100 } 101 nilfs_mark_inode_dirty(inode); 102 nilfs_transaction_commit(inode->i_sb); /* never fails */ 103 /* Error handling should be detailed */ 104 set_buffer_new(bh_result); 105 map_bh(bh_result, inode->i_sb, 0); /* dbn must be changed 106 to proper value */ 107 } else if (ret == -ENOENT) { 108 /* not found is not error (e.g. hole); must return without 109 the mapped state flag. */ 110 ; 111 } else { 112 err = ret; 113 } 114 115 out: 116 return err; 117 } 118 119 /** 120 * nilfs_readpage() - implement readpage() method of nilfs_aops {} 121 * address_space_operations. 122 * @file - file struct of the file to be read 123 * @page - the page to be read 124 */ 125 static int nilfs_readpage(struct file *file, struct page *page) 126 { 127 return mpage_readpage(page, nilfs_get_block); 128 } 129 130 /** 131 * nilfs_readpages() - implement readpages() method of nilfs_aops {} 132 * address_space_operations. 133 * @file - file struct of the file to be read 134 * @mapping - address_space struct used for reading multiple pages 135 * @pages - the pages to be read 136 * @nr_pages - number of pages to be read 137 */ 138 static int nilfs_readpages(struct file *file, struct address_space *mapping, 139 struct list_head *pages, unsigned nr_pages) 140 { 141 return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block); 142 } 143 144 static int nilfs_writepages(struct address_space *mapping, 145 struct writeback_control *wbc) 146 { 147 struct inode *inode = mapping->host; 148 int err = 0; 149 150 if (wbc->sync_mode == WB_SYNC_ALL) 151 err = nilfs_construct_dsync_segment(inode->i_sb, inode, 152 wbc->range_start, 153 wbc->range_end); 154 return err; 155 } 156 157 static int nilfs_writepage(struct page *page, struct writeback_control *wbc) 158 { 159 struct inode *inode = page->mapping->host; 160 int err; 161 162 redirty_page_for_writepage(wbc, page); 163 unlock_page(page); 164 165 if (wbc->sync_mode == WB_SYNC_ALL) { 166 err = nilfs_construct_segment(inode->i_sb); 167 if (unlikely(err)) 168 return err; 169 } else if (wbc->for_reclaim) 170 nilfs_flush_segment(inode->i_sb, inode->i_ino); 171 172 return 0; 173 } 174 175 static int nilfs_set_page_dirty(struct page *page) 176 { 177 int ret = __set_page_dirty_buffers(page); 178 179 if (ret) { 180 struct inode *inode = page->mapping->host; 181 struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb); 182 unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits); 183 184 nilfs_set_file_dirty(sbi, inode, nr_dirty); 185 } 186 return ret; 187 } 188 189 static int nilfs_write_begin(struct file *file, struct address_space *mapping, 190 loff_t pos, unsigned len, unsigned flags, 191 struct page **pagep, void **fsdata) 192 193 { 194 struct inode *inode = mapping->host; 195 int err = nilfs_transaction_begin(inode->i_sb, NULL, 1); 196 197 if (unlikely(err)) 198 return err; 199 200 err = block_write_begin(mapping, pos, len, flags, pagep, 201 nilfs_get_block); 202 if (unlikely(err)) { 203 loff_t isize = mapping->host->i_size; 204 if (pos + len > isize) 205 vmtruncate(mapping->host, isize); 206 207 nilfs_transaction_abort(inode->i_sb); 208 } 209 return err; 210 } 211 212 static int nilfs_write_end(struct file *file, struct address_space *mapping, 213 loff_t pos, unsigned len, unsigned copied, 214 struct page *page, void *fsdata) 215 { 216 struct inode *inode = mapping->host; 217 unsigned start = pos & (PAGE_CACHE_SIZE - 1); 218 unsigned nr_dirty; 219 int err; 220 221 nr_dirty = nilfs_page_count_clean_buffers(page, start, 222 start + copied); 223 copied = generic_write_end(file, mapping, pos, len, copied, page, 224 fsdata); 225 nilfs_set_file_dirty(NILFS_SB(inode->i_sb), inode, nr_dirty); 226 err = nilfs_transaction_commit(inode->i_sb); 227 return err ? : copied; 228 } 229 230 static ssize_t 231 nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, 232 loff_t offset, unsigned long nr_segs) 233 { 234 struct file *file = iocb->ki_filp; 235 struct inode *inode = file->f_mapping->host; 236 ssize_t size; 237 238 if (rw == WRITE) 239 return 0; 240 241 /* Needs synchronization with the cleaner */ 242 size = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 243 offset, nr_segs, nilfs_get_block, NULL); 244 245 /* 246 * In case of error extending write may have instantiated a few 247 * blocks outside i_size. Trim these off again. 248 */ 249 if (unlikely((rw & WRITE) && size < 0)) { 250 loff_t isize = i_size_read(inode); 251 loff_t end = offset + iov_length(iov, nr_segs); 252 253 if (end > isize) 254 vmtruncate(inode, isize); 255 } 256 257 return size; 258 } 259 260 const struct address_space_operations nilfs_aops = { 261 .writepage = nilfs_writepage, 262 .readpage = nilfs_readpage, 263 .sync_page = block_sync_page, 264 .writepages = nilfs_writepages, 265 .set_page_dirty = nilfs_set_page_dirty, 266 .readpages = nilfs_readpages, 267 .write_begin = nilfs_write_begin, 268 .write_end = nilfs_write_end, 269 /* .releasepage = nilfs_releasepage, */ 270 .invalidatepage = block_invalidatepage, 271 .direct_IO = nilfs_direct_IO, 272 .is_partially_uptodate = block_is_partially_uptodate, 273 }; 274 275 struct inode *nilfs_new_inode(struct inode *dir, int mode) 276 { 277 struct super_block *sb = dir->i_sb; 278 struct nilfs_sb_info *sbi = NILFS_SB(sb); 279 struct inode *inode; 280 struct nilfs_inode_info *ii; 281 int err = -ENOMEM; 282 ino_t ino; 283 284 inode = new_inode(sb); 285 if (unlikely(!inode)) 286 goto failed; 287 288 mapping_set_gfp_mask(inode->i_mapping, 289 mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS); 290 291 ii = NILFS_I(inode); 292 ii->i_state = 1 << NILFS_I_NEW; 293 294 err = nilfs_ifile_create_inode(sbi->s_ifile, &ino, &ii->i_bh); 295 if (unlikely(err)) 296 goto failed_ifile_create_inode; 297 /* reference count of i_bh inherits from nilfs_mdt_read_block() */ 298 299 atomic_inc(&sbi->s_inodes_count); 300 inode_init_owner(inode, dir, mode); 301 inode->i_ino = ino; 302 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; 303 304 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) { 305 err = nilfs_bmap_read(ii->i_bmap, NULL); 306 if (err < 0) 307 goto failed_bmap; 308 309 set_bit(NILFS_I_BMAP, &ii->i_state); 310 /* No lock is needed; iget() ensures it. */ 311 } 312 313 ii->i_flags = NILFS_I(dir)->i_flags; 314 if (S_ISLNK(mode)) 315 ii->i_flags &= ~(NILFS_IMMUTABLE_FL | NILFS_APPEND_FL); 316 if (!S_ISDIR(mode)) 317 ii->i_flags &= ~NILFS_DIRSYNC_FL; 318 319 /* ii->i_file_acl = 0; */ 320 /* ii->i_dir_acl = 0; */ 321 ii->i_dir_start_lookup = 0; 322 ii->i_cno = 0; 323 nilfs_set_inode_flags(inode); 324 spin_lock(&sbi->s_next_gen_lock); 325 inode->i_generation = sbi->s_next_generation++; 326 spin_unlock(&sbi->s_next_gen_lock); 327 insert_inode_hash(inode); 328 329 err = nilfs_init_acl(inode, dir); 330 if (unlikely(err)) 331 goto failed_acl; /* never occur. When supporting 332 nilfs_init_acl(), proper cancellation of 333 above jobs should be considered */ 334 335 return inode; 336 337 failed_acl: 338 failed_bmap: 339 inode->i_nlink = 0; 340 iput(inode); /* raw_inode will be deleted through 341 generic_delete_inode() */ 342 goto failed; 343 344 failed_ifile_create_inode: 345 make_bad_inode(inode); 346 iput(inode); /* if i_nlink == 1, generic_forget_inode() will be 347 called */ 348 failed: 349 return ERR_PTR(err); 350 } 351 352 void nilfs_free_inode(struct inode *inode) 353 { 354 struct super_block *sb = inode->i_sb; 355 struct nilfs_sb_info *sbi = NILFS_SB(sb); 356 357 clear_inode(inode); 358 /* XXX: check error code? Is there any thing I can do? */ 359 (void) nilfs_ifile_delete_inode(sbi->s_ifile, inode->i_ino); 360 atomic_dec(&sbi->s_inodes_count); 361 } 362 363 void nilfs_set_inode_flags(struct inode *inode) 364 { 365 unsigned int flags = NILFS_I(inode)->i_flags; 366 367 inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | 368 S_DIRSYNC); 369 if (flags & NILFS_SYNC_FL) 370 inode->i_flags |= S_SYNC; 371 if (flags & NILFS_APPEND_FL) 372 inode->i_flags |= S_APPEND; 373 if (flags & NILFS_IMMUTABLE_FL) 374 inode->i_flags |= S_IMMUTABLE; 375 #ifndef NILFS_ATIME_DISABLE 376 if (flags & NILFS_NOATIME_FL) 377 #endif 378 inode->i_flags |= S_NOATIME; 379 if (flags & NILFS_DIRSYNC_FL) 380 inode->i_flags |= S_DIRSYNC; 381 mapping_set_gfp_mask(inode->i_mapping, 382 mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS); 383 } 384 385 int nilfs_read_inode_common(struct inode *inode, 386 struct nilfs_inode *raw_inode) 387 { 388 struct nilfs_inode_info *ii = NILFS_I(inode); 389 int err; 390 391 inode->i_mode = le16_to_cpu(raw_inode->i_mode); 392 inode->i_uid = (uid_t)le32_to_cpu(raw_inode->i_uid); 393 inode->i_gid = (gid_t)le32_to_cpu(raw_inode->i_gid); 394 inode->i_nlink = le16_to_cpu(raw_inode->i_links_count); 395 inode->i_size = le64_to_cpu(raw_inode->i_size); 396 inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime); 397 inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime); 398 inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime); 399 inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec); 400 inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec); 401 inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec); 402 if (inode->i_nlink == 0 && inode->i_mode == 0) 403 return -EINVAL; /* this inode is deleted */ 404 405 inode->i_blocks = le64_to_cpu(raw_inode->i_blocks); 406 ii->i_flags = le32_to_cpu(raw_inode->i_flags); 407 #if 0 408 ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl); 409 ii->i_dir_acl = S_ISREG(inode->i_mode) ? 410 0 : le32_to_cpu(raw_inode->i_dir_acl); 411 #endif 412 ii->i_dir_start_lookup = 0; 413 ii->i_cno = 0; 414 inode->i_generation = le32_to_cpu(raw_inode->i_generation); 415 416 if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 417 S_ISLNK(inode->i_mode)) { 418 err = nilfs_bmap_read(ii->i_bmap, raw_inode); 419 if (err < 0) 420 return err; 421 set_bit(NILFS_I_BMAP, &ii->i_state); 422 /* No lock is needed; iget() ensures it. */ 423 } 424 return 0; 425 } 426 427 static int __nilfs_read_inode(struct super_block *sb, unsigned long ino, 428 struct inode *inode) 429 { 430 struct nilfs_sb_info *sbi = NILFS_SB(sb); 431 struct inode *dat = nilfs_dat_inode(sbi->s_nilfs); 432 struct buffer_head *bh; 433 struct nilfs_inode *raw_inode; 434 int err; 435 436 down_read(&NILFS_MDT(dat)->mi_sem); /* XXX */ 437 err = nilfs_ifile_get_inode_block(sbi->s_ifile, ino, &bh); 438 if (unlikely(err)) 439 goto bad_inode; 440 441 raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, bh); 442 443 err = nilfs_read_inode_common(inode, raw_inode); 444 if (err) 445 goto failed_unmap; 446 447 if (S_ISREG(inode->i_mode)) { 448 inode->i_op = &nilfs_file_inode_operations; 449 inode->i_fop = &nilfs_file_operations; 450 inode->i_mapping->a_ops = &nilfs_aops; 451 } else if (S_ISDIR(inode->i_mode)) { 452 inode->i_op = &nilfs_dir_inode_operations; 453 inode->i_fop = &nilfs_dir_operations; 454 inode->i_mapping->a_ops = &nilfs_aops; 455 } else if (S_ISLNK(inode->i_mode)) { 456 inode->i_op = &nilfs_symlink_inode_operations; 457 inode->i_mapping->a_ops = &nilfs_aops; 458 } else { 459 inode->i_op = &nilfs_special_inode_operations; 460 init_special_inode( 461 inode, inode->i_mode, 462 huge_decode_dev(le64_to_cpu(raw_inode->i_device_code))); 463 } 464 nilfs_ifile_unmap_inode(sbi->s_ifile, ino, bh); 465 brelse(bh); 466 up_read(&NILFS_MDT(dat)->mi_sem); /* XXX */ 467 nilfs_set_inode_flags(inode); 468 return 0; 469 470 failed_unmap: 471 nilfs_ifile_unmap_inode(sbi->s_ifile, ino, bh); 472 brelse(bh); 473 474 bad_inode: 475 up_read(&NILFS_MDT(dat)->mi_sem); /* XXX */ 476 return err; 477 } 478 479 struct inode *nilfs_iget(struct super_block *sb, unsigned long ino) 480 { 481 struct inode *inode; 482 int err; 483 484 inode = iget_locked(sb, ino); 485 if (unlikely(!inode)) 486 return ERR_PTR(-ENOMEM); 487 if (!(inode->i_state & I_NEW)) 488 return inode; 489 490 err = __nilfs_read_inode(sb, ino, inode); 491 if (unlikely(err)) { 492 iget_failed(inode); 493 return ERR_PTR(err); 494 } 495 unlock_new_inode(inode); 496 return inode; 497 } 498 499 void nilfs_write_inode_common(struct inode *inode, 500 struct nilfs_inode *raw_inode, int has_bmap) 501 { 502 struct nilfs_inode_info *ii = NILFS_I(inode); 503 504 raw_inode->i_mode = cpu_to_le16(inode->i_mode); 505 raw_inode->i_uid = cpu_to_le32(inode->i_uid); 506 raw_inode->i_gid = cpu_to_le32(inode->i_gid); 507 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); 508 raw_inode->i_size = cpu_to_le64(inode->i_size); 509 raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec); 510 raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec); 511 raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec); 512 raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec); 513 raw_inode->i_blocks = cpu_to_le64(inode->i_blocks); 514 515 raw_inode->i_flags = cpu_to_le32(ii->i_flags); 516 raw_inode->i_generation = cpu_to_le32(inode->i_generation); 517 518 if (has_bmap) 519 nilfs_bmap_write(ii->i_bmap, raw_inode); 520 else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) 521 raw_inode->i_device_code = 522 cpu_to_le64(huge_encode_dev(inode->i_rdev)); 523 /* When extending inode, nilfs->ns_inode_size should be checked 524 for substitutions of appended fields */ 525 } 526 527 void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh) 528 { 529 ino_t ino = inode->i_ino; 530 struct nilfs_inode_info *ii = NILFS_I(inode); 531 struct super_block *sb = inode->i_sb; 532 struct nilfs_sb_info *sbi = NILFS_SB(sb); 533 struct nilfs_inode *raw_inode; 534 535 raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, ibh); 536 537 if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state)) 538 memset(raw_inode, 0, NILFS_MDT(sbi->s_ifile)->mi_entry_size); 539 set_bit(NILFS_I_INODE_DIRTY, &ii->i_state); 540 541 nilfs_write_inode_common(inode, raw_inode, 0); 542 /* XXX: call with has_bmap = 0 is a workaround to avoid 543 deadlock of bmap. This delays update of i_bmap to just 544 before writing */ 545 nilfs_ifile_unmap_inode(sbi->s_ifile, ino, ibh); 546 } 547 548 #define NILFS_MAX_TRUNCATE_BLOCKS 16384 /* 64MB for 4KB block */ 549 550 static void nilfs_truncate_bmap(struct nilfs_inode_info *ii, 551 unsigned long from) 552 { 553 unsigned long b; 554 int ret; 555 556 if (!test_bit(NILFS_I_BMAP, &ii->i_state)) 557 return; 558 repeat: 559 ret = nilfs_bmap_last_key(ii->i_bmap, &b); 560 if (ret == -ENOENT) 561 return; 562 else if (ret < 0) 563 goto failed; 564 565 if (b < from) 566 return; 567 568 b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from); 569 ret = nilfs_bmap_truncate(ii->i_bmap, b); 570 nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb); 571 if (!ret || (ret == -ENOMEM && 572 nilfs_bmap_truncate(ii->i_bmap, b) == 0)) 573 goto repeat; 574 575 failed: 576 if (ret == -EINVAL) 577 nilfs_error(ii->vfs_inode.i_sb, __func__, 578 "bmap is broken (ino=%lu)", ii->vfs_inode.i_ino); 579 else 580 nilfs_warning(ii->vfs_inode.i_sb, __func__, 581 "failed to truncate bmap (ino=%lu, err=%d)", 582 ii->vfs_inode.i_ino, ret); 583 } 584 585 void nilfs_truncate(struct inode *inode) 586 { 587 unsigned long blkoff; 588 unsigned int blocksize; 589 struct nilfs_transaction_info ti; 590 struct super_block *sb = inode->i_sb; 591 struct nilfs_inode_info *ii = NILFS_I(inode); 592 593 if (!test_bit(NILFS_I_BMAP, &ii->i_state)) 594 return; 595 if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) 596 return; 597 598 blocksize = sb->s_blocksize; 599 blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits; 600 nilfs_transaction_begin(sb, &ti, 0); /* never fails */ 601 602 block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block); 603 604 nilfs_truncate_bmap(ii, blkoff); 605 606 inode->i_mtime = inode->i_ctime = CURRENT_TIME; 607 if (IS_SYNC(inode)) 608 nilfs_set_transaction_flag(NILFS_TI_SYNC); 609 610 nilfs_mark_inode_dirty(inode); 611 nilfs_set_file_dirty(NILFS_SB(sb), inode, 0); 612 nilfs_transaction_commit(sb); 613 /* May construct a logical segment and may fail in sync mode. 614 But truncate has no return value. */ 615 } 616 617 void nilfs_delete_inode(struct inode *inode) 618 { 619 struct nilfs_transaction_info ti; 620 struct super_block *sb = inode->i_sb; 621 struct nilfs_inode_info *ii = NILFS_I(inode); 622 623 if (unlikely(is_bad_inode(inode))) { 624 if (inode->i_data.nrpages) 625 truncate_inode_pages(&inode->i_data, 0); 626 clear_inode(inode); 627 return; 628 } 629 nilfs_transaction_begin(sb, &ti, 0); /* never fails */ 630 631 if (inode->i_data.nrpages) 632 truncate_inode_pages(&inode->i_data, 0); 633 634 nilfs_truncate_bmap(ii, 0); 635 nilfs_mark_inode_dirty(inode); 636 nilfs_free_inode(inode); 637 /* nilfs_free_inode() marks inode buffer dirty */ 638 if (IS_SYNC(inode)) 639 nilfs_set_transaction_flag(NILFS_TI_SYNC); 640 nilfs_transaction_commit(sb); 641 /* May construct a logical segment and may fail in sync mode. 642 But delete_inode has no return value. */ 643 } 644 645 int nilfs_setattr(struct dentry *dentry, struct iattr *iattr) 646 { 647 struct nilfs_transaction_info ti; 648 struct inode *inode = dentry->d_inode; 649 struct super_block *sb = inode->i_sb; 650 int err; 651 652 err = inode_change_ok(inode, iattr); 653 if (err) 654 return err; 655 656 err = nilfs_transaction_begin(sb, &ti, 0); 657 if (unlikely(err)) 658 return err; 659 err = inode_setattr(inode, iattr); 660 if (!err && (iattr->ia_valid & ATTR_MODE)) 661 err = nilfs_acl_chmod(inode); 662 if (likely(!err)) 663 err = nilfs_transaction_commit(sb); 664 else 665 nilfs_transaction_abort(sb); 666 667 return err; 668 } 669 670 int nilfs_load_inode_block(struct nilfs_sb_info *sbi, struct inode *inode, 671 struct buffer_head **pbh) 672 { 673 struct nilfs_inode_info *ii = NILFS_I(inode); 674 int err; 675 676 spin_lock(&sbi->s_inode_lock); 677 if (ii->i_bh == NULL) { 678 spin_unlock(&sbi->s_inode_lock); 679 err = nilfs_ifile_get_inode_block(sbi->s_ifile, inode->i_ino, 680 pbh); 681 if (unlikely(err)) 682 return err; 683 spin_lock(&sbi->s_inode_lock); 684 if (ii->i_bh == NULL) 685 ii->i_bh = *pbh; 686 else { 687 brelse(*pbh); 688 *pbh = ii->i_bh; 689 } 690 } else 691 *pbh = ii->i_bh; 692 693 get_bh(*pbh); 694 spin_unlock(&sbi->s_inode_lock); 695 return 0; 696 } 697 698 int nilfs_inode_dirty(struct inode *inode) 699 { 700 struct nilfs_inode_info *ii = NILFS_I(inode); 701 struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb); 702 int ret = 0; 703 704 if (!list_empty(&ii->i_dirty)) { 705 spin_lock(&sbi->s_inode_lock); 706 ret = test_bit(NILFS_I_DIRTY, &ii->i_state) || 707 test_bit(NILFS_I_BUSY, &ii->i_state); 708 spin_unlock(&sbi->s_inode_lock); 709 } 710 return ret; 711 } 712 713 int nilfs_set_file_dirty(struct nilfs_sb_info *sbi, struct inode *inode, 714 unsigned nr_dirty) 715 { 716 struct nilfs_inode_info *ii = NILFS_I(inode); 717 718 atomic_add(nr_dirty, &sbi->s_nilfs->ns_ndirtyblks); 719 720 if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state)) 721 return 0; 722 723 spin_lock(&sbi->s_inode_lock); 724 if (!test_bit(NILFS_I_QUEUED, &ii->i_state) && 725 !test_bit(NILFS_I_BUSY, &ii->i_state)) { 726 /* Because this routine may race with nilfs_dispose_list(), 727 we have to check NILFS_I_QUEUED here, too. */ 728 if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) { 729 /* This will happen when somebody is freeing 730 this inode. */ 731 nilfs_warning(sbi->s_super, __func__, 732 "cannot get inode (ino=%lu)\n", 733 inode->i_ino); 734 spin_unlock(&sbi->s_inode_lock); 735 return -EINVAL; /* NILFS_I_DIRTY may remain for 736 freeing inode */ 737 } 738 list_del(&ii->i_dirty); 739 list_add_tail(&ii->i_dirty, &sbi->s_dirty_files); 740 set_bit(NILFS_I_QUEUED, &ii->i_state); 741 } 742 spin_unlock(&sbi->s_inode_lock); 743 return 0; 744 } 745 746 int nilfs_mark_inode_dirty(struct inode *inode) 747 { 748 struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb); 749 struct buffer_head *ibh; 750 int err; 751 752 err = nilfs_load_inode_block(sbi, inode, &ibh); 753 if (unlikely(err)) { 754 nilfs_warning(inode->i_sb, __func__, 755 "failed to reget inode block.\n"); 756 return err; 757 } 758 nilfs_update_inode(inode, ibh); 759 nilfs_mdt_mark_buffer_dirty(ibh); 760 nilfs_mdt_mark_dirty(sbi->s_ifile); 761 brelse(ibh); 762 return 0; 763 } 764 765 /** 766 * nilfs_dirty_inode - reflect changes on given inode to an inode block. 767 * @inode: inode of the file to be registered. 768 * 769 * nilfs_dirty_inode() loads a inode block containing the specified 770 * @inode and copies data from a nilfs_inode to a corresponding inode 771 * entry in the inode block. This operation is excluded from the segment 772 * construction. This function can be called both as a single operation 773 * and as a part of indivisible file operations. 774 */ 775 void nilfs_dirty_inode(struct inode *inode) 776 { 777 struct nilfs_transaction_info ti; 778 779 if (is_bad_inode(inode)) { 780 nilfs_warning(inode->i_sb, __func__, 781 "tried to mark bad_inode dirty. ignored.\n"); 782 dump_stack(); 783 return; 784 } 785 nilfs_transaction_begin(inode->i_sb, &ti, 0); 786 nilfs_mark_inode_dirty(inode); 787 nilfs_transaction_commit(inode->i_sb); /* never fails */ 788 } 789