// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi.
 *
 */

#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include <linux/fiemap.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"

/**
 * struct nilfs_iget_args - arguments used during comparison between inodes
 * @ino: inode number
 * @cno: checkpoint number
 * @root: pointer on NILFS root object (mounted checkpoint)
 * @for_gc: inode for GC flag
 * @for_btnc: inode for B-tree node cache flag
 * @for_shadow: inode for shadowed page cache flag
 */
struct nilfs_iget_args {
	u64 ino;
	__u64 cno;
	struct nilfs_root *root;
	bool for_gc;
	bool for_btnc;
	bool for_shadow;
};

static int nilfs_iget_test(struct inode *inode, void *opaque);

void nilfs_inode_add_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_add_bytes(inode, i_blocksize(inode) * n);
	if (root)
		atomic64_add(n, &root->blocks_count);
}

void nilfs_inode_sub_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_sub_bytes(inode, i_blocksize(inode) * n);
	if (root)
		atomic64_sub(n, &root->blocks_count);
}

/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode - inode struct of the target file
 * @blkoff - file block number
 * @bh_result - buffer head to be mapped on
 * @create - indicate whether allocating the block or not when it has not
 *	been allocated yet.
 *
 * This function does not issue actual read request of the specified data
 * block. It is done by VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 blknum = 0;
	int err = 0, ret;
	unsigned int maxblocks = bh_result->b_size >> inode->i_blkbits;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	if (ret >= 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		if (ret > 0)
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * The get_block() function could be called
				 * from multiple callers for an inode.
				 * However, the page having this block must
				 * be locked in this case.
				 */
				nilfs_warn(inode->i_sb,
					   "%s (ino=%lu): a race condition while inserting a data block at offset=%llu",
					   __func__, inode->i_ino,
					   (unsigned long long)blkoff);
				err = 0;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_mark_inode_dirty_sync(inode);
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		set_buffer_delay(bh_result);
		map_bh(bh_result, inode->i_sb, 0);
		/* Disk block number must be changed to proper value */

	} else if (ret == -ENOENT) {
		/*
		 * not found is not error (e.g. hole); must return without
		 * the mapped state flag.
		 */
		;
	} else {
		err = ret;
	}

 out:
	return err;
}

/**
 * nilfs_readpage() - implement readpage() method of nilfs_aops {}
 * address_space_operations.
 * @file - file struct of the file to be read
 * @page - the page to be read
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, nilfs_get_block);
}

static void nilfs_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, nilfs_get_block);
}

static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err = 0;

	if (sb_rdonly(inode->i_sb)) {
		nilfs_clear_dirty_pages(mapping, false);
		return -EROFS;
	}

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
						    wbc->range_start,
						    wbc->range_end);
	return err;
}

static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	if (sb_rdonly(inode->i_sb)) {
		/*
		 * It means that filesystem was remounted in read-only
		 * mode because of error or metadata corruption. But we
		 * have dirty pages that try to be flushed in background.
		 * So, here we simply discard this dirty page.
		 */
		nilfs_clear_dirty_page(page, false);
		unlock_page(page);
		return -EROFS;
	}

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}

static bool nilfs_dirty_folio(struct address_space *mapping,
		struct folio *folio)
{
	struct inode *inode = mapping->host;
	struct buffer_head *head;
	unsigned int nr_dirty = 0;
	bool ret = filemap_dirty_folio(mapping, folio);

	/*
	 * The page may not be locked, eg if called from try_to_unmap_one()
	 */
	spin_lock(&mapping->private_lock);
	head = folio_buffers(folio);
	if (head) {
		struct buffer_head *bh = head;

		do {
			/* Do not mark hole blocks dirty */
			if (buffer_dirty(bh) || !buffer_mapped(bh))
				continue;

			set_buffer_dirty(bh);
			nr_dirty++;
		} while (bh = bh->b_this_page, bh != head);
	} else if (ret) {
		nr_dirty = 1 << (folio_shift(folio) - inode->i_blkbits);
	}
	spin_unlock(&mapping->private_lock);

	if (nr_dirty)
		nilfs_set_file_dirty(inode, nr_dirty);
	return ret;
}

void nilfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
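		/*
		 * The failed write may have instantiated pages beyond
		 * i_size; drop them here and trim the block mapping via
		 * nilfs_truncate() below.
		 */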
		truncate_pagecache(inode, inode->i_size);
		nilfs_truncate(inode);
	}
}

static int nilfs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)

{
	struct inode *inode = mapping->host;
	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

	if (unlikely(err))
		return err;

	err = block_write_begin(mapping, pos, len, pagep, nilfs_get_block);
	if (unlikely(err)) {
		nilfs_write_failed(mapping, pos + len);
		nilfs_transaction_abort(inode->i_sb);
	}
	return err;
}

static int nilfs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned int start = pos & (PAGE_SIZE - 1);
	unsigned int nr_dirty;
	int err;

	nr_dirty = nilfs_page_count_clean_buffers(page, start,
						  start + copied);
	copied = generic_write_end(file, mapping, pos, len, copied, page,
				   fsdata);
	nilfs_set_file_dirty(inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
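	/* err == 0 means success; the GNU "?:" form then yields "copied". */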
	return err ? : copied;
}

static ssize_t
nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iov_iter_rw(iter) == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	return blockdev_direct_IO(iocb, inode, iter, nilfs_get_block);
}

const struct address_space_operations nilfs_aops = {
	.writepage		= nilfs_writepage,
	.readpage		= nilfs_readpage,
	.writepages		= nilfs_writepages,
	.dirty_folio		= nilfs_dirty_folio,
	.readahead		= nilfs_readahead,
	.write_begin		= nilfs_write_begin,
	.write_end		= nilfs_write_end,
	/* .releasepage		= nilfs_releasepage, */
	.invalidate_folio	= block_invalidate_folio,
	.direct_IO		= nilfs_direct_IO,
	.is_partially_uptodate	= block_is_partially_uptodate,
};

static int nilfs_insert_inode_locked(struct inode *inode,
				     struct nilfs_root *root,
				     unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = false,
		.for_btnc = false, .for_shadow = false
	};

	return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			   mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = BIT(NILFS_I_NEW);
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic64_inc(&root->inodes_count);
	inode_init_owner(&init_user_ns, inode, dir, mode);
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_after_creation;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = nilfs_mask_flags(
		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&nilfs->ns_next_gen_lock);
	inode->i_generation = nilfs->ns_next_generation++;
	spin_unlock(&nilfs->ns_next_gen_lock);
	if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
		err = -EIO;
		goto failed_after_creation;
	}

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		/*
		 * Never occur.  When supporting nilfs_init_acl(),
		 * proper cancellation of above jobs should be considered.
		 */
		goto failed_after_creation;

	return inode;

 failed_after_creation:
	clear_nlink(inode);
	if (inode->i_state & I_NEW)
		unlock_new_inode(inode);
	iput(inode);  /*
		       * raw_inode will be deleted through
		       * nilfs_evict_inode().
		       */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);
 failed:
	return ERR_PTR(err);
}

void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	inode_set_flags(inode, new_fl, S_SYNC | S_APPEND | S_IMMUTABLE |
			S_NOATIME | S_DIRSYNC);
}

int nilfs_read_inode_common(struct inode *inode,
			    struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
	i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
	inode->i_size = le64_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	if (inode->i_nlink == 0)
		return -ESTALE; /* this inode is deleted */

	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
		0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
	ii->i_dir_start_lookup = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
		if (err < 0)
			return err;
		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}
	return 0;
}

static int __nilfs_read_inode(struct super_block *sb,
			      struct nilfs_root *root, unsigned long ino,
			      struct inode *inode)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	nilfs_set_inode_flags(inode);
	mapping_set_gfp_mask(inode->i_mapping,
			   mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
	return 0;

 failed_unmap:
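	/* Error path: release the mapped raw inode and its buffer head. */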
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);

 bad_inode:
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	return err;
}

static int nilfs_iget_test(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;
	struct nilfs_inode_info *ii;

	if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
		return 0;

	ii = NILFS_I(inode);
	if (test_bit(NILFS_I_BTNC, &ii->i_state)) {
		if (!args->for_btnc)
			return 0;
	} else if (args->for_btnc) {
		return 0;
	}
	if (test_bit(NILFS_I_SHADOW, &ii->i_state)) {
		if (!args->for_shadow)
			return 0;
	} else if (args->for_shadow) {
		return 0;
	}

	if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
		return !args->for_gc;

	return args->for_gc && args->cno == ii->i_cno;
}

static int nilfs_iget_set(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;

	inode->i_ino = args->ino;
	NILFS_I(inode)->i_cno = args->cno;
	NILFS_I(inode)->i_root = args->root;
	if (args->root && args->ino == NILFS_ROOT_INO)
		nilfs_get_root(args->root);

	if (args->for_gc)
		NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
	if (args->for_btnc)
		NILFS_I(inode)->i_state |= BIT(NILFS_I_BTNC);
	if (args->for_shadow)
		NILFS_I(inode)->i_state |= BIT(NILFS_I_SHADOW);
	return 0;
}

struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
			    unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = false,
		.for_btnc = false, .for_shadow = false
	};

	return ilookup5(sb, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
				unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = false,
		.for_btnc = false, .for_shadow = false
	};

	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}

struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
			 unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = nilfs_iget_locked(sb, root, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = __nilfs_read_inode(sb, root, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
				__u64 cno)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = NULL, .cno = cno, .for_gc = true,
		.for_btnc = false, .for_shadow = false
	};
	struct inode *inode;
	int err;

	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = nilfs_init_gcinode(inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

/**
 * nilfs_attach_btree_node_cache - attach a B-tree node cache to the inode
 * @inode: inode object
 *
 * nilfs_attach_btree_node_cache() attaches a B-tree node cache to @inode,
 * or does nothing if the inode already has it.  This function allocates
 * an additional inode to maintain page cache of B-tree nodes one-on-one.
 *
 * Return Value: On success, 0 is returned. On errors, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_attach_btree_node_cache(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *btnc_inode;
	struct nilfs_iget_args args;

	if (ii->i_assoc_inode)
		return 0;

	args.ino = inode->i_ino;
	args.root = ii->i_root;
	args.cno = ii->i_cno;
	args.for_gc = test_bit(NILFS_I_GCINODE, &ii->i_state) != 0;
	args.for_btnc = true;
	args.for_shadow = test_bit(NILFS_I_SHADOW, &ii->i_state) != 0;

	btnc_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
				  nilfs_iget_set, &args);
	if (unlikely(!btnc_inode))
		return -ENOMEM;
	if (btnc_inode->i_state & I_NEW) {
		nilfs_init_btnc_inode(btnc_inode);
		unlock_new_inode(btnc_inode);
	}
	NILFS_I(btnc_inode)->i_assoc_inode = inode;
	NILFS_I(btnc_inode)->i_bmap = ii->i_bmap;
	ii->i_assoc_inode = btnc_inode;

	return 0;
}

/**
 * nilfs_detach_btree_node_cache - detach the B-tree node cache from the inode
 * @inode: inode object
 *
 * nilfs_detach_btree_node_cache() detaches the B-tree node cache and its
 * holder inode bound to @inode, or does nothing if @inode doesn't have it.
 */
void nilfs_detach_btree_node_cache(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *btnc_inode = ii->i_assoc_inode;

	if (btnc_inode) {
		NILFS_I(btnc_inode)->i_assoc_inode = NULL;
		ii->i_assoc_inode = NULL;
		iput(btnc_inode);
	}
}

/**
 * nilfs_iget_for_shadow - obtain inode for shadow mapping
 * @inode: inode object that uses shadow mapping
 *
 * nilfs_iget_for_shadow() allocates a pair of inodes that holds page
 * caches for shadow mapping.  The page cache for data pages is set up
 * in one inode and the one for b-tree node pages is set up in the
 * other inode, which is attached to the former inode.
 *
 * Return Value: On success, a pointer to the inode for data pages is
 * returned.  On errors, one of the following negative error codes is returned
 * in a pointer type.
 *
 * %-ENOMEM - Insufficient memory available.
 */
struct inode *nilfs_iget_for_shadow(struct inode *inode)
{
	struct nilfs_iget_args args = {
		.ino = inode->i_ino, .root = NULL, .cno = 0, .for_gc = false,
		.for_btnc = false, .for_shadow = true
	};
	struct inode *s_inode;
	int err;

	s_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
			       nilfs_iget_set, &args);
	if (unlikely(!s_inode))
		return ERR_PTR(-ENOMEM);
	if (!(s_inode->i_state & I_NEW))
		return inode;

	NILFS_I(s_inode)->i_flags = 0;
	memset(NILFS_I(s_inode)->i_bmap, 0, sizeof(struct nilfs_bmap));
	mapping_set_gfp_mask(s_inode->i_mapping, GFP_NOFS);

	err = nilfs_attach_btree_node_cache(s_inode);
	if (unlikely(err)) {
		iget_failed(s_inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(s_inode);
	return s_inode;
}

void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode, int has_bmap)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
	raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
		struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

		/* zero-fill unused portion in the case of super root block */
		raw_inode->i_xattr = 0;
		raw_inode->i_pad = 0;
		memset((void *)raw_inode + sizeof(*raw_inode), 0,
		       nilfs->ns_inode_size - sizeof(*raw_inode));
	}

	if (has_bmap)
		nilfs_bmap_write(ii->i_bmap, raw_inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(huge_encode_dev(inode->i_rdev));
	/*
	 * When extending inode, nilfs->ns_inode_size should be checked
	 * for substitutions of appended fields.
	 */
}

void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *ifile = ii->i_root->ifile;
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
	if (flags & I_DIRTY_DATASYNC)
		set_bit(NILFS_I_INODE_SYNC, &ii->i_state);

	nilfs_write_inode_common(inode, raw_inode, 0);
	/*
	 * XXX: call with has_bmap = 0 is a workaround to avoid
	 * deadlock of bmap.  This delays update of i_bmap to just
	 * before writing.
	 */

	nilfs_ifile_unmap_inode(ifile, ino, ibh);
}

#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */

static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	__u64 b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	b -= min_t(__u64, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

failed:
	nilfs_warn(ii->vfs_inode.i_sb, "error %d truncating bmap (ino=%lu)",
		   ret, ii->vfs_inode.i_ino);
}

void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode->i_mtime = inode->i_ctime = current_time(inode);
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_mark_inode_dirty(inode);
	nilfs_set_file_dirty(inode, 0);
	nilfs_transaction_commit(sb);
	/*
	 * May construct a logical segment and may fail in sync mode.
	 * But truncate has no return value.
	 */
}

static void nilfs_clear_inode(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	/*
	 * Free resources allocated in nilfs_read_inode(), here.
	 */
	BUG_ON(!list_empty(&ii->i_dirty));
	brelse(ii->i_bh);
	ii->i_bh = NULL;

	if (nilfs_is_metadata_file_inode(inode))
		nilfs_mdt_clear(inode);

	if (test_bit(NILFS_I_BMAP, &ii->i_state))
		nilfs_bmap_clear(ii->i_bmap);

	if (!test_bit(NILFS_I_BTNC, &ii->i_state))
		nilfs_detach_btree_node_cache(inode);

	if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
		nilfs_put_root(ii->i_root);
}

void nilfs_evict_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int ret;

	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
		nilfs_clear_inode(inode);
		return;
	}
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	truncate_inode_pages_final(&inode->i_data);

	/* TODO: some of the following operations may fail.  */
	nilfs_truncate_bmap(ii, 0);
	nilfs_mark_inode_dirty(inode);
	clear_inode(inode);

	ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
	if (!ret)
		atomic64_dec(&ii->i_root->inodes_count);

	nilfs_clear_inode(inode);

	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	nilfs_transaction_commit(sb);
	/*
	 * May construct a logical segment and may fail in sync mode.
	 * But delete_inode has no return value.
	 */
}

int nilfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
		  struct iattr *iattr)
{
	struct nilfs_transaction_info ti;
	struct inode *inode = d_inode(dentry);
	struct super_block *sb = inode->i_sb;
	int err;

	err = setattr_prepare(&init_user_ns, dentry, iattr);
	if (err)
		return err;

	err = nilfs_transaction_begin(sb, &ti, 0);
	if (unlikely(err))
		return err;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		inode_dio_wait(inode);
		truncate_setsize(inode, iattr->ia_size);
		nilfs_truncate(inode);
	}

	setattr_copy(&init_user_ns, inode, iattr);
	mark_inode_dirty(inode);

	if (iattr->ia_valid & ATTR_MODE) {
		err = nilfs_acl_chmod(inode);
		if (unlikely(err))
			goto out_err;
	}

	return nilfs_transaction_commit(sb);

out_err:
	nilfs_transaction_abort(sb);
	return err;
}

int nilfs_permission(struct user_namespace *mnt_userns, struct inode *inode,
		     int mask)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	if ((mask & MAY_WRITE) && root &&
	    root->cno != NILFS_CPTREE_CURRENT_CNO)
		return -EROFS; /* snapshot is not writable */

	return generic_permission(&init_user_ns, inode, mask);
}

int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	spin_lock(&nilfs->ns_inode_lock);
	if (ii->i_bh == NULL) {
		spin_unlock(&nilfs->ns_inode_lock);
		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
						  inode->i_ino, pbh);
		if (unlikely(err))
			return err;
		spin_lock(&nilfs->ns_inode_lock);
		if (ii->i_bh == NULL)
			ii->i_bh = *pbh;
		else {
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else
		*pbh = ii->i_bh;

	get_bh(*pbh);
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

int nilfs_inode_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	int ret = 0;

	if (!list_empty(&ii->i_dirty)) {
		spin_lock(&nilfs->ns_inode_lock);
		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
			test_bit(NILFS_I_BUSY, &ii->i_state);
		spin_unlock(&nilfs->ns_inode_lock);
	}
	return ret;
}

int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

	atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);

	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/*
		 * Because this routine may race with nilfs_dispose_list(),
		 * we have to check NILFS_I_QUEUED here, too.
		 */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/*
			 * This will happen when somebody is freeing
			 * this inode.
			 */
			nilfs_warn(inode->i_sb,
				   "cannot set file dirty (ino=%lu): the file is being freed",
				   inode->i_ino);
			spin_unlock(&nilfs->ns_inode_lock);
			return -EINVAL; /*
					 * NILFS_I_DIRTY may remain for
					 * freeing inode.
					 */
		}
		list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
{
	struct buffer_head *ibh;
	int err;

	err = nilfs_load_inode_block(inode, &ibh);
	if (unlikely(err)) {
		nilfs_warn(inode->i_sb,
			   "cannot mark inode dirty (ino=%lu): error %d loading inode block",
			   inode->i_ino, err);
		return err;
	}
	nilfs_update_inode(inode, ibh, flags);
	mark_buffer_dirty(ibh);
	nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
	brelse(ibh);
	return 0;
}

/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from a nilfs_inode to a corresponding inode
 * entry in the inode block. This operation is excluded from the segment
 * construction. This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode, int flags)
{
	struct nilfs_transaction_info ti;
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	if (is_bad_inode(inode)) {
		nilfs_warn(inode->i_sb,
ignored."); 110505fe58fdSRyusuke Konishi dump_stack(); 110605fe58fdSRyusuke Konishi return; 110705fe58fdSRyusuke Konishi } 11087d6cd92fSRyusuke Konishi if (mdi) { 11097d6cd92fSRyusuke Konishi nilfs_mdt_mark_dirty(inode); 11107d6cd92fSRyusuke Konishi return; 11117d6cd92fSRyusuke Konishi } 111205fe58fdSRyusuke Konishi nilfs_transaction_begin(inode->i_sb, &ti, 0); 1113b9f66140SAndreas Rohner __nilfs_mark_inode_dirty(inode, flags); 111447420c79SRyusuke Konishi nilfs_transaction_commit(inode->i_sb); /* never fails */ 111505fe58fdSRyusuke Konishi } 1116622daaffSRyusuke Konishi 1117622daaffSRyusuke Konishi int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 1118622daaffSRyusuke Konishi __u64 start, __u64 len) 1119622daaffSRyusuke Konishi { 11200ef28f9aSRyusuke Konishi struct the_nilfs *nilfs = inode->i_sb->s_fs_info; 1121622daaffSRyusuke Konishi __u64 logical = 0, phys = 0, size = 0; 1122622daaffSRyusuke Konishi __u32 flags = 0; 1123622daaffSRyusuke Konishi loff_t isize; 1124622daaffSRyusuke Konishi sector_t blkoff, end_blkoff; 1125622daaffSRyusuke Konishi sector_t delalloc_blkoff; 1126622daaffSRyusuke Konishi unsigned long delalloc_blklen; 1127622daaffSRyusuke Konishi unsigned int blkbits = inode->i_blkbits; 1128622daaffSRyusuke Konishi int ret, n; 1129622daaffSRyusuke Konishi 113045dd052eSChristoph Hellwig ret = fiemap_prep(inode, fieinfo, start, &len, 0); 1131622daaffSRyusuke Konishi if (ret) 1132622daaffSRyusuke Konishi return ret; 1133622daaffSRyusuke Konishi 11345955102cSAl Viro inode_lock(inode); 1135622daaffSRyusuke Konishi 1136622daaffSRyusuke Konishi isize = i_size_read(inode); 1137622daaffSRyusuke Konishi 1138622daaffSRyusuke Konishi blkoff = start >> blkbits; 1139622daaffSRyusuke Konishi end_blkoff = (start + len - 1) >> blkbits; 1140622daaffSRyusuke Konishi 1141622daaffSRyusuke Konishi delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff, 1142622daaffSRyusuke Konishi &delalloc_blkoff); 1143622daaffSRyusuke Konishi 1144622daaffSRyusuke Konishi do { 1145622daaffSRyusuke Konishi __u64 blkphy; 1146622daaffSRyusuke Konishi unsigned int maxblocks; 1147622daaffSRyusuke Konishi 1148622daaffSRyusuke Konishi if (delalloc_blklen && blkoff == delalloc_blkoff) { 1149622daaffSRyusuke Konishi if (size) { 1150622daaffSRyusuke Konishi /* End of the current extent */ 1151622daaffSRyusuke Konishi ret = fiemap_fill_next_extent( 1152622daaffSRyusuke Konishi fieinfo, logical, phys, size, flags); 1153622daaffSRyusuke Konishi if (ret) 1154622daaffSRyusuke Konishi break; 1155622daaffSRyusuke Konishi } 1156622daaffSRyusuke Konishi if (blkoff > end_blkoff) 1157622daaffSRyusuke Konishi break; 1158622daaffSRyusuke Konishi 1159622daaffSRyusuke Konishi flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC; 1160622daaffSRyusuke Konishi logical = blkoff << blkbits; 1161622daaffSRyusuke Konishi phys = 0; 1162622daaffSRyusuke Konishi size = delalloc_blklen << blkbits; 1163622daaffSRyusuke Konishi 1164622daaffSRyusuke Konishi blkoff = delalloc_blkoff + delalloc_blklen; 1165622daaffSRyusuke Konishi delalloc_blklen = nilfs_find_uncommitted_extent( 1166622daaffSRyusuke Konishi inode, blkoff, &delalloc_blkoff); 1167622daaffSRyusuke Konishi continue; 1168622daaffSRyusuke Konishi } 1169622daaffSRyusuke Konishi 1170622daaffSRyusuke Konishi /* 1171622daaffSRyusuke Konishi * Limit the number of blocks that we look up so as 1172622daaffSRyusuke Konishi * not to get into the next delayed allocation extent. 
1117622daaffSRyusuke Konishi int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1118622daaffSRyusuke Konishi 		 __u64 start, __u64 len)
1119622daaffSRyusuke Konishi {
11200ef28f9aSRyusuke Konishi 	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
1121622daaffSRyusuke Konishi 	__u64 logical = 0, phys = 0, size = 0;
1122622daaffSRyusuke Konishi 	__u32 flags = 0;
1123622daaffSRyusuke Konishi 	loff_t isize;
1124622daaffSRyusuke Konishi 	sector_t blkoff, end_blkoff;
1125622daaffSRyusuke Konishi 	sector_t delalloc_blkoff;
1126622daaffSRyusuke Konishi 	unsigned long delalloc_blklen;
1127622daaffSRyusuke Konishi 	unsigned int blkbits = inode->i_blkbits;
1128622daaffSRyusuke Konishi 	int ret, n;
1129622daaffSRyusuke Konishi 
113045dd052eSChristoph Hellwig 	ret = fiemap_prep(inode, fieinfo, start, &len, 0);
1131622daaffSRyusuke Konishi 	if (ret)
1132622daaffSRyusuke Konishi 		return ret;
1133622daaffSRyusuke Konishi 
11345955102cSAl Viro 	inode_lock(inode);
1135622daaffSRyusuke Konishi 
1136622daaffSRyusuke Konishi 	isize = i_size_read(inode);
1137622daaffSRyusuke Konishi 
1138622daaffSRyusuke Konishi 	blkoff = start >> blkbits;
1139622daaffSRyusuke Konishi 	end_blkoff = (start + len - 1) >> blkbits;
1140622daaffSRyusuke Konishi 
1141622daaffSRyusuke Konishi 	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
1142622daaffSRyusuke Konishi 							&delalloc_blkoff);
1143622daaffSRyusuke Konishi 
1144622daaffSRyusuke Konishi 	do {
1145622daaffSRyusuke Konishi 		__u64 blkphy;
1146622daaffSRyusuke Konishi 		unsigned int maxblocks;
1147622daaffSRyusuke Konishi 
1148622daaffSRyusuke Konishi 		if (delalloc_blklen && blkoff == delalloc_blkoff) {
1149622daaffSRyusuke Konishi 			if (size) {
1150622daaffSRyusuke Konishi 				/* End of the current extent */
1151622daaffSRyusuke Konishi 				ret = fiemap_fill_next_extent(
1152622daaffSRyusuke Konishi 					fieinfo, logical, phys, size, flags);
1153622daaffSRyusuke Konishi 				if (ret)
1154622daaffSRyusuke Konishi 					break;
1155622daaffSRyusuke Konishi 			}
1156622daaffSRyusuke Konishi 			if (blkoff > end_blkoff)
1157622daaffSRyusuke Konishi 				break;
1158622daaffSRyusuke Konishi 
1159622daaffSRyusuke Konishi 			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
1160622daaffSRyusuke Konishi 			logical = blkoff << blkbits;
1161622daaffSRyusuke Konishi 			phys = 0;
1162622daaffSRyusuke Konishi 			size = delalloc_blklen << blkbits;
1163622daaffSRyusuke Konishi 
1164622daaffSRyusuke Konishi 			blkoff = delalloc_blkoff + delalloc_blklen;
1165622daaffSRyusuke Konishi 			delalloc_blklen = nilfs_find_uncommitted_extent(
1166622daaffSRyusuke Konishi 				inode, blkoff, &delalloc_blkoff);
1167622daaffSRyusuke Konishi 			continue;
1168622daaffSRyusuke Konishi 		}
1169622daaffSRyusuke Konishi 
1170622daaffSRyusuke Konishi 		/*
1171622daaffSRyusuke Konishi 		 * Limit the number of blocks that we look up so as
1172622daaffSRyusuke Konishi 		 * not to get into the next delayed allocation extent.
1173622daaffSRyusuke Konishi 		 */
1174622daaffSRyusuke Konishi 		maxblocks = INT_MAX;
1175622daaffSRyusuke Konishi 		if (delalloc_blklen)
1176622daaffSRyusuke Konishi 			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
1177622daaffSRyusuke Konishi 					  maxblocks);
1178622daaffSRyusuke Konishi 		blkphy = 0;
1179622daaffSRyusuke Konishi 
1180622daaffSRyusuke Konishi 		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
1181622daaffSRyusuke Konishi 		n = nilfs_bmap_lookup_contig(
1182622daaffSRyusuke Konishi 			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
1183622daaffSRyusuke Konishi 		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
1184622daaffSRyusuke Konishi 
1185622daaffSRyusuke Konishi 		if (n < 0) {
1186622daaffSRyusuke Konishi 			int past_eof;
1187622daaffSRyusuke Konishi 
1188622daaffSRyusuke Konishi 			if (unlikely(n != -ENOENT))
1189622daaffSRyusuke Konishi 				break; /* error */
1190622daaffSRyusuke Konishi 
1191622daaffSRyusuke Konishi 			/* HOLE */
1192622daaffSRyusuke Konishi 			blkoff++;
1193622daaffSRyusuke Konishi 			past_eof = ((blkoff << blkbits) >= isize);
1194622daaffSRyusuke Konishi 
1195622daaffSRyusuke Konishi 			if (size) {
1196622daaffSRyusuke Konishi 				/* End of the current extent */
1197622daaffSRyusuke Konishi 
1198622daaffSRyusuke Konishi 				if (past_eof)
1199622daaffSRyusuke Konishi 					flags |= FIEMAP_EXTENT_LAST;
1200622daaffSRyusuke Konishi 
1201622daaffSRyusuke Konishi 				ret = fiemap_fill_next_extent(
1202622daaffSRyusuke Konishi 					fieinfo, logical, phys, size, flags);
1203622daaffSRyusuke Konishi 				if (ret)
1204622daaffSRyusuke Konishi 					break;
1205622daaffSRyusuke Konishi 				size = 0;
1206622daaffSRyusuke Konishi 			}
1207622daaffSRyusuke Konishi 			if (blkoff > end_blkoff || past_eof)
1208622daaffSRyusuke Konishi 				break;
1209622daaffSRyusuke Konishi 		} else {
1210622daaffSRyusuke Konishi 			if (size) {
1211622daaffSRyusuke Konishi 				if (phys && blkphy << blkbits == phys + size) {
1212622daaffSRyusuke Konishi 					/* The current extent goes on */
1213622daaffSRyusuke Konishi 					size += n << blkbits;
1214622daaffSRyusuke Konishi 				} else {
1215622daaffSRyusuke Konishi 					/* Terminate the current extent */
1216622daaffSRyusuke Konishi 					ret = fiemap_fill_next_extent(
1217622daaffSRyusuke Konishi 						fieinfo, logical, phys, size,
1218622daaffSRyusuke Konishi 						flags);
1219622daaffSRyusuke Konishi 					if (ret || blkoff > end_blkoff)
1220622daaffSRyusuke Konishi 						break;
1221622daaffSRyusuke Konishi 
1222622daaffSRyusuke Konishi 					/* Start another extent */
1223622daaffSRyusuke Konishi 					flags = FIEMAP_EXTENT_MERGED;
1224622daaffSRyusuke Konishi 					logical = blkoff << blkbits;
1225622daaffSRyusuke Konishi 					phys = blkphy << blkbits;
1226622daaffSRyusuke Konishi 					size = n << blkbits;
1227622daaffSRyusuke Konishi 				}
1228622daaffSRyusuke Konishi 			} else {
1229622daaffSRyusuke Konishi 				/* Start a new extent */
1230622daaffSRyusuke Konishi 				flags = FIEMAP_EXTENT_MERGED;
1231622daaffSRyusuke Konishi 				logical = blkoff << blkbits;
1232622daaffSRyusuke Konishi 				phys = blkphy << blkbits;
1233622daaffSRyusuke Konishi 				size = n << blkbits;
1234622daaffSRyusuke Konishi 			}
1235622daaffSRyusuke Konishi 			blkoff += n;
1236622daaffSRyusuke Konishi 		}
1237622daaffSRyusuke Konishi 		cond_resched();
1238622daaffSRyusuke Konishi 	} while (true);
1239622daaffSRyusuke Konishi 
1240622daaffSRyusuke Konishi 	/* If ret is 1 then we just hit the end of the extent array */
1241622daaffSRyusuke Konishi 	if (ret == 1)
1242622daaffSRyusuke Konishi 		ret = 0;
1243622daaffSRyusuke Konishi 
12445955102cSAl Viro 	inode_unlock(inode);
1245622daaffSRyusuke Konishi 	return ret;
1246622daaffSRyusuke Konishi }
1247
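/*
 * Usage sketch (hedged, userspace): nilfs_fiemap() above is what answers an
 * FS_IOC_FIEMAP ioctl on a NILFS2 file, reporting written extents, holes,
 * and delayed-allocation extents (FIEMAP_EXTENT_DELALLOC with a physical
 * address of zero).  A minimal program such as the following, built only on
 * the standard <linux/fiemap.h> UAPI, can be used to observe that output.
 */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(int argc, char *argv[])
{
	struct fiemap *fm;
	unsigned int i, nr = 32;	/* room for up to 32 extents */
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	fm = calloc(1, sizeof(*fm) + nr * sizeof(struct fiemap_extent));
	if (!fm)
		return 1;
	fm->fm_start = 0;
	fm->fm_length = FIEMAP_MAX_OFFSET;	/* map the whole file */
	fm->fm_flags = 0;	/* set FIEMAP_FLAG_SYNC to flush delalloc first */
	fm->fm_extent_count = nr;

	if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
		perror("FS_IOC_FIEMAP");
		return 1;
	}
	for (i = 0; i < fm->fm_mapped_extents; i++)
		printf("logical=%llu physical=%llu length=%llu flags=0x%x\n",
		       (unsigned long long)fm->fm_extents[i].fe_logical,
		       (unsigned long long)fm->fm_extents[i].fe_physical,
		       (unsigned long long)fm->fm_extents[i].fe_length,
		       fm->fm_extents[i].fe_flags);
	free(fm);
	close(fd);
	return 0;
}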