1ae98043fSRyusuke Konishi // SPDX-License-Identifier: GPL-2.0+
205fe58fdSRyusuke Konishi /*
394ee1d91SRyusuke Konishi * NILFS inode operations.
405fe58fdSRyusuke Konishi *
505fe58fdSRyusuke Konishi * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
605fe58fdSRyusuke Konishi *
74b420ab4SRyusuke Konishi * Written by Ryusuke Konishi.
805fe58fdSRyusuke Konishi *
905fe58fdSRyusuke Konishi */
1005fe58fdSRyusuke Konishi
1105fe58fdSRyusuke Konishi #include <linux/buffer_head.h>
125a0e3ad6STejun Heo #include <linux/gfp.h>
1305fe58fdSRyusuke Konishi #include <linux/mpage.h>
1456d7acc7SAndreas Rohner #include <linux/pagemap.h>
1505fe58fdSRyusuke Konishi #include <linux/writeback.h>
16e2e40f2cSChristoph Hellwig #include <linux/uio.h>
1710c5db28SChristoph Hellwig #include <linux/fiemap.h>
1805fe58fdSRyusuke Konishi #include "nilfs.h"
196fd1e5c9SAl Viro #include "btnode.h"
2005fe58fdSRyusuke Konishi #include "segment.h"
2105fe58fdSRyusuke Konishi #include "page.h"
2205fe58fdSRyusuke Konishi #include "mdt.h"
2305fe58fdSRyusuke Konishi #include "cpfile.h"
2405fe58fdSRyusuke Konishi #include "ifile.h"
2505fe58fdSRyusuke Konishi
/**
 * struct nilfs_iget_args - arguments used during comparison between inodes
 * @ino: inode number
 * @cno: checkpoint number
 * @root: pointer on NILFS root object (mounted checkpoint)
 * @for_gc: inode for GC flag
 * @for_btnc: inode for B-tree node cache flag
 * @for_shadow: inode for shadowed page cache flag
 *
 * This carries the lookup key passed to nilfs_iget_test()/nilfs_iget_set():
 * the same inode number can appear multiple times in the inode cache (per
 * checkpoint, for GC, for the B-tree node cache, or for a shadow cache),
 * so the boolean flags disambiguate which instance is wanted.
 */
struct nilfs_iget_args {
	u64 ino;		/* inode number (cache hash key) */
	__u64 cno;		/* checkpoint number; only compared for GC inodes */
	struct nilfs_root *root;	/* checkpoint root; NULL for rootless inodes */
	bool for_gc;		/* select the GC instance of @ino */
	bool for_btnc;		/* select the B-tree node cache instance */
	bool for_shadow;	/* select the shadow page cache instance */
};
4305fe58fdSRyusuke Konishi
44705304a8SRyusuke Konishi static int nilfs_iget_test(struct inode *inode, void *opaque);
45705304a8SRyusuke Konishi
nilfs_inode_add_blocks(struct inode * inode,int n)46be667377SRyusuke Konishi void nilfs_inode_add_blocks(struct inode *inode, int n)
47be667377SRyusuke Konishi {
48be667377SRyusuke Konishi struct nilfs_root *root = NILFS_I(inode)->i_root;
49be667377SRyusuke Konishi
5093407472SFabian Frederick inode_add_bytes(inode, i_blocksize(inode) * n);
51be667377SRyusuke Konishi if (root)
52e5f7f848SVyacheslav Dubeyko atomic64_add(n, &root->blocks_count);
53be667377SRyusuke Konishi }
54be667377SRyusuke Konishi
nilfs_inode_sub_blocks(struct inode * inode,int n)55be667377SRyusuke Konishi void nilfs_inode_sub_blocks(struct inode *inode, int n)
56be667377SRyusuke Konishi {
57be667377SRyusuke Konishi struct nilfs_root *root = NILFS_I(inode)->i_root;
58be667377SRyusuke Konishi
5993407472SFabian Frederick inode_sub_bytes(inode, i_blocksize(inode) * n);
60be667377SRyusuke Konishi if (root)
61e5f7f848SVyacheslav Dubeyko atomic64_sub(n, &root->blocks_count);
62be667377SRyusuke Konishi }
63be667377SRyusuke Konishi
/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode: inode struct of the target file
 * @blkoff: file block number
 * @bh_result: buffer head to be mapped on
 * @create: indicate whether allocating the block or not when it has not
 *          been allocated yet.
 *
 * This function does not issue actual read request of the specified data
 * block. It is done by VFS.
 *
 * Return: 0 on success (including the hole case, where @bh_result is left
 * unmapped), or a negative error code. -EAGAIN is returned when a
 * concurrent insertion of the same block is detected.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 blknum = 0;
	int err = 0, ret;
	unsigned int maxblocks = bh_result->b_size >> inode->i_blkbits;

	/*
	 * Translate the file block to a disk block; the DAT metadata file
	 * semaphore guards the lookup against concurrent relocation.
	 */
	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	if (ret >= 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		/* ret > 0 means a contiguous extent of that many blocks */
		if (ret > 0)
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		/* Block insertion must run inside a NILFS transaction. */
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * The get_block() function could be called
				 * from multiple callers for an inode.
				 * However, the page having this block must
				 * be locked in this case.
				 */
				nilfs_warn(inode->i_sb,
					   "%s (ino=%lu): a race condition while inserting a data block at offset=%llu",
					   __func__, inode->i_ino,
					   (unsigned long long)blkoff);
				err = -EAGAIN;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_mark_inode_dirty_sync(inode);
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		set_buffer_delay(bh_result);
		/*
		 * Map with block number 0 for now; the real disk block
		 * number is assigned later by the log writer.
		 */
		map_bh(bh_result, inode->i_sb, 0);
		/* Disk block number must be changed to proper value */

	} else if (ret == -ENOENT) {
		/*
		 * not found is not error (e.g. hole); must return without
		 * the mapped state flag.
		 */
		;
	} else {
		err = ret;
	}

 out:
	return err;
}
14105fe58fdSRyusuke Konishi
/**
 * nilfs_read_folio() - implement read_folio() method of nilfs_aops {}
 * address_space_operations.
 * @file: file struct of the file to be read
 * @folio: the folio to be read
 *
 * Return: 0 on success, or a negative error code from mpage_read_folio().
 */
static int nilfs_read_folio(struct file *file, struct folio *folio)
{
	/* Delegate to the generic mpage reader with our block mapper. */
	return mpage_read_folio(folio, nilfs_get_block);
}
15205fe58fdSRyusuke Konishi
/*
 * nilfs_readahead - readahead callback of nilfs_aops.
 * Delegates to the generic mpage readahead with nilfs_get_block() as
 * the block mapper.
 */
static void nilfs_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, nilfs_get_block);
}
15705fe58fdSRyusuke Konishi
nilfs_writepages(struct address_space * mapping,struct writeback_control * wbc)15805fe58fdSRyusuke Konishi static int nilfs_writepages(struct address_space *mapping,
15905fe58fdSRyusuke Konishi struct writeback_control *wbc)
16005fe58fdSRyusuke Konishi {
161f30bf3e4SRyusuke Konishi struct inode *inode = mapping->host;
162f30bf3e4SRyusuke Konishi int err = 0;
163f30bf3e4SRyusuke Konishi
164bc98a42cSDavid Howells if (sb_rdonly(inode->i_sb)) {
1658c26c4e2SVyacheslav Dubeyko nilfs_clear_dirty_pages(mapping, false);
1668c26c4e2SVyacheslav Dubeyko return -EROFS;
1678c26c4e2SVyacheslav Dubeyko }
1688c26c4e2SVyacheslav Dubeyko
169f30bf3e4SRyusuke Konishi if (wbc->sync_mode == WB_SYNC_ALL)
170f30bf3e4SRyusuke Konishi err = nilfs_construct_dsync_segment(inode->i_sb, inode,
171f30bf3e4SRyusuke Konishi wbc->range_start,
172f30bf3e4SRyusuke Konishi wbc->range_end);
173f30bf3e4SRyusuke Konishi return err;
17405fe58fdSRyusuke Konishi }
17505fe58fdSRyusuke Konishi
/*
 * nilfs_writepage - writepage callback of nilfs_aops.
 *
 * NILFS does not write the page back directly here: the page is
 * redirtied and flushing is delegated to the segment constructor
 * (log writer).  On a read-only filesystem the dirty page is simply
 * discarded instead.
 */
static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	if (sb_rdonly(inode->i_sb)) {
		/*
		 * It means that filesystem was remounted in read-only
		 * mode because of error or metadata corruption. But we
		 * have dirty pages that try to be flushed in background.
		 * So, here we simply discard this dirty page.
		 */
		nilfs_clear_dirty_page(page, false);
		unlock_page(page);
		return -EROFS;
	}

	/* Keep the page dirty; the log writer will write it out. */
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		/* Synchronous writeback: build a full segment now. */
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		/* Under memory pressure: kick an asynchronous flush. */
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}
20505fe58fdSRyusuke Konishi
/*
 * nilfs_dirty_folio - dirty_folio callback of nilfs_aops.
 *
 * Marks the folio dirty in the page cache and additionally sets the
 * dirty state on its buffers (skipping holes), so that the number of
 * newly dirtied blocks can be charged to the inode via
 * nilfs_set_file_dirty().
 */
static bool nilfs_dirty_folio(struct address_space *mapping,
			      struct folio *folio)
{
	struct inode *inode = mapping->host;
	struct buffer_head *head;
	unsigned int nr_dirty = 0;
	bool ret = filemap_dirty_folio(mapping, folio);

	/*
	 * The page may not be locked, eg if called from try_to_unmap_one()
	 */
	spin_lock(&mapping->private_lock);
	head = folio_buffers(folio);
	if (head) {
		struct buffer_head *bh = head;

		do {
			/* Do not mark hole blocks dirty */
			if (buffer_dirty(bh) || !buffer_mapped(bh))
				continue;

			set_buffer_dirty(bh);
			nr_dirty++;
		} while (bh = bh->b_this_page, bh != head);
	} else if (ret) {
		/* No buffers yet: account every block of the folio. */
		nr_dirty = 1 << (folio_shift(folio) - inode->i_blkbits);
	}
	spin_unlock(&mapping->private_lock);

	if (nr_dirty)
		nilfs_set_file_dirty(inode, nr_dirty);
	return ret;
}
23905fe58fdSRyusuke Konishi
/*
 * nilfs_write_failed - roll back page cache state after a failed write.
 * @mapping: address space of the file
 * @to: end position (bytes) of the failed write
 *
 * If the failed write extended past i_size, trim the page cache and the
 * on-disk blocks back to the current file size.
 */
void nilfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to <= inode->i_size)
		return;

	truncate_pagecache(inode, inode->i_size);
	nilfs_truncate(inode);
}
2492d1b399bSMarco Stornelli
/*
 * nilfs_write_begin - write_begin callback of nilfs_aops.
 *
 * Opens a NILFS transaction before preparing the page for the write;
 * on failure of the generic preparation, partial state is rolled back
 * and the transaction is aborted.
 */
static int nilfs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len,
			     struct page **pagep, void **fsdata)

{
	struct inode *inode = mapping->host;
	int err;

	err = nilfs_transaction_begin(inode->i_sb, NULL, 1);
	if (unlikely(err))
		return err;

	err = block_write_begin(mapping, pos, len, pagep, nilfs_get_block);
	if (likely(!err))
		return 0;

	/* Undo any size extension and close the transaction. */
	nilfs_write_failed(mapping, pos + len);
	nilfs_transaction_abort(inode->i_sb);
	return err;
}
26805fe58fdSRyusuke Konishi
/*
 * nilfs_write_end - write_end callback of nilfs_aops.
 *
 * Commits the transaction opened by nilfs_write_begin().  The number of
 * still-clean buffers in the written range is sampled *before*
 * generic_write_end() dirties them, so that only newly dirtied blocks
 * are charged to the inode.
 */
static int nilfs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned int start = pos & (PAGE_SIZE - 1);	/* offset within page */
	unsigned int nr_dirty;
	int err;

	/* Must be counted before generic_write_end() marks buffers dirty. */
	nr_dirty = nilfs_page_count_clean_buffers(page, start,
						  start + copied);
	copied = generic_write_end(file, mapping, pos, len, copied, page,
				   fsdata);
	nilfs_set_file_dirty(inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
	/* On commit failure report the error, otherwise the bytes copied. */
	return err ? : copied;
}
28605fe58fdSRyusuke Konishi
28705fe58fdSRyusuke Konishi static ssize_t
nilfs_direct_IO(struct kiocb * iocb,struct iov_iter * iter)288c8b8e32dSChristoph Hellwig nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
28905fe58fdSRyusuke Konishi {
2906b6dabc8SAl Viro struct inode *inode = file_inode(iocb->ki_filp);
29105fe58fdSRyusuke Konishi
2926f673763SOmar Sandoval if (iov_iter_rw(iter) == WRITE)
29305fe58fdSRyusuke Konishi return 0;
29405fe58fdSRyusuke Konishi
29505fe58fdSRyusuke Konishi /* Needs synchronization with the cleaner */
296c8b8e32dSChristoph Hellwig return blockdev_direct_IO(iocb, inode, iter, nilfs_get_block);
29705fe58fdSRyusuke Konishi }
29805fe58fdSRyusuke Konishi
/*
 * Address space operations for regular files, directories and symlinks.
 * Writeback is delegated to the NILFS log writer rather than performed
 * page by page (see nilfs_writepage()/nilfs_writepages() above).
 */
const struct address_space_operations nilfs_aops = {
	.writepage		= nilfs_writepage,
	.read_folio		= nilfs_read_folio,
	.writepages		= nilfs_writepages,
	.dirty_folio		= nilfs_dirty_folio,
	.readahead		= nilfs_readahead,
	.write_begin		= nilfs_write_begin,
	.write_end		= nilfs_write_end,
	.invalidate_folio	= block_invalidate_folio,
	.direct_IO		= nilfs_direct_IO,
	.is_partially_uptodate  = block_is_partially_uptodate,
};
31105fe58fdSRyusuke Konishi
/*
 * Minimal address space operations for internal buffer caches
 * (e.g. B-tree node and shadow page caches); only invalidation
 * is needed there.
 */
const struct address_space_operations nilfs_buffer_cache_aops = {
	.invalidate_folio	= block_invalidate_folio,
};
315cfb608b4SRyusuke Konishi
316705304a8SRyusuke Konishi static int nilfs_insert_inode_locked(struct inode *inode,
317705304a8SRyusuke Konishi struct nilfs_root *root,
318705304a8SRyusuke Konishi unsigned long ino)
319705304a8SRyusuke Konishi {
320705304a8SRyusuke Konishi struct nilfs_iget_args args = {
321e897be17SRyusuke Konishi .ino = ino, .root = root, .cno = 0, .for_gc = false,
3226e211930SRyusuke Konishi .for_btnc = false, .for_shadow = false
323705304a8SRyusuke Konishi };
nilfs_new_inode(struct inode * dir,umode_t mode)324705304a8SRyusuke Konishi
325705304a8SRyusuke Konishi return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
326705304a8SRyusuke Konishi }
327705304a8SRyusuke Konishi
/**
 * nilfs_new_inode - allocate a new inode in @dir's filesystem
 * @dir: parent directory inode
 * @mode: file type and permission bits of the new inode
 *
 * Allocates an inode number from the ifile, initializes the in-core
 * inode (ownership, timestamps, inherited flags, bmap for regular
 * files/directories/symlinks) and inserts it into the inode cache.
 *
 * Return: pointer to the new locked inode, or an ERR_PTR() on failure.
 */
struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	struct buffer_head *bh;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	/* Forbid FS recursion from memory reclaim on this mapping. */
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = BIT(NILFS_I_NEW);
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	if (unlikely(ino < NILFS_USER_INO)) {
		/*
		 * A corrupted bitmap handed out a reserved inode number;
		 * keep allocating (marking the bad slots used) until a
		 * valid user inode number is obtained.
		 */
		nilfs_warn(sb,
			   "inode bitmap is inconsistent for reserved inodes");
		do {
			brelse(bh);
			err = nilfs_ifile_create_inode(root->ifile, &ino, &bh);
			if (unlikely(err))
				goto failed_ifile_create_inode;
		} while (ino < NILFS_USER_INO);

		nilfs_info(sb, "repaired inode bitmap for reserved inodes");
	}
	ii->i_bh = bh;

	atomic64_inc(&root->inodes_count);
	inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		/* These file types carry block mappings; set up the bmap. */
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_after_creation;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	/* Inherit the inheritable flags from the parent directory. */
	ii->i_flags = nilfs_mask_flags(
		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	/* Hand out a unique generation number under the spinlock. */
	spin_lock(&nilfs->ns_next_gen_lock);
	inode->i_generation = nilfs->ns_next_generation++;
	spin_unlock(&nilfs->ns_next_gen_lock);
	if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
		err = -EIO;
		goto failed_after_creation;
	}

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		/*
		 * Never occur. When supporting nilfs_init_acl(),
		 * proper cancellation of above jobs should be considered.
		 */
		goto failed_after_creation;

	return inode;

 failed_after_creation:
	clear_nlink(inode);
	if (inode->i_state & I_NEW)
		unlock_new_inode(inode);
	iput(inode); /*
		      * raw_inode will be deleted through
		      * nilfs_evict_inode().
		      */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);
 failed:
	return ERR_PTR(err);
}
42505fe58fdSRyusuke Konishi
42605fe58fdSRyusuke Konishi void nilfs_set_inode_flags(struct inode *inode)
42705fe58fdSRyusuke Konishi {
42805fe58fdSRyusuke Konishi unsigned int flags = NILFS_I(inode)->i_flags;
429faea2c53SRyusuke Konishi unsigned int new_fl = 0;
43005fe58fdSRyusuke Konishi
431f0c9f242SRyusuke Konishi if (flags & FS_SYNC_FL)
432faea2c53SRyusuke Konishi new_fl |= S_SYNC;
433f0c9f242SRyusuke Konishi if (flags & FS_APPEND_FL)
434faea2c53SRyusuke Konishi new_fl |= S_APPEND;
435f0c9f242SRyusuke Konishi if (flags & FS_IMMUTABLE_FL)
436faea2c53SRyusuke Konishi new_fl |= S_IMMUTABLE;
437f0c9f242SRyusuke Konishi if (flags & FS_NOATIME_FL)
438faea2c53SRyusuke Konishi new_fl |= S_NOATIME;
439f0c9f242SRyusuke Konishi if (flags & FS_DIRSYNC_FL)
440faea2c53SRyusuke Konishi new_fl |= S_DIRSYNC;
nilfs_read_inode_common(struct inode * inode,struct nilfs_inode * raw_inode)441faea2c53SRyusuke Konishi inode_set_flags(inode, new_fl, S_SYNC | S_APPEND | S_IMMUTABLE |
442faea2c53SRyusuke Konishi S_NOATIME | S_DIRSYNC);
44305fe58fdSRyusuke Konishi }
44405fe58fdSRyusuke Konishi
/**
 * nilfs_read_inode_common - decode an on-disk inode into an in-core inode
 * @inode: in-core inode to fill in
 * @raw_inode: little-endian on-disk inode image
 *
 * Return: 0 on success, -EIO if a metadata-file inode is corrupted,
 * -ESTALE if the inode has been deleted (link count zero), or a
 * negative error code from nilfs_bmap_read().
 */
int nilfs_read_inode_common(struct inode *inode,
			    struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
	i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
	inode->i_size = le64_to_cpu(raw_inode->i_size);
	/*
	 * NOTE(review): atime is filled from the on-disk mtime fields here
	 * and below — presumably the on-disk inode carries no separate
	 * atime; confirm against struct nilfs_inode.
	 */
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode_set_ctime(inode, le64_to_cpu(raw_inode->i_ctime),
			le32_to_cpu(raw_inode->i_ctime_nsec));
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	if (nilfs_is_metadata_file_inode(inode) && !S_ISREG(inode->i_mode))
		return -EIO; /* this inode is for metadata and corrupted */
	if (inode->i_nlink == 0)
		return -ESTALE; /* this inode is deleted */

	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
		0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
	ii->i_dir_start_lookup = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		/* Only these file types have a block mapping to restore. */
		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
		if (err < 0)
			return err;
		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}
	return 0;
}
48705fe58fdSRyusuke Konishi
/*
 * __nilfs_read_inode - read an inode from the ifile and set up its
 * VFS operations.
 * @sb: super block
 * @root: NILFS root (checkpoint) to read from
 * @ino: inode number
 * @inode: in-core inode to fill in
 *
 * Reads the raw inode from the ifile under the DAT semaphore, decodes
 * it via nilfs_read_inode_common(), and installs the inode/file/address
 * space operations matching the file type.
 *
 * Return: 0 on success, or a negative error code.
 */
static int __nilfs_read_inode(struct super_block *sb,
			      struct nilfs_root *root, unsigned long ino,
			      struct inode *inode)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		/* Device node, FIFO or socket. */
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	nilfs_set_inode_flags(inode);
	/* Forbid FS recursion from memory reclaim on this mapping. */
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
	return 0;

 failed_unmap:
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);

 bad_inode:
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	return err;
}
54205fe58fdSRyusuke Konishi
5430e14a359SRyusuke Konishi static int nilfs_iget_test(struct inode *inode, void *opaque)
5440e14a359SRyusuke Konishi {
5450e14a359SRyusuke Konishi struct nilfs_iget_args *args = opaque;
5460e14a359SRyusuke Konishi struct nilfs_inode_info *ii;
5470e14a359SRyusuke Konishi
5484d8d9293SRyusuke Konishi if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
5490e14a359SRyusuke Konishi return 0;
5500e14a359SRyusuke Konishi
5510e14a359SRyusuke Konishi ii = NILFS_I(inode);
552e897be17SRyusuke Konishi if (test_bit(NILFS_I_BTNC, &ii->i_state)) {
553e897be17SRyusuke Konishi if (!args->for_btnc)
554e897be17SRyusuke Konishi return 0;
555e897be17SRyusuke Konishi } else if (args->for_btnc) {
556e897be17SRyusuke Konishi return 0;
557e897be17SRyusuke Konishi }
5586e211930SRyusuke Konishi if (test_bit(NILFS_I_SHADOW, &ii->i_state)) {
5596e211930SRyusuke Konishi if (!args->for_shadow)
5606e211930SRyusuke Konishi return 0;
5616e211930SRyusuke Konishi } else if (args->for_shadow) {
5626e211930SRyusuke Konishi return 0;
5636e211930SRyusuke Konishi }
564e897be17SRyusuke Konishi
5650e14a359SRyusuke Konishi if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
5660e14a359SRyusuke Konishi return !args->for_gc;
nilfs_iget_set(struct inode * inode,void * opaque)5670e14a359SRyusuke Konishi
5680e14a359SRyusuke Konishi return args->for_gc && args->cno == ii->i_cno;
5690e14a359SRyusuke Konishi }
5700e14a359SRyusuke Konishi
5710e14a359SRyusuke Konishi static int nilfs_iget_set(struct inode *inode, void *opaque)
5720e14a359SRyusuke Konishi {
5730e14a359SRyusuke Konishi struct nilfs_iget_args *args = opaque;
5740e14a359SRyusuke Konishi
5750e14a359SRyusuke Konishi inode->i_ino = args->ino;
5760e14a359SRyusuke Konishi NILFS_I(inode)->i_cno = args->cno;
577e897be17SRyusuke Konishi NILFS_I(inode)->i_root = args->root;
5784d8d9293SRyusuke Konishi if (args->root && args->ino == NILFS_ROOT_INO)
5794d8d9293SRyusuke Konishi nilfs_get_root(args->root);
580e897be17SRyusuke Konishi
581e897be17SRyusuke Konishi if (args->for_gc)
582e897be17SRyusuke Konishi NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
583e897be17SRyusuke Konishi if (args->for_btnc)
584e897be17SRyusuke Konishi NILFS_I(inode)->i_state |= BIT(NILFS_I_BTNC);
5856e211930SRyusuke Konishi if (args->for_shadow)
nilfs_ilookup(struct super_block * sb,struct nilfs_root * root,unsigned long ino)5866e211930SRyusuke Konishi NILFS_I(inode)->i_state |= BIT(NILFS_I_SHADOW);
5870e14a359SRyusuke Konishi return 0;
5880e14a359SRyusuke Konishi }
5890e14a359SRyusuke Konishi
590032dbb3bSRyusuke Konishi struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
591032dbb3bSRyusuke Konishi unsigned long ino)
592032dbb3bSRyusuke Konishi {
593032dbb3bSRyusuke Konishi struct nilfs_iget_args args = {
594e897be17SRyusuke Konishi .ino = ino, .root = root, .cno = 0, .for_gc = false,
5956e211930SRyusuke Konishi .for_btnc = false, .for_shadow = false
596032dbb3bSRyusuke Konishi };
nilfs_iget_locked(struct super_block * sb,struct nilfs_root * root,unsigned long ino)597032dbb3bSRyusuke Konishi
598032dbb3bSRyusuke Konishi return ilookup5(sb, ino, nilfs_iget_test, &args);
599032dbb3bSRyusuke Konishi }
600032dbb3bSRyusuke Konishi
601f1e89c86SRyusuke Konishi struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
6024d8d9293SRyusuke Konishi unsigned long ino)
60305fe58fdSRyusuke Konishi {
6044d8d9293SRyusuke Konishi struct nilfs_iget_args args = {
605e897be17SRyusuke Konishi .ino = ino, .root = root, .cno = 0, .for_gc = false,
6066e211930SRyusuke Konishi .for_btnc = false, .for_shadow = false
6074d8d9293SRyusuke Konishi };
nilfs_iget(struct super_block * sb,struct nilfs_root * root,unsigned long ino)608f1e89c86SRyusuke Konishi
609f1e89c86SRyusuke Konishi return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
610f1e89c86SRyusuke Konishi }
611f1e89c86SRyusuke Konishi
/**
 * nilfs_iget - obtain a fully initialized inode of a checkpoint
 * @sb: super block instance
 * @root: NILFS root object (mounted checkpoint)
 * @ino: inode number
 *
 * Looks the inode up in the inode cache and, if it is not cached yet,
 * reads it in from the ifile via __nilfs_read_inode().
 *
 * Return: the inode on success; otherwise an ERR_PTR: %-ENOMEM when
 * allocation fails, %-ESTALE when a cached inode with a zero link count
 * is found (it is in the middle of being freed), or the error from
 * __nilfs_read_inode().
 */
struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
			 unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = nilfs_iget_locked(sb, root, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW)) {
		/*
		 * A cached inode with i_nlink == 0 is being deleted;
		 * do not hand it back to the caller.
		 */
		if (!inode->i_nlink) {
			iput(inode);
			return ERR_PTR(-ESTALE);
		}
		return inode;
	}

	err = __nilfs_read_inode(sb, root, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}
63805fe58fdSRyusuke Konishi
/**
 * nilfs_iget_for_gc - obtain an inode for garbage collection
 * @sb: super block instance
 * @ino: inode number
 * @cno: checkpoint number the GC inode belongs to
 *
 * GC inodes are keyed by (ino, cno) with a NULL root and the for_gc
 * flag set, so they never collide with regular cached inodes.  A newly
 * allocated one is initialized with nilfs_init_gcinode().
 *
 * Return: the GC inode on success, or an ERR_PTR (%-ENOMEM or the
 * error from nilfs_init_gcinode()) on failure.
 */
struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
				__u64 cno)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = NULL, .cno = cno, .for_gc = true,
		.for_btnc = false, .for_shadow = false
	};
	struct inode *inode;
	int err;

	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = nilfs_init_gcinode(inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}
663263d90ceSRyusuke Konishi
/**
 * nilfs_attach_btree_node_cache - attach a B-tree node cache to the inode
 * @inode: inode object
 *
 * nilfs_attach_btree_node_cache() attaches a B-tree node cache to @inode,
 * or does nothing if the inode already has it.  This function allocates
 * an additional inode to maintain page cache of B-tree nodes one-on-one.
 *
 * Return Value: On success, 0 is returned.  On errors, one of the following
 * negative error code is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_attach_btree_node_cache(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *btnc_inode;
	struct nilfs_iget_args args;

	if (ii->i_assoc_inode)
		return 0;	/* already attached */

	/*
	 * The holder inode shares ino/root/cno and the GC/shadow markers
	 * with @inode; only for_btnc distinguishes it in the inode cache.
	 */
	args.ino = inode->i_ino;
	args.root = ii->i_root;
	args.cno = ii->i_cno;
	args.for_gc = test_bit(NILFS_I_GCINODE, &ii->i_state) != 0;
	args.for_btnc = true;
	args.for_shadow = test_bit(NILFS_I_SHADOW, &ii->i_state) != 0;

	btnc_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
				  nilfs_iget_set, &args);
	if (unlikely(!btnc_inode))
		return -ENOMEM;
	if (btnc_inode->i_state & I_NEW) {
		nilfs_init_btnc_inode(btnc_inode);
		unlock_new_inode(btnc_inode);
	}
	/* Link the pair both ways; the holder borrows @inode's bmap. */
	NILFS_I(btnc_inode)->i_assoc_inode = inode;
	NILFS_I(btnc_inode)->i_bmap = ii->i_bmap;
	ii->i_assoc_inode = btnc_inode;

	return 0;
}
707e897be17SRyusuke Konishi
708e897be17SRyusuke Konishi /**
709e897be17SRyusuke Konishi * nilfs_detach_btree_node_cache - detach the B-tree node cache from the inode
710e897be17SRyusuke Konishi * @inode: inode object
711e897be17SRyusuke Konishi *
712e897be17SRyusuke Konishi * nilfs_detach_btree_node_cache() detaches the B-tree node cache and its
713e897be17SRyusuke Konishi * holder inode bound to @inode, or does nothing if @inode doesn't have it.
714e897be17SRyusuke Konishi */
715e897be17SRyusuke Konishi void nilfs_detach_btree_node_cache(struct inode *inode)
716e897be17SRyusuke Konishi {
717e897be17SRyusuke Konishi struct nilfs_inode_info *ii = NILFS_I(inode);
718e897be17SRyusuke Konishi struct inode *btnc_inode = ii->i_assoc_inode;
719e897be17SRyusuke Konishi
720e897be17SRyusuke Konishi if (btnc_inode) {
721e897be17SRyusuke Konishi NILFS_I(btnc_inode)->i_assoc_inode = NULL;
722e897be17SRyusuke Konishi ii->i_assoc_inode = NULL;
723e897be17SRyusuke Konishi iput(btnc_inode);
724e897be17SRyusuke Konishi }
725e897be17SRyusuke Konishi }
726e897be17SRyusuke Konishi
7276e211930SRyusuke Konishi /**
7286e211930SRyusuke Konishi * nilfs_iget_for_shadow - obtain inode for shadow mapping
7296e211930SRyusuke Konishi * @inode: inode object that uses shadow mapping
7306e211930SRyusuke Konishi *
7316e211930SRyusuke Konishi * nilfs_iget_for_shadow() allocates a pair of inodes that holds page
nilfs_iget_for_shadow(struct inode * inode)7326e211930SRyusuke Konishi * caches for shadow mapping. The page cache for data pages is set up
7336e211930SRyusuke Konishi * in one inode and the one for b-tree node pages is set up in the
7346e211930SRyusuke Konishi * other inode, which is attached to the former inode.
7356e211930SRyusuke Konishi *
7366e211930SRyusuke Konishi * Return Value: On success, a pointer to the inode for data pages is
7376e211930SRyusuke Konishi * returned. On errors, one of the following negative error code is returned
7386e211930SRyusuke Konishi * in a pointer type.
7396e211930SRyusuke Konishi *
7406e211930SRyusuke Konishi * %-ENOMEM - Insufficient memory available.
7416e211930SRyusuke Konishi */
7426e211930SRyusuke Konishi struct inode *nilfs_iget_for_shadow(struct inode *inode)
7436e211930SRyusuke Konishi {
7446e211930SRyusuke Konishi struct nilfs_iget_args args = {
7456e211930SRyusuke Konishi .ino = inode->i_ino, .root = NULL, .cno = 0, .for_gc = false,
7466e211930SRyusuke Konishi .for_btnc = false, .for_shadow = true
7476e211930SRyusuke Konishi };
7486e211930SRyusuke Konishi struct inode *s_inode;
7496e211930SRyusuke Konishi int err;
7506e211930SRyusuke Konishi
7516e211930SRyusuke Konishi s_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
7526e211930SRyusuke Konishi nilfs_iget_set, &args);
7536e211930SRyusuke Konishi if (unlikely(!s_inode))
7546e211930SRyusuke Konishi return ERR_PTR(-ENOMEM);
7556e211930SRyusuke Konishi if (!(s_inode->i_state & I_NEW))
7566e211930SRyusuke Konishi return inode;
7576e211930SRyusuke Konishi
7586e211930SRyusuke Konishi NILFS_I(s_inode)->i_flags = 0;
7596e211930SRyusuke Konishi memset(NILFS_I(s_inode)->i_bmap, 0, sizeof(struct nilfs_bmap));
7606e211930SRyusuke Konishi mapping_set_gfp_mask(s_inode->i_mapping, GFP_NOFS);
761cfb608b4SRyusuke Konishi s_inode->i_mapping->a_ops = &nilfs_buffer_cache_aops;
7626e211930SRyusuke Konishi
7636e211930SRyusuke Konishi err = nilfs_attach_btree_node_cache(s_inode);
7646e211930SRyusuke Konishi if (unlikely(err)) {
7656e211930SRyusuke Konishi iget_failed(s_inode);
7666e211930SRyusuke Konishi return ERR_PTR(err);
7676e211930SRyusuke Konishi }
7686e211930SRyusuke Konishi unlock_new_inode(s_inode);
7696e211930SRyusuke Konishi return s_inode;
7706e211930SRyusuke Konishi }
7716e211930SRyusuke Konishi
/**
 * nilfs_write_inode_common - serialize in-memory inode state to disk layout
 * @inode: inode whose fields are written out
 * @raw_inode: on-disk inode image to fill (little-endian)
 * @has_bmap: nonzero to also serialize the block mapping (bmap)
 *
 * Copies mode, ownership, size, timestamps, and flags into @raw_inode.
 * Metadata-file inodes stored in the super root block additionally get
 * their unused tail zero-filled so stale data never reaches disk.
 */
void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode, int has_bmap)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
	raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	raw_inode->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
		struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

		/* zero-fill unused portion in the case of super root block */
		raw_inode->i_xattr = 0;
		raw_inode->i_pad = 0;
		memset((void *)raw_inode + sizeof(*raw_inode), 0,
		       nilfs->ns_inode_size - sizeof(*raw_inode));
	}

	if (has_bmap)
		nilfs_bmap_write(ii->i_bmap, raw_inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		/* device inodes store the device number in place of bmap data */
		raw_inode->i_device_code =
			cpu_to_le64(huge_encode_dev(inode->i_rdev));
	/*
	 * When extending inode, nilfs->ns_inode_size should be checked
	 * for substitutions of appended fields.
	 */
}
81105fe58fdSRyusuke Konishi
/**
 * nilfs_update_inode - write inode state into its ifile block
 * @inode: inode to write out
 * @ibh: buffer head of the ifile block holding the on-disk inode
 * @flags: VFS dirty flags; I_DIRTY_DATASYNC marks the inode for sync
 *
 * Maps the raw inode inside @ibh, zeroes it if the inode is freshly
 * created, copies the in-memory state into it, and unmaps it again.
 */
void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *ifile = ii->i_root->ifile;
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

	/* A brand-new inode entry must start from a clean slate. */
	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
	if (flags & I_DIRTY_DATASYNC)
		set_bit(NILFS_I_INODE_SYNC, &ii->i_state);

	nilfs_write_inode_common(inode, raw_inode, 0);
	/*
	 * XXX: call with has_bmap = 0 is a workaround to avoid
	 * deadlock of bmap.  This delays update of i_bmap to just
	 * before writing.
	 */

	nilfs_ifile_unmap_inode(ifile, ino, ibh);
}
83505fe58fdSRyusuke Konishi
83605fe58fdSRyusuke Konishi #define NILFS_MAX_TRUNCATE_BLOCKS 16384 /* 64MB for 4KB block */
83705fe58fdSRyusuke Konishi
/**
 * nilfs_truncate_bmap - shrink the block mapping down to a given offset
 * @ii: nilfs inode info whose bmap is truncated
 * @from: first block offset to discard (blocks >= @from are removed)
 *
 * Truncates in chunks of at most NILFS_MAX_TRUNCATE_BLOCKS per pass,
 * calling nilfs_relax_pressure_in_lock() between passes to let the
 * segment constructor reclaim space, until the last mapped key drops
 * below @from.  Errors are logged rather than propagated.
 */
static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	__u64 b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;		/* mapping is already empty */
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;		/* everything at or beyond @from is gone */

	/* step back by at most one chunk, never past @from */
	b -= min_t(__u64, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	/* on -ENOMEM, retry once after pressure relief before giving up */
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

failed:
	nilfs_warn(ii->vfs_inode.i_sb, "error %d truncating bmap (ino=%lu)",
		   ret, ii->vfs_inode.i_ino);
}
86705fe58fdSRyusuke Konishi
/**
 * nilfs_truncate - truncate a file to its current i_size
 * @inode: inode of the file being truncated
 *
 * Called after i_size has been updated.  Zeroes the partial tail block,
 * removes the now-unused block mappings, updates timestamps, and
 * commits the change as a segment-construction transaction.  Inodes
 * without a bmap, or append-only/immutable inodes, are left untouched.
 */
void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	/* first block that lies entirely beyond the new size */
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode->i_mtime = inode_set_ctime_current(inode);
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_mark_inode_dirty(inode);
	nilfs_set_file_dirty(inode, 0);
	nilfs_transaction_commit(sb);
	/*
	 * May construct a logical segment and may fail in sync mode.
	 * But truncate has no return value.
	 */
}
90105fe58fdSRyusuke Konishi
/**
 * nilfs_clear_inode - release in-memory resources attached to an inode
 * @inode: inode being torn down
 *
 * Frees resources allocated in nilfs_read_inode(): the cached ifile
 * buffer, MDT private data, the bmap, the associated B-tree node cache
 * holder, and the extra root reference held by the root directory.
 */
static void nilfs_clear_inode(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	/*
	 * Free resources allocated in nilfs_read_inode(), here.
	 */
	BUG_ON(!list_empty(&ii->i_dirty));
	brelse(ii->i_bh);
	ii->i_bh = NULL;

	if (nilfs_is_metadata_file_inode(inode))
		nilfs_mdt_clear(inode);

	if (test_bit(NILFS_I_BMAP, &ii->i_state))
		nilfs_bmap_clear(ii->i_bmap);

	/*
	 * A B-tree node cache holder inode (NILFS_I_BTNC set) must not
	 * detach itself; only the owning inode detaches the pair.
	 */
	if (!test_bit(NILFS_I_BTNC, &ii->i_state))
		nilfs_detach_btree_node_cache(inode);

	if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
		nilfs_put_root(ii->i_root);
}
9256fd1e5c9SAl Viro
/**
 * nilfs_evict_inode - finalize an inode being removed from memory
 * @inode: inode to evict
 *
 * Called by the VFS when the last reference to the inode is dropped.
 * If the inode is still linked, has no checkpoint root, or is bad, only
 * its in-memory state is torn down.  Otherwise the on-disk inode is
 * deleted: the bmap is truncated to zero, the ifile entry is freed, and
 * the change is committed as a segment-construction transaction.
 */
void nilfs_evict_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs;
	int ret;

	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
		/* nothing to delete on disk; drop in-memory state only */
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
		nilfs_clear_inode(inode);
		return;
	}
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	truncate_inode_pages_final(&inode->i_data);

	nilfs = sb->s_fs_info;
	if (unlikely(sb_rdonly(sb) || !nilfs->ns_writer)) {
		/*
		 * If this inode is about to be disposed after the file system
		 * has been degraded to read-only due to file system corruption
		 * or after the writer has been detached, do not make any
		 * changes that cause writes, just clear it.
		 * Do this check after read-locking ns_segctor_sem by
		 * nilfs_transaction_begin() in order to avoid a race with
		 * the writer detach operation.
		 */
		clear_inode(inode);
		nilfs_clear_inode(inode);
		nilfs_transaction_abort(sb);
		return;
	}

	/* TODO: some of the following operations may fail.  */
	nilfs_truncate_bmap(ii, 0);
	nilfs_mark_inode_dirty(inode);
	clear_inode(inode);

	ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
	if (!ret)
		atomic64_dec(&ii->i_root->inodes_count);

	nilfs_clear_inode(inode);

	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	nilfs_transaction_commit(sb);
	/*
	 * May construct a logical segment and may fail in sync mode.
	 * But delete_inode has no return value.
	 */
}
98005fe58fdSRyusuke Konishi
/**
 * nilfs_setattr - change attributes of a file
 * @idmap: idmap of the mount the inode was found from (unused; nilfs
 *         uses the no-op idmap)
 * @dentry: dentry of the file
 * @iattr: attributes to apply
 *
 * Validates the request, then applies size changes (with truncation),
 * copies the remaining attributes, and handles mode changes through
 * nilfs_acl_chmod(), all inside one nilfs transaction.
 *
 * Return: 0 on success or a negative error code.
 */
int nilfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		  struct iattr *iattr)
{
	struct nilfs_transaction_info ti;
	struct inode *inode = d_inode(dentry);
	struct super_block *sb = inode->i_sb;
	int err;

	err = setattr_prepare(&nop_mnt_idmap, dentry, iattr);
	if (err)
		return err;

	err = nilfs_transaction_begin(sb, &ti, 0);
	if (unlikely(err))
		return err;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		/* wait for in-flight direct I/O before changing the size */
		inode_dio_wait(inode);
		truncate_setsize(inode, iattr->ia_size);
		nilfs_truncate(inode);
	}

	setattr_copy(&nop_mnt_idmap, inode, iattr);
	mark_inode_dirty(inode);

	if (iattr->ia_valid & ATTR_MODE) {
		err = nilfs_acl_chmod(inode);
		if (unlikely(err))
			goto out_err;
	}

	return nilfs_transaction_commit(sb);

out_err:
	nilfs_transaction_abort(sb);
	return err;
}
101905fe58fdSRyusuke Konishi
10204609e1f1SChristian Brauner int nilfs_permission(struct mnt_idmap *idmap, struct inode *inode,
nilfs_load_inode_block(struct inode * inode,struct buffer_head ** pbh)1021549c7297SChristian Brauner int mask)
1022dc3d3b81SRyusuke Konishi {
1023730e908fSAl Viro struct nilfs_root *root = NILFS_I(inode)->i_root;
10244ad364caSRyusuke Konishi
1025dc3d3b81SRyusuke Konishi if ((mask & MAY_WRITE) && root &&
1026dc3d3b81SRyusuke Konishi root->cno != NILFS_CPTREE_CURRENT_CNO)
1027dc3d3b81SRyusuke Konishi return -EROFS; /* snapshot is not writable */
1028dc3d3b81SRyusuke Konishi
10294609e1f1SChristian Brauner return generic_permission(&nop_mnt_idmap, inode, mask);
1030dc3d3b81SRyusuke Konishi }
1031dc3d3b81SRyusuke Konishi
/**
 * nilfs_load_inode_block - get the ifile buffer holding an inode
 * @inode: inode whose on-disk entry is wanted
 * @pbh: output; buffer head of the ifile block (caller must brelse)
 *
 * Returns the cached ifile buffer (ii->i_bh) when valid, otherwise
 * reads the block from the ifile.  The lock is dropped while reading,
 * so after reacquiring it another thread may have filled ii->i_bh; in
 * that case the freshly read buffer wins only if the cached one has
 * gone stale (!uptodate).  An extra reference is taken on *pbh.
 *
 * Return: 0 on success or the error from nilfs_ifile_get_inode_block().
 */
int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	spin_lock(&nilfs->ns_inode_lock);
	if (ii->i_bh == NULL || unlikely(!buffer_uptodate(ii->i_bh))) {
		spin_unlock(&nilfs->ns_inode_lock);
		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
						  inode->i_ino, pbh);
		if (unlikely(err))
			return err;
		spin_lock(&nilfs->ns_inode_lock);
		if (ii->i_bh == NULL)
			ii->i_bh = *pbh;
		else if (unlikely(!buffer_uptodate(ii->i_bh))) {
			/* cached buffer went stale while unlocked; replace */
			__brelse(ii->i_bh);
			ii->i_bh = *pbh;
		} else {
			/* raced with another loader; keep the cached one */
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else
		*pbh = ii->i_bh;

	get_bh(*pbh);
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}
106205fe58fdSRyusuke Konishi
106305fe58fdSRyusuke Konishi int nilfs_inode_dirty(struct inode *inode)
106405fe58fdSRyusuke Konishi {
106505fe58fdSRyusuke Konishi struct nilfs_inode_info *ii = NILFS_I(inode);
1066e3154e97SRyusuke Konishi struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
nilfs_set_file_dirty(struct inode * inode,unsigned int nr_dirty)106705fe58fdSRyusuke Konishi int ret = 0;
106805fe58fdSRyusuke Konishi
106905fe58fdSRyusuke Konishi if (!list_empty(&ii->i_dirty)) {
1070693dd321SRyusuke Konishi spin_lock(&nilfs->ns_inode_lock);
107105fe58fdSRyusuke Konishi ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
107205fe58fdSRyusuke Konishi test_bit(NILFS_I_BUSY, &ii->i_state);
1073693dd321SRyusuke Konishi spin_unlock(&nilfs->ns_inode_lock);
107405fe58fdSRyusuke Konishi }
107505fe58fdSRyusuke Konishi return ret;
107605fe58fdSRyusuke Konishi }
107705fe58fdSRyusuke Konishi
/**
 * nilfs_set_file_dirty - register an inode as dirty with the log writer
 * @inode: inode to mark dirty
 * @nr_dirty: number of newly dirtied blocks to account
 *
 * Adds @nr_dirty to the filesystem-wide dirty block counter and, on the
 * first dirtying, queues the inode on the writer's dirty-files list
 * while holding a reference obtained with igrab().
 *
 * Return: 0 on success; %-EINVAL when the inode is being freed and can
 * no longer be grabbed.
 */
int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

	atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);

	/* already marked dirty: only the block accounting above is needed */
	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/*
		 * Because this routine may race with nilfs_dispose_list(),
		 * we have to check NILFS_I_QUEUED here, too.
		 */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/*
			 * This will happen when somebody is freeing
			 * this inode.
			 */
			nilfs_warn(inode->i_sb,
				   "cannot set file dirty (ino=%lu): the file is being freed",
				   inode->i_ino);
			spin_unlock(&nilfs->ns_inode_lock);
			return -EINVAL; /*
					 * NILFS_I_DIRTY may remain for
					 * freeing inode.
					 */
		}
		list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}
111505fe58fdSRyusuke Konishi
/**
 * __nilfs_mark_inode_dirty - write the inode into its ifile block
 * @inode: inode to flush
 * @flags: VFS dirty flags forwarded to nilfs_update_inode()
 *
 * Loads the ifile block holding @inode, copies the in-memory inode
 * state into it, and marks both the buffer and the ifile dirty so the
 * segment constructor picks them up.
 *
 * Return: 0 on success, or the error from nilfs_load_inode_block().
 */
int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct buffer_head *ibh;
	int err;

	/*
	 * Do not dirty inodes after the log writer has been detached
	 * and its nilfs_root struct has been freed.
	 */
	if (unlikely(nilfs_purging(nilfs)))
		return 0;

	err = nilfs_load_inode_block(inode, &ibh);
	if (unlikely(err)) {
		nilfs_warn(inode->i_sb,
			   "cannot mark inode dirty (ino=%lu): error %d loading inode block",
			   inode->i_ino, err);
		return err;
	}
	nilfs_update_inode(inode, ibh, flags);
	mark_buffer_dirty(ibh);
	nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
	brelse(ibh);
	return 0;
}
114205fe58fdSRyusuke Konishi
/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 * @flags: flags to determine the dirty state of the inode
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from a nilfs_inode to a corresponding inode
 * entry in the inode block.  This operation is excluded from the segment
 * construction.  This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode, int flags)
{
	struct nilfs_transaction_info ti;
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	if (is_bad_inode(inode)) {
		nilfs_warn(inode->i_sb,
			   "tried to mark bad_inode dirty. ignored.");
		dump_stack();
		return;
	}
	/* metadata files have their own dirty tracking; no transaction */
	if (mdi) {
		nilfs_mdt_mark_dirty(inode);
		return;
	}
	nilfs_transaction_begin(inode->i_sb, &ti, 0);
	__nilfs_mark_inode_dirty(inode, flags);
	nilfs_transaction_commit(inode->i_sb); /* never fails */
}
1173622daaffSRyusuke Konishi
/**
 * nilfs_fiemap - report the extent mapping of a file range
 * @inode: inode of the file to map
 * @fieinfo: fiemap extent info to be filled with extents
 * @start: start offset of the range, in bytes
 * @len: length of the range, in bytes
 *
 * Walks the file's bmap and the list of uncommitted (delayed-allocation)
 * extents, merging contiguous blocks into extents and reporting them via
 * fiemap_fill_next_extent().  Delalloc ranges are reported with
 * FIEMAP_EXTENT_DELALLOC and no physical address.
 *
 * Return: 0 on success, or a negative error code (from fiemap_prep(),
 * the bmap lookup, or fiemap_fill_next_extent()).
 */
int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 __u64 start, __u64 len)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 logical = 0, phys = 0, size = 0;	/* extent being accumulated */
	__u32 flags = 0;
	loff_t isize;
	sector_t blkoff, end_blkoff;
	sector_t delalloc_blkoff;	/* start of next uncommitted extent */
	unsigned long delalloc_blklen;	/* its length; 0 = none ahead */
	unsigned int blkbits = inode->i_blkbits;
	int ret, n;

	/* Validate the request and clamp @len to the supported range. */
	ret = fiemap_prep(inode, fieinfo, start, &len, 0);
	if (ret)
		return ret;

	inode_lock(inode);

	isize = i_size_read(inode);

	/* Convert the byte range to an inclusive block-offset range. */
	blkoff = start >> blkbits;
	end_blkoff = (start + len - 1) >> blkbits;

	/* Locate the first uncommitted (delalloc) extent at/after blkoff. */
	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
							&delalloc_blkoff);

	do {
		__u64 blkphy;
		unsigned int maxblocks;

		/* Cursor has reached a delalloc extent: report it as such. */
		if (delalloc_blklen && blkoff == delalloc_blkoff) {
			if (size) {
				/* End of the current extent */
				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
			}
			if (blkoff > end_blkoff)
				break;

			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
			logical = blkoff << blkbits;
			phys = 0;
			size = delalloc_blklen << blkbits;

			/* Skip past this extent and find the next one. */
			blkoff = delalloc_blkoff + delalloc_blklen;
			delalloc_blklen = nilfs_find_uncommitted_extent(
				inode, blkoff, &delalloc_blkoff);
			continue;
		}

		/*
		 * Limit the number of blocks that we look up so as
		 * not to get into the next delayed allocation extent.
		 */
		maxblocks = INT_MAX;
		if (delalloc_blklen)
			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
					  maxblocks);
		blkphy = 0;

		/*
		 * Hold the DAT metadata-file semaphore across the lookup;
		 * presumably this keeps the virtual-to-disk translation
		 * stable while we read it — confirm against bmap locking.
		 */
		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
		n = nilfs_bmap_lookup_contig(
			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

		if (n < 0) {
			int past_eof;

			if (unlikely(n != -ENOENT))
				break; /* error */

			/* HOLE: advance one block and flush any open extent */
			blkoff++;
			past_eof = ((blkoff << blkbits) >= isize);

			if (size) {
				/* End of the current extent */

				if (past_eof)
					flags |= FIEMAP_EXTENT_LAST;

				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
				size = 0;
			}
			if (blkoff > end_blkoff || past_eof)
				break;
		} else {
			if (size) {
				if (phys && blkphy << blkbits == phys + size) {
					/* The current extent goes on */
					size += n << blkbits;
				} else {
					/* Terminate the current extent */
					ret = fiemap_fill_next_extent(
						fieinfo, logical, phys, size,
						flags);
					if (ret || blkoff > end_blkoff)
						break;

					/* Start another extent */
					flags = FIEMAP_EXTENT_MERGED;
					logical = blkoff << blkbits;
					phys = blkphy << blkbits;
					size = n << blkbits;
				}
			} else {
				/* Start a new extent */
				flags = FIEMAP_EXTENT_MERGED;
				logical = blkoff << blkbits;
				phys = blkphy << blkbits;
				size = n << blkbits;
			}
			blkoff += n;
		}
		cond_resched();
	} while (true);

	/* If ret is 1 then we just hit the end of the extent array */
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}
1304