/*
 * inode.c - NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include "nilfs.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"


/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode: inode struct of the target file
 * @blkoff: file block number
 * @bh_result: buffer head to be mapped on
 * @create: indicate whether to allocate the block when it has not been
 *	allocated yet.
 *
 * This function does not issue actual read requests for the specified
 * data block; that is done by the VFS.
 * Bulk read for direct-io is not supported yet (it should be).
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	unsigned long blknum = 0;
	int err = 0, ret;
	struct inode *dat = nilfs_dat_inode(NILFS_I_NILFS(inode));

	/* This exclusion control is a workaround; should be revised */
	down_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
	ret = nilfs_bmap_lookup(ii->i_bmap, (unsigned long)blkoff, &blknum);
	up_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
	if (ret == 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff,
					(unsigned long)bh_result);
		if (unlikely(err)) {
			if (err == -EEXIST) {
				/*
				 * The get_block() function could be called
				 * from multiple callers for an inode.
				 * However, the page having this block must
				 * be locked in this case.
				 */
				printk(KERN_WARNING
				       "nilfs_get_block: a race condition "
				       "while inserting a data block. "
				       "(inode number=%lu, file block "
				       "offset=%llu)\n",
				       inode->i_ino,
				       (unsigned long long)blkoff);
				err = 0;
			} else if (err == -EINVAL) {
				nilfs_error(inode->i_sb, __func__,
					    "broken bmap (inode=%lu)\n",
					    inode->i_ino);
				err = -EIO;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb, 0);
		/* disk block number must be changed to a proper value */
	} else if (ret == -ENOENT) {
		/*
		 * Not found is not an error (e.g. hole); must return
		 * without the mapped state flag.
		 */
		;
	} else {
		err = ret;
	}

 out:
	return err;
}
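
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * nilfs_get_block() is the get_block_t callback handed to generic helpers
 * such as mpage_readpage() and block_write_begin() below.  A hypothetical
 * caller probing a single block by hand would look roughly like:
 *
 *	struct buffer_head bh = { .b_size = 1 << inode->i_blkbits };
 *
 *	if (nilfs_get_block(inode, blkoff, &bh, 0) == 0 &&
 *	    buffer_mapped(&bh))
 *		blocknr = bh.b_blocknr;
 *
 * A zero return with an unmapped buffer means a hole.  The actual read of
 * the mapped buffer is issued by the VFS/mpage layers, as noted above.
 */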

/**
 * nilfs_readpage() - implement the readpage() method of nilfs_aops
 * @file: file struct of the file to be read
 * @page: the page to be read
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, nilfs_get_block);
}

/**
 * nilfs_readpages() - implement the readpages() method of nilfs_aops
 * @file: file struct of the file to be read
 * @mapping: address_space struct used for reading multiple pages
 * @pages: the pages to be read
 * @nr_pages: number of pages to be read
 */
static int nilfs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
}

static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err = 0;

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
						    wbc->range_start,
						    wbc->range_end);
	return err;
}

static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}
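
/*
 * Note (added commentary, not in the original source): unlike block-mapped
 * filesystems, NILFS never writes a page back from ->writepage directly.
 * The page is redirtied and the work is delegated to the log writer, which
 * builds a whole segment: synchronously via nilfs_construct_segment() for
 * WB_SYNC_ALL writeback, or as a flush request when invoked for reclaim.
 */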

static int nilfs_set_page_dirty(struct page *page)
{
	int ret = __set_page_dirty_buffers(page);

	if (ret) {
		struct inode *inode = page->mapping->host;
		struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
		/* Account every block of the page as newly dirtied */
		unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);

		nilfs_set_file_dirty(sbi, inode, nr_dirty);
	}
	return ret;
}

static int nilfs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

	if (unlikely(err))
		return err;

	*pagep = NULL;
	err = block_write_begin(file, mapping, pos, len, flags, pagep,
				fsdata, nilfs_get_block);
	if (unlikely(err))
		nilfs_transaction_abort(inode->i_sb);
	return err;
}

static int nilfs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
	unsigned nr_dirty;
	int err;

	/* Count still-clean buffers before generic_write_end() dirties them */
	nr_dirty = nilfs_page_count_clean_buffers(page, start,
						  start + copied);
	copied = generic_write_end(file, mapping, pos, len, copied, page,
				   fsdata);
	nilfs_set_file_dirty(NILFS_SB(inode->i_sb), inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
	return err ? : copied;
}
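
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * a buffered write thus runs as one transaction spanning the begin/end
 * pair.  Roughly, the generic write path drives:
 *
 *	nilfs_write_begin()	begins a transaction, maps/allocates blocks
 *	(copy of user data into the page by the caller)
 *	nilfs_write_end()	accounts newly dirtied blocks and commits
 *
 * so an error in block_write_begin() above aborts the transaction rather
 * than leaving it open.
 */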

static ssize_t
nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
		loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t size;

	/* Returning zero makes direct writes fall back to buffered I/O */
	if (rw == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	size = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				  offset, nr_segs, nilfs_get_block, NULL);
	return size;
}

struct address_space_operations nilfs_aops = {
	.writepage		= nilfs_writepage,
	.readpage		= nilfs_readpage,
	/* .sync_page		= nilfs_sync_page, */
	.writepages		= nilfs_writepages,
	.set_page_dirty		= nilfs_set_page_dirty,
	.readpages		= nilfs_readpages,
	.write_begin		= nilfs_write_begin,
	.write_end		= nilfs_write_end,
	/* .releasepage		= nilfs_releasepage, */
	.invalidatepage		= block_invalidatepage,
	.direct_IO		= nilfs_direct_IO,
};
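
/*
 * Note (added commentary, not in the original source): this operations
 * table is installed on regular files, directories, and symlinks via
 * inode->i_mapping->a_ops in __nilfs_read_inode() below.
 */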

struct inode *nilfs_new_inode(struct inode *dir, int mode)
{
	struct super_block *sb = dir->i_sb;
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct inode *inode;
	struct nilfs_inode_info *ii;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

	ii = NILFS_I(inode);
	ii->i_state = 1 << NILFS_I_NEW;

	err = nilfs_ifile_create_inode(sbi->s_ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic_inc(&sbi->s_inodes_count);

	inode->i_uid = current_fsuid();
	if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current_fsgid();

	inode->i_mode = mode;
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_bmap;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = NILFS_I(dir)->i_flags;
	if (S_ISLNK(mode))
		ii->i_flags &= ~(NILFS_IMMUTABLE_FL | NILFS_APPEND_FL);
	if (!S_ISDIR(mode))
		ii->i_flags &= ~NILFS_DIRSYNC_FL;

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
#ifdef CONFIG_NILFS_FS_POSIX_ACL
	ii->i_acl = NULL;
	ii->i_default_acl = NULL;
#endif
	ii->i_cno = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);
	insert_inode_hash(inode);

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		goto failed_acl; /* never occurs.  When nilfs_init_acl() is
				    actually supported, proper cancellation
				    of the above jobs should be considered */

	mark_inode_dirty(inode);
	return inode;

 failed_acl:
 failed_bmap:
	inode->i_nlink = 0;
	iput(inode);  /* raw_inode will be deleted through
			 generic_delete_inode() */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
			 called */
 failed:
	return ERR_PTR(err);
}
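
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * a hypothetical create-path caller follows the usual ERR_PTR convention,
 * roughly:
 *
 *	inode = nilfs_new_inode(dir, mode);
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *	inode->i_op = &nilfs_file_inode_operations;
 *	inode->i_fop = &nilfs_file_operations;
 *	inode->i_mapping->a_ops = &nilfs_aops;
 *	(then link the inode into the directory and commit)
 */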

void nilfs_free_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct nilfs_sb_info *sbi = NILFS_SB(sb);

	clear_inode(inode);
	/* XXX: check error code? Is there anything I can do? */
	(void) nilfs_ifile_delete_inode(sbi->s_ifile, inode->i_ino);
	atomic_dec(&sbi->s_inodes_count);
}

void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;

	inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
			    S_DIRSYNC);
	if (flags & NILFS_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & NILFS_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & NILFS_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
#ifndef NILFS_ATIME_DISABLE
	/* With NILFS_ATIME_DISABLE defined, S_NOATIME is set unconditionally */
	if (flags & NILFS_NOATIME_FL)
#endif
		inode->i_flags |= S_NOATIME;
	if (flags & NILFS_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
}

int nilfs_read_inode_common(struct inode *inode,
			    struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	inode->i_uid = (uid_t)le32_to_cpu(raw_inode->i_uid);
	inode->i_gid = (gid_t)le32_to_cpu(raw_inode->i_gid);
	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
	inode->i_size = le64_to_cpu(raw_inode->i_size);
	/* The on-disk inode carries no atime; mtime substitutes for it */
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	if (inode->i_nlink == 0 && inode->i_mode == 0)
		return -EINVAL; /* this inode is deleted */

	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
		0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
	ii->i_cno = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
		if (err < 0)
			return err;
		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}
	return 0;
}

static int __nilfs_read_inode(struct super_block *sb, unsigned long ino,
			      struct inode *inode)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct inode *dat = nilfs_dat_inode(sbi->s_nilfs);
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;
#ifdef CONFIG_NILFS_FS_POSIX_ACL
	/* Declaration added; the ACL branch below referenced 'ii' without it */
	struct nilfs_inode_info *ii = NILFS_I(inode);
#endif

	down_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
	err = nilfs_ifile_get_inode_block(sbi->s_ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, bh);

#ifdef CONFIG_NILFS_FS_POSIX_ACL
	ii->i_acl = NILFS_ACL_NOT_CACHED;
	ii->i_default_acl = NILFS_ACL_NOT_CACHED;
#endif
	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)	/* fix: propagate the error instead of returning 0 */
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			new_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(sbi->s_ifile, ino, bh);
	brelse(bh);
	up_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
	nilfs_set_inode_flags(inode);
	return 0;

 failed_unmap:
	nilfs_ifile_unmap_inode(sbi->s_ifile, ino, bh);
	brelse(bh);

 bad_inode:
	up_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
	return err;
}

struct inode *nilfs_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = iget_locked(sb, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = __nilfs_read_inode(sb, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}
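
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * callers of nilfs_iget() follow the ERR_PTR convention, e.g. a
 * hypothetical lookup:
 *
 *	struct inode *inode = nilfs_iget(sb, ino);
 *
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *	(use the inode, then drop it with iput() when done)
 */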

void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode, int has_bmap)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(inode->i_uid);
	raw_inode->i_gid = cpu_to_le32(inode->i_gid);
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	if (has_bmap)
		nilfs_bmap_write(ii->i_bmap, raw_inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(new_encode_dev(inode->i_rdev));
	/*
	 * When extending the inode, nilfs->ns_inode_size should be checked
	 * before filling in any appended fields.
	 */
}

void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, ibh);

	/* The buffer is guarded with lock_buffer() by the caller */
	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(sbi->s_ifile)->mi_entry_size);
	set_bit(NILFS_I_INODE_DIRTY, &ii->i_state);

	nilfs_write_inode_common(inode, raw_inode, 0);
		/*
		 * XXX: call with has_bmap = 0 is a workaround to avoid
		 * deadlock of bmap.  This delays update of i_bmap to just
		 * before writing.
		 */
	nilfs_ifile_unmap_inode(sbi->s_ifile, ino, ibh);
}

#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */

static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	unsigned long b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
 repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	/* Truncate at most NILFS_MAX_TRUNCATE_BLOCKS blocks per pass */
	b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

 failed:
	if (ret == -EINVAL)
		nilfs_error(ii->vfs_inode.i_sb, __func__,
			    "bmap is broken (ino=%lu)", ii->vfs_inode.i_ino);
	else
		nilfs_warning(ii->vfs_inode.i_sb, __func__,
			      "failed to truncate bmap (ino=%lu, err=%d)",
			      ii->vfs_inode.i_ino, ret);
}
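
/*
 * Note (added commentary, not in the original source): with 4KB blocks,
 * each pass above removes at most 16384 * 4KB = 64MB of file blocks, and
 * nilfs_relax_pressure_in_lock() is called between passes so a very large
 * truncate is broken up rather than done in one long-held stretch.
 */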

void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_set_file_dirty(NILFS_SB(sb), inode, 0);
	nilfs_transaction_commit(sb);
	/*
	 * May construct a logical segment and may fail in sync mode.
	 * But truncate has no return value.
	 */
}

void nilfs_delete_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (unlikely(is_bad_inode(inode))) {
		if (inode->i_data.nrpages)
			truncate_inode_pages(&inode->i_data, 0);
		clear_inode(inode);
		return;
	}
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	if (inode->i_data.nrpages)
		truncate_inode_pages(&inode->i_data, 0);

	nilfs_truncate_bmap(ii, 0);
	nilfs_free_inode(inode);
	/* nilfs_free_inode() marks inode buffer dirty */
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	nilfs_transaction_commit(sb);
	/*
	 * May construct a logical segment and may fail in sync mode.
	 * But delete_inode has no return value.
	 */
}

int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct nilfs_transaction_info ti;
	struct inode *inode = dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	int err;

	err = inode_change_ok(inode, iattr);
	if (err)
		return err;

	err = nilfs_transaction_begin(sb, &ti, 0);
	if (unlikely(err))
		return err;
	err = inode_setattr(inode, iattr);
	if (!err && (iattr->ia_valid & ATTR_MODE))
		err = nilfs_acl_chmod(inode);
	if (likely(!err))
		err = nilfs_transaction_commit(sb);
	else
		nilfs_transaction_abort(sb);

	return err;
}

int nilfs_load_inode_block(struct nilfs_sb_info *sbi, struct inode *inode,
			   struct buffer_head **pbh)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	/* ii->i_bh is only accessed under s_inode_lock */
	spin_lock(&sbi->s_inode_lock);
	if (ii->i_bh == NULL) {
		spin_unlock(&sbi->s_inode_lock);
		err = nilfs_ifile_get_inode_block(sbi->s_ifile, inode->i_ino,
						  pbh);
		if (unlikely(err))
			return err;
		spin_lock(&sbi->s_inode_lock);
		/* Recheck: someone else may have set i_bh while unlocked */
		if (ii->i_bh == NULL)
			ii->i_bh = *pbh;
		else {
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else
		*pbh = ii->i_bh;

	get_bh(*pbh);
	spin_unlock(&sbi->s_inode_lock);
	return 0;
}
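
/*
 * Note (added commentary, not in the original source): the caller owns an
 * extra reference on *pbh taken with get_bh() above and must drop it with
 * brelse() when finished; see nilfs_mark_inode_dirty() below for the
 * typical load/lock/update/brelse sequence.
 */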

int nilfs_inode_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
	int ret = 0;

	if (!list_empty(&ii->i_dirty)) {
		spin_lock(&sbi->s_inode_lock);
		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
			test_bit(NILFS_I_BUSY, &ii->i_state);
		spin_unlock(&sbi->s_inode_lock);
	}
	return ret;
}

int nilfs_set_file_dirty(struct nilfs_sb_info *sbi, struct inode *inode,
			 unsigned nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	atomic_add(nr_dirty, &sbi->s_nilfs->ns_ndirtyblks);

	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&sbi->s_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/*
		 * Because this routine may race with nilfs_dispose_list(),
		 * we have to check NILFS_I_QUEUED here, too.
		 */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/*
			 * This will happen when somebody is freeing
			 * this inode.
			 */
			nilfs_warning(sbi->s_super, __func__,
				      "cannot get inode (ino=%lu)\n",
				      inode->i_ino);
			spin_unlock(&sbi->s_inode_lock);
			return -EINVAL; /* NILFS_I_DIRTY may remain for
					   freeing inode */
		}
		list_del(&ii->i_dirty);
		list_add_tail(&ii->i_dirty, &sbi->s_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&sbi->s_inode_lock);
	return 0;
}

int nilfs_mark_inode_dirty(struct inode *inode)
{
	struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
	struct buffer_head *ibh;
	int err;

	err = nilfs_load_inode_block(sbi, inode, &ibh);
	if (unlikely(err)) {
		nilfs_warning(inode->i_sb, __func__,
			      "failed to reget inode block.\n");
		return err;
	}
	lock_buffer(ibh);
	nilfs_update_inode(inode, ibh);
	unlock_buffer(ibh);
	nilfs_mdt_mark_buffer_dirty(ibh);
	nilfs_mdt_mark_dirty(sbi->s_ifile);
	brelse(ibh);
	return 0;
}

/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from the in-core inode to the corresponding
 * inode entry in the inode block. This operation is excluded from the
 * segment construction. This function can be called both as a single
 * operation and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;

	if (is_bad_inode(inode)) {
		nilfs_warning(inode->i_sb, __func__,
			      "tried to mark bad_inode dirty. ignored.\n");
		dump_stack();
		return;
	}
	nilfs_transaction_begin(inode->i_sb, &ti, 0);
	nilfs_mark_inode_dirty(inode);
	nilfs_transaction_commit(inode->i_sb); /* never fails */
}
786