xref: /openbmc/linux/fs/nilfs2/inode.c (revision 9b1fc4e4)
1 /*
2  * inode.c - NILFS inode operations.
3  *
4  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
19  *
20  * Written by Ryusuke Konishi <ryusuke@osrg.net>
21  *
22  */
23 
24 #include <linux/buffer_head.h>
25 #include <linux/gfp.h>
26 #include <linux/mpage.h>
27 #include <linux/writeback.h>
28 #include <linux/uio.h>
29 #include "nilfs.h"
30 #include "btnode.h"
31 #include "segment.h"
32 #include "page.h"
33 #include "mdt.h"
34 #include "cpfile.h"
35 #include "ifile.h"
36 
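/*
 * struct nilfs_iget_args - arguments used during comparison between inodes
 * ino:    inode number
 * cno:    checkpoint number (used only when looking up a GC inode)
 * root:   pointer on NILFS root object (mounted checkpoint); NULL for
 *         GC inodes
 * for_gc: nonzero if the lookup is for a GC inode
 */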
37 struct nilfs_iget_args {
38 	u64 ino;
39 	__u64 cno;
40 	struct nilfs_root *root;
41 	int for_gc;
42 };
43 
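/*
 * nilfs_inode_add_blocks() and nilfs_inode_sub_blocks() adjust the byte
 * count of the inode by @n blocks and keep the per-root (checkpoint)
 * block counter in sync.
 */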
44 void nilfs_inode_add_blocks(struct inode *inode, int n)
45 {
46 	struct nilfs_root *root = NILFS_I(inode)->i_root;
47 
48 	inode_add_bytes(inode, (1 << inode->i_blkbits) * n);
49 	if (root)
50 		atomic_add(n, &root->blocks_count);
51 }
52 
53 void nilfs_inode_sub_blocks(struct inode *inode, int n)
54 {
55 	struct nilfs_root *root = NILFS_I(inode)->i_root;
56 
57 	inode_sub_bytes(inode, (1 << inode->i_blkbits) * n);
58 	if (root)
59 		atomic_sub(n, &root->blocks_count);
60 }
61 
62 /**
63  * nilfs_get_block() - get a file block on the filesystem (callback function)
64  * @inode: inode struct of the target file
65  * @blkoff: file block number
66  * @bh_result: buffer head to be mapped on
67  * @create: indicate whether to allocate the block if it has not been
68  *      allocated yet.
69  *
70  * This function does not issue an actual read request for the specified
71  * data block; that is done by the VFS.
72  */
73 int nilfs_get_block(struct inode *inode, sector_t blkoff,
74 		    struct buffer_head *bh_result, int create)
75 {
76 	struct nilfs_inode_info *ii = NILFS_I(inode);
77 	__u64 blknum = 0;
78 	int err = 0, ret;
79 	struct inode *dat = NILFS_I_NILFS(inode)->ns_dat;
80 	unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;
81 
82 	down_read(&NILFS_MDT(dat)->mi_sem);
83 	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
84 	up_read(&NILFS_MDT(dat)->mi_sem);
85 	if (ret >= 0) {	/* found */
86 		map_bh(bh_result, inode->i_sb, blknum);
87 		if (ret > 0)
88 			bh_result->b_size = (ret << inode->i_blkbits);
89 		goto out;
90 	}
91 	/* data block was not found */
92 	if (ret == -ENOENT && create) {
93 		struct nilfs_transaction_info ti;
94 
95 		bh_result->b_blocknr = 0;
96 		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
97 		if (unlikely(err))
98 			goto out;
99 		err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff,
100 					(unsigned long)bh_result);
101 		if (unlikely(err != 0)) {
102 			if (err == -EEXIST) {
103 				/*
104 				 * The get_block() function could be called
105 				 * from multiple callers for an inode.
106 				 * However, the page having this block must
107 				 * be locked in this case.
108 				 */
109 				printk(KERN_WARNING
110 				       "nilfs_get_block: a race condition "
111 				       "while inserting a data block. "
112 				       "(inode number=%lu, file block "
113 				       "offset=%llu)\n",
114 				       inode->i_ino,
115 				       (unsigned long long)blkoff);
116 				err = 0;
117 			}
118 			nilfs_transaction_abort(inode->i_sb);
119 			goto out;
120 		}
121 		nilfs_mark_inode_dirty(inode);
122 		nilfs_transaction_commit(inode->i_sb); /* never fails */
123 		/* Error handling should be detailed */
124 		set_buffer_new(bh_result);
125 		set_buffer_delay(bh_result);
126 		map_bh(bh_result, inode->i_sb, 0); /* the disk block number
127 						      must be set to a proper value later */
128 	} else if (ret == -ENOENT) {
129 		/* not found is not an error (e.g. a hole); must return
130 		   without the mapped state flag. */
131 		;
132 	} else {
133 		err = ret;
134 	}
135 
136  out:
137 	return err;
138 }
139 
140 /**
141  * nilfs_readpage() - implement the readpage() method of the nilfs_aops
142  * address_space_operations.
143  * @file: file struct of the file to be read
144  * @page: the page to be read
145  */
146 static int nilfs_readpage(struct file *file, struct page *page)
147 {
148 	return mpage_readpage(page, nilfs_get_block);
149 }
150 
151 /**
152  * nilfs_readpages() - implement the readpages() method of the nilfs_aops
153  * address_space_operations.
154  * @file: file struct of the file to be read
155  * @mapping: address_space struct used for reading multiple pages
156  * @pages: the pages to be read
157  * @nr_pages: number of pages to be read
158  */
159 static int nilfs_readpages(struct file *file, struct address_space *mapping,
160 			   struct list_head *pages, unsigned nr_pages)
161 {
162 	return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
163 }
164 
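/*
 * In NILFS, dirty data is written out by the log writer (segment
 * constructor), so writepages only needs to request a data-sync segment
 * for the inode when synchronous writeback (WB_SYNC_ALL) is requested.
 */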
165 static int nilfs_writepages(struct address_space *mapping,
166 			    struct writeback_control *wbc)
167 {
168 	struct inode *inode = mapping->host;
169 	int err = 0;
170 
171 	if (wbc->sync_mode == WB_SYNC_ALL)
172 		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
173 						    wbc->range_start,
174 						    wbc->range_end);
175 	return err;
176 }
177 
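/*
 * A single page cannot be written back on its own in NILFS; the page is
 * redirtied and left to the segment constructor.  For synchronous
 * writeback a segment is constructed immediately; for reclaim the
 * segment daemon is merely kicked for this inode.
 */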
178 static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
179 {
180 	struct inode *inode = page->mapping->host;
181 	int err;
182 
183 	redirty_page_for_writepage(wbc, page);
184 	unlock_page(page);
185 
186 	if (wbc->sync_mode == WB_SYNC_ALL) {
187 		err = nilfs_construct_segment(inode->i_sb);
188 		if (unlikely(err))
189 			return err;
190 	} else if (wbc->for_reclaim)
191 		nilfs_flush_segment(inode->i_sb, inode->i_ino);
192 
193 	return 0;
194 }
195 
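/*
 * When a page is dirtied, account every block of the page as dirty so
 * that the segment constructor knows how much data is pending.
 */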
196 static int nilfs_set_page_dirty(struct page *page)
197 {
198 	int ret = __set_page_dirty_buffers(page);
199 
200 	if (ret) {
201 		struct inode *inode = page->mapping->host;
202 		unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);
203 
204 		nilfs_set_file_dirty(inode, nr_dirty);
205 	}
206 	return ret;
207 }
208 
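/*
 * write_begin/write_end wrap the generic buffered-write helpers in a
 * NILFS transaction so that block insertion and dirty-block accounting
 * are committed together (or aborted on failure).
 */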
209 static int nilfs_write_begin(struct file *file, struct address_space *mapping,
210 			     loff_t pos, unsigned len, unsigned flags,
211 			     struct page **pagep, void **fsdata)
212 
213 {
214 	struct inode *inode = mapping->host;
215 	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);
216 
217 	if (unlikely(err))
218 		return err;
219 
220 	err = block_write_begin(mapping, pos, len, flags, pagep,
221 				nilfs_get_block);
222 	if (unlikely(err)) {
223 		loff_t isize = mapping->host->i_size;
224 		if (pos + len > isize)
225 			vmtruncate(mapping->host, isize);
226 
227 		nilfs_transaction_abort(inode->i_sb);
228 	}
229 	return err;
230 }
231 
232 static int nilfs_write_end(struct file *file, struct address_space *mapping,
233 			   loff_t pos, unsigned len, unsigned copied,
234 			   struct page *page, void *fsdata)
235 {
236 	struct inode *inode = mapping->host;
237 	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
238 	unsigned nr_dirty;
239 	int err;
240 
241 	nr_dirty = nilfs_page_count_clean_buffers(page, start,
242 						  start + copied);
243 	copied = generic_write_end(file, mapping, pos, len, copied, page,
244 				   fsdata);
245 	nilfs_set_file_dirty(inode, nr_dirty);
246 	err = nilfs_transaction_commit(inode->i_sb);
247 	return err ? : copied;
248 }
249 
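/*
 * Direct writes are not supported here (they would need synchronization
 * with the cleaner); returning 0 for WRITE makes the caller fall back to
 * buffered writing.  Direct reads use the generic blockdev path.
 */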
250 static ssize_t
251 nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
252 		loff_t offset, unsigned long nr_segs)
253 {
254 	struct file *file = iocb->ki_filp;
255 	struct inode *inode = file->f_mapping->host;
256 	ssize_t size;
257 
258 	if (rw == WRITE)
259 		return 0;
260 
261 	/* Needs synchronization with the cleaner */
262 	size = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
263 				  offset, nr_segs, nilfs_get_block, NULL);
264 
265 	/*
266 	 * In case of error extending write may have instantiated a few
267 	 * blocks outside i_size. Trim these off again.
268 	 */
269 	if (unlikely((rw & WRITE) && size < 0)) {
270 		loff_t isize = i_size_read(inode);
271 		loff_t end = offset + iov_length(iov, nr_segs);
272 
273 		if (end > isize)
274 			vmtruncate(inode, isize);
275 	}
276 
277 	return size;
278 }
279 
280 const struct address_space_operations nilfs_aops = {
281 	.writepage		= nilfs_writepage,
282 	.readpage		= nilfs_readpage,
283 	.sync_page		= block_sync_page,
284 	.writepages		= nilfs_writepages,
285 	.set_page_dirty		= nilfs_set_page_dirty,
286 	.readpages		= nilfs_readpages,
287 	.write_begin		= nilfs_write_begin,
288 	.write_end		= nilfs_write_end,
289 	/* .releasepage		= nilfs_releasepage, */
290 	.invalidatepage		= block_invalidatepage,
291 	.direct_IO		= nilfs_direct_IO,
292 	.is_partially_uptodate  = block_is_partially_uptodate,
293 };
294 
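/*
 * Allocate a new inode in the ifile of the directory's root (checkpoint),
 * set up its in-core state, and inherit the inheritable flags from the
 * parent directory.
 */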
295 struct inode *nilfs_new_inode(struct inode *dir, int mode)
296 {
297 	struct super_block *sb = dir->i_sb;
298 	struct the_nilfs *nilfs = NILFS_SB(sb)->s_nilfs;
299 	struct inode *inode;
300 	struct nilfs_inode_info *ii;
301 	struct nilfs_root *root;
302 	int err = -ENOMEM;
303 	ino_t ino;
304 
305 	inode = new_inode(sb);
306 	if (unlikely(!inode))
307 		goto failed;
308 
309 	mapping_set_gfp_mask(inode->i_mapping,
310 			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
311 
312 	root = NILFS_I(dir)->i_root;
313 	ii = NILFS_I(inode);
314 	ii->i_state = 1 << NILFS_I_NEW;
315 	ii->i_root = root;
316 
317 	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
318 	if (unlikely(err))
319 		goto failed_ifile_create_inode;
320 	/* the reference count of i_bh is inherited from nilfs_mdt_read_block() */
321 
322 	atomic_inc(&root->inodes_count);
323 	inode_init_owner(inode, dir, mode);
324 	inode->i_ino = ino;
325 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
326 
327 	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
328 		err = nilfs_bmap_read(ii->i_bmap, NULL);
329 		if (err < 0)
330 			goto failed_bmap;
331 
332 		set_bit(NILFS_I_BMAP, &ii->i_state);
333 		/* No lock is needed; iget() ensures it. */
334 	}
335 
336 	ii->i_flags = nilfs_mask_flags(
337 		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);
338 
339 	/* ii->i_file_acl = 0; */
340 	/* ii->i_dir_acl = 0; */
341 	ii->i_dir_start_lookup = 0;
342 	nilfs_set_inode_flags(inode);
343 	spin_lock(&nilfs->ns_next_gen_lock);
344 	inode->i_generation = nilfs->ns_next_generation++;
345 	spin_unlock(&nilfs->ns_next_gen_lock);
346 	insert_inode_hash(inode);
347 
348 	err = nilfs_init_acl(inode, dir);
349 	if (unlikely(err))
350 		goto failed_acl; /* never occurs. When nilfs_init_acl() is
351 				    supported, proper cancellation of the
352 				    jobs above should be considered */
353 
354 	return inode;
355 
356  failed_acl:
357  failed_bmap:
358 	inode->i_nlink = 0;
359 	iput(inode);  /* raw_inode will be deleted through
360 			 generic_delete_inode() */
361 	goto failed;
362 
363  failed_ifile_create_inode:
364 	make_bad_inode(inode);
365 	iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
366 			 called */
367  failed:
368 	return ERR_PTR(err);
369 }
370 
371 void nilfs_set_inode_flags(struct inode *inode)
372 {
373 	unsigned int flags = NILFS_I(inode)->i_flags;
374 
375 	inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
376 			    S_DIRSYNC);
377 	if (flags & FS_SYNC_FL)
378 		inode->i_flags |= S_SYNC;
379 	if (flags & FS_APPEND_FL)
380 		inode->i_flags |= S_APPEND;
381 	if (flags & FS_IMMUTABLE_FL)
382 		inode->i_flags |= S_IMMUTABLE;
383 	if (flags & FS_NOATIME_FL)
384 		inode->i_flags |= S_NOATIME;
385 	if (flags & FS_DIRSYNC_FL)
386 		inode->i_flags |= S_DIRSYNC;
387 	mapping_set_gfp_mask(inode->i_mapping,
388 			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
389 }
390 
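/*
 * Copy the fields of the on-disk inode @raw_inode into the in-core
 * @inode.  NILFS does not record atime separately, so atime is
 * initialized from mtime here.
 */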
391 int nilfs_read_inode_common(struct inode *inode,
392 			    struct nilfs_inode *raw_inode)
393 {
394 	struct nilfs_inode_info *ii = NILFS_I(inode);
395 	int err;
396 
397 	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
398 	inode->i_uid = (uid_t)le32_to_cpu(raw_inode->i_uid);
399 	inode->i_gid = (gid_t)le32_to_cpu(raw_inode->i_gid);
400 	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
401 	inode->i_size = le64_to_cpu(raw_inode->i_size);
402 	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
403 	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
404 	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
405 	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
406 	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
407 	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
408 	if (inode->i_nlink == 0 && inode->i_mode == 0)
409 		return -EINVAL; /* this inode is deleted */
410 
411 	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
412 	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
413 #if 0
414 	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
415 	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
416 		0 : le32_to_cpu(raw_inode->i_dir_acl);
417 #endif
418 	ii->i_dir_start_lookup = 0;
419 	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
420 
421 	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
422 	    S_ISLNK(inode->i_mode)) {
423 		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
424 		if (err < 0)
425 			return err;
426 		set_bit(NILFS_I_BMAP, &ii->i_state);
427 		/* No lock is needed; iget() ensures it. */
428 	}
429 	return 0;
430 }
431 
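/*
 * Read the on-disk inode @ino from the ifile of @root into @inode and
 * install the inode/file/address-space operations that match its file
 * type.  The lookup runs under the DAT metadata file's semaphore.
 */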
432 static int __nilfs_read_inode(struct super_block *sb,
433 			      struct nilfs_root *root, unsigned long ino,
434 			      struct inode *inode)
435 {
436 	struct the_nilfs *nilfs = NILFS_SB(sb)->s_nilfs;
437 	struct buffer_head *bh;
438 	struct nilfs_inode *raw_inode;
439 	int err;
440 
441 	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
442 	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
443 	if (unlikely(err))
444 		goto bad_inode;
445 
446 	raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);
447 
448 	err = nilfs_read_inode_common(inode, raw_inode);
449 	if (err)
450 		goto failed_unmap;
451 
452 	if (S_ISREG(inode->i_mode)) {
453 		inode->i_op = &nilfs_file_inode_operations;
454 		inode->i_fop = &nilfs_file_operations;
455 		inode->i_mapping->a_ops = &nilfs_aops;
456 	} else if (S_ISDIR(inode->i_mode)) {
457 		inode->i_op = &nilfs_dir_inode_operations;
458 		inode->i_fop = &nilfs_dir_operations;
459 		inode->i_mapping->a_ops = &nilfs_aops;
460 	} else if (S_ISLNK(inode->i_mode)) {
461 		inode->i_op = &nilfs_symlink_inode_operations;
462 		inode->i_mapping->a_ops = &nilfs_aops;
463 	} else {
464 		inode->i_op = &nilfs_special_inode_operations;
465 		init_special_inode(
466 			inode, inode->i_mode,
467 			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
468 	}
469 	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
470 	brelse(bh);
471 	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
472 	nilfs_set_inode_flags(inode);
473 	return 0;
474 
475  failed_unmap:
476 	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
477 	brelse(bh);
478 
479  bad_inode:
480 	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
481 	return err;
482 }
483 
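/*
 * Match callback for iget5: an inode matches when the inode number and
 * root agree, and, for GC inodes, when the checkpoint number also agrees.
 */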
484 static int nilfs_iget_test(struct inode *inode, void *opaque)
485 {
486 	struct nilfs_iget_args *args = opaque;
487 	struct nilfs_inode_info *ii;
488 
489 	if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
490 		return 0;
491 
492 	ii = NILFS_I(inode);
493 	if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
494 		return !args->for_gc;
495 
496 	return args->for_gc && args->cno == ii->i_cno;
497 }
498 
499 static int nilfs_iget_set(struct inode *inode, void *opaque)
500 {
501 	struct nilfs_iget_args *args = opaque;
502 
503 	inode->i_ino = args->ino;
504 	if (args->for_gc) {
505 		NILFS_I(inode)->i_state = 1 << NILFS_I_GCINODE;
506 		NILFS_I(inode)->i_cno = args->cno;
507 		NILFS_I(inode)->i_root = NULL;
508 	} else {
509 		if (args->root && args->ino == NILFS_ROOT_INO)
510 			nilfs_get_root(args->root);
511 		NILFS_I(inode)->i_root = args->root;
512 	}
513 	return 0;
514 }
515 
516 struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
517 			    unsigned long ino)
518 {
519 	struct nilfs_iget_args args = {
520 		.ino = ino, .root = root, .cno = 0, .for_gc = 0
521 	};
522 
523 	return ilookup5(sb, ino, nilfs_iget_test, &args);
524 }
525 
526 struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
527 				unsigned long ino)
528 {
529 	struct nilfs_iget_args args = {
530 		.ino = ino, .root = root, .cno = 0, .for_gc = 0
531 	};
532 
533 	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
534 }
535 
536 struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
537 			 unsigned long ino)
538 {
539 	struct inode *inode;
540 	int err;
541 
542 	inode = nilfs_iget_locked(sb, root, ino);
543 	if (unlikely(!inode))
544 		return ERR_PTR(-ENOMEM);
545 	if (!(inode->i_state & I_NEW))
546 		return inode;
547 
548 	err = __nilfs_read_inode(sb, root, ino, inode);
549 	if (unlikely(err)) {
550 		iget_failed(inode);
551 		return ERR_PTR(err);
552 	}
553 	unlock_new_inode(inode);
554 	return inode;
555 }
556 
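/*
 * Get or create a GC inode: a shadow inode used by the garbage collector
 * to cache the blocks it is moving; @cno disambiguates inodes cached for
 * different checkpoints.
 */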
557 struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
558 				__u64 cno)
559 {
560 	struct nilfs_iget_args args = {
561 		.ino = ino, .root = NULL, .cno = cno, .for_gc = 1
562 	};
563 	struct inode *inode;
564 	int err;
565 
566 	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
567 	if (unlikely(!inode))
568 		return ERR_PTR(-ENOMEM);
569 	if (!(inode->i_state & I_NEW))
570 		return inode;
571 
572 	err = nilfs_init_gcinode(inode);
573 	if (unlikely(err)) {
574 		iget_failed(inode);
575 		return ERR_PTR(err);
576 	}
577 	unlock_new_inode(inode);
578 	return inode;
579 }
580 
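/*
 * Fill the on-disk inode @raw_inode from the in-core @inode.  The bmap
 * root is written out when @has_bmap is set; otherwise the device code
 * is stored for character and block special files.
 */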
581 void nilfs_write_inode_common(struct inode *inode,
582 			      struct nilfs_inode *raw_inode, int has_bmap)
583 {
584 	struct nilfs_inode_info *ii = NILFS_I(inode);
585 
586 	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
587 	raw_inode->i_uid = cpu_to_le32(inode->i_uid);
588 	raw_inode->i_gid = cpu_to_le32(inode->i_gid);
589 	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
590 	raw_inode->i_size = cpu_to_le64(inode->i_size);
591 	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
592 	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
593 	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
594 	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
595 	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);
596 
597 	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
598 	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
599 
600 	if (has_bmap)
601 		nilfs_bmap_write(ii->i_bmap, raw_inode);
602 	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
603 		raw_inode->i_device_code =
604 			cpu_to_le64(huge_encode_dev(inode->i_rdev));
605 	/* When the on-disk inode is extended, nilfs->ns_inode_size should
606 	   be checked before filling in the appended fields */
607 }
608 
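/*
 * Copy the in-core @inode into its entry in the ifile block @ibh and
 * flag the inode entry as dirty.
 */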
609 void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh)
610 {
611 	ino_t ino = inode->i_ino;
612 	struct nilfs_inode_info *ii = NILFS_I(inode);
613 	struct inode *ifile = ii->i_root->ifile;
614 	struct nilfs_inode *raw_inode;
615 
616 	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);
617 
618 	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
619 		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
620 	set_bit(NILFS_I_INODE_DIRTY, &ii->i_state);
621 
622 	nilfs_write_inode_common(inode, raw_inode, 0);
623 		/* XXX: calling with has_bmap = 0 is a workaround to avoid
624 		   a bmap deadlock; it delays the update of i_bmap until
625 		   just before writing */
626 	nilfs_ifile_unmap_inode(ifile, ino, ibh);
627 }
628 
629 #define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */
630 
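/*
 * Shrink the bmap of @ii down to file block @from, truncating at most
 * NILFS_MAX_TRUNCATE_BLOCKS blocks per step and relieving memory
 * pressure between steps.
 */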
631 static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
632 				unsigned long from)
633 {
634 	unsigned long b;
635 	int ret;
636 
637 	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
638 		return;
639 repeat:
640 	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
641 	if (ret == -ENOENT)
642 		return;
643 	else if (ret < 0)
644 		goto failed;
645 
646 	if (b < from)
647 		return;
648 
649 	b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
650 	ret = nilfs_bmap_truncate(ii->i_bmap, b);
651 	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
652 	if (!ret || (ret == -ENOMEM &&
653 		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
654 		goto repeat;
655 
656 failed:
657 	nilfs_warning(ii->vfs_inode.i_sb, __func__,
658 		      "failed to truncate bmap (ino=%lu, err=%d)",
659 		      ii->vfs_inode.i_ino, ret);
660 }
661 
662 void nilfs_truncate(struct inode *inode)
663 {
664 	unsigned long blkoff;
665 	unsigned int blocksize;
666 	struct nilfs_transaction_info ti;
667 	struct super_block *sb = inode->i_sb;
668 	struct nilfs_inode_info *ii = NILFS_I(inode);
669 
670 	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
671 		return;
672 	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
673 		return;
674 
675 	blocksize = sb->s_blocksize;
676 	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
677 	nilfs_transaction_begin(sb, &ti, 0); /* never fails */
678 
679 	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);
680 
681 	nilfs_truncate_bmap(ii, blkoff);
682 
683 	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
684 	if (IS_SYNC(inode))
685 		nilfs_set_transaction_flag(NILFS_TI_SYNC);
686 
687 	nilfs_mark_inode_dirty(inode);
688 	nilfs_set_file_dirty(inode, 0);
689 	nilfs_transaction_commit(sb);
690 	/* This may construct a logical segment and may fail in sync mode,
691 	   but truncate has no return value. */
692 }
693 
694 static void nilfs_clear_inode(struct inode *inode)
695 {
696 	struct nilfs_inode_info *ii = NILFS_I(inode);
697 	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
698 
699 	/*
700 	 * Free the resources allocated in nilfs_read_inode() here.
701 	 */
702 	BUG_ON(!list_empty(&ii->i_dirty));
703 	brelse(ii->i_bh);
704 	ii->i_bh = NULL;
705 
706 	if (mdi && mdi->mi_palloc_cache)
707 		nilfs_palloc_destroy_cache(inode);
708 
709 	if (test_bit(NILFS_I_BMAP, &ii->i_state))
710 		nilfs_bmap_clear(ii->i_bmap);
711 
712 	nilfs_btnode_cache_clear(&ii->i_btnode_cache);
713 
714 	if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
715 		nilfs_put_root(ii->i_root);
716 }
717 
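/*
 * Final disposal of an inode.  For a normally deleted file the whole
 * bmap is truncated, the entry is removed from the ifile, and everything
 * is committed in a single transaction; inodes that are still linked,
 * bad, or without a root only have their page cache dropped.
 */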
718 void nilfs_evict_inode(struct inode *inode)
719 {
720 	struct nilfs_transaction_info ti;
721 	struct super_block *sb = inode->i_sb;
722 	struct nilfs_inode_info *ii = NILFS_I(inode);
723 	int ret;
724 
725 	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
726 		if (inode->i_data.nrpages)
727 			truncate_inode_pages(&inode->i_data, 0);
728 		end_writeback(inode);
729 		nilfs_clear_inode(inode);
730 		return;
731 	}
732 	nilfs_transaction_begin(sb, &ti, 0); /* never fails */
733 
734 	if (inode->i_data.nrpages)
735 		truncate_inode_pages(&inode->i_data, 0);
736 
737 	/* TODO: some of the following operations may fail.  */
738 	nilfs_truncate_bmap(ii, 0);
739 	nilfs_mark_inode_dirty(inode);
740 	end_writeback(inode);
741 
742 	ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
743 	if (!ret)
744 		atomic_dec(&ii->i_root->inodes_count);
745 
746 	nilfs_clear_inode(inode);
747 
748 	if (IS_SYNC(inode))
749 		nilfs_set_transaction_flag(NILFS_TI_SYNC);
750 	nilfs_transaction_commit(sb);
751 	/* This may construct a logical segment and may fail in sync mode,
752 	   but delete_inode has no return value. */
753 }
754 
755 int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
756 {
757 	struct nilfs_transaction_info ti;
758 	struct inode *inode = dentry->d_inode;
759 	struct super_block *sb = inode->i_sb;
760 	int err;
761 
762 	err = inode_change_ok(inode, iattr);
763 	if (err)
764 		return err;
765 
766 	err = nilfs_transaction_begin(sb, &ti, 0);
767 	if (unlikely(err))
768 		return err;
769 
770 	if ((iattr->ia_valid & ATTR_SIZE) &&
771 	    iattr->ia_size != i_size_read(inode)) {
772 		err = vmtruncate(inode, iattr->ia_size);
773 		if (unlikely(err))
774 			goto out_err;
775 	}
776 
777 	setattr_copy(inode, iattr);
778 	mark_inode_dirty(inode);
779 
780 	if (iattr->ia_valid & ATTR_MODE) {
781 		err = nilfs_acl_chmod(inode);
782 		if (unlikely(err))
783 			goto out_err;
784 	}
785 
786 	return nilfs_transaction_commit(sb);
787 
788 out_err:
789 	nilfs_transaction_abort(sb);
790 	return err;
791 }
792 
793 int nilfs_permission(struct inode *inode, int mask, unsigned int flags)
794 {
795 	struct nilfs_root *root;
796 
797 	if (flags & IPERM_FLAG_RCU)
798 		return -ECHILD;
799 
800 	root = NILFS_I(inode)->i_root;
801 	if ((mask & MAY_WRITE) && root &&
802 	    root->cno != NILFS_CPTREE_CURRENT_CNO)
803 		return -EROFS; /* snapshot is not writable */
804 
805 	return generic_permission(inode, mask, flags, NULL);
806 }
807 
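/*
 * Get the ifile block holding the on-disk inode of @inode, caching it in
 * ii->i_bh.  The caller receives an extra reference on the buffer head
 * and is responsible for releasing it with brelse().
 */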
808 int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
809 {
810 	struct the_nilfs *nilfs = NILFS_SB(inode->i_sb)->s_nilfs;
811 	struct nilfs_inode_info *ii = NILFS_I(inode);
812 	int err;
813 
814 	spin_lock(&nilfs->ns_inode_lock);
815 	if (ii->i_bh == NULL) {
816 		spin_unlock(&nilfs->ns_inode_lock);
817 		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
818 						  inode->i_ino, pbh);
819 		if (unlikely(err))
820 			return err;
821 		spin_lock(&nilfs->ns_inode_lock);
822 		if (ii->i_bh == NULL)
823 			ii->i_bh = *pbh;
824 		else {
825 			brelse(*pbh);
826 			*pbh = ii->i_bh;
827 		}
828 	} else
829 		*pbh = ii->i_bh;
830 
831 	get_bh(*pbh);
832 	spin_unlock(&nilfs->ns_inode_lock);
833 	return 0;
834 }
835 
836 int nilfs_inode_dirty(struct inode *inode)
837 {
838 	struct nilfs_inode_info *ii = NILFS_I(inode);
839 	struct the_nilfs *nilfs = NILFS_SB(inode->i_sb)->s_nilfs;
840 	int ret = 0;
841 
842 	if (!list_empty(&ii->i_dirty)) {
843 		spin_lock(&nilfs->ns_inode_lock);
844 		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
845 			test_bit(NILFS_I_BUSY, &ii->i_state);
846 		spin_unlock(&nilfs->ns_inode_lock);
847 	}
848 	return ret;
849 }
850 
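/*
 * Account @nr_dirty newly dirtied blocks and, on the first dirtying of
 * the inode, take a reference and queue it on the ns_dirty_files list so
 * that the segment constructor will pick it up.
 */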
851 int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty)
852 {
853 	struct nilfs_inode_info *ii = NILFS_I(inode);
854 	struct the_nilfs *nilfs = NILFS_SB(inode->i_sb)->s_nilfs;
855 
856 	atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);
857 
858 	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
859 		return 0;
860 
861 	spin_lock(&nilfs->ns_inode_lock);
862 	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
863 	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
864 		/* Because this routine may race with nilfs_dispose_list(),
865 		   we have to check NILFS_I_QUEUED here, too. */
866 		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
867 			/* This will happen when somebody is freeing
868 			   this inode. */
869 			nilfs_warning(inode->i_sb, __func__,
870 				      "cannot get inode (ino=%lu)\n",
871 				      inode->i_ino);
872 			spin_unlock(&nilfs->ns_inode_lock);
873 			return -EINVAL; /* NILFS_I_DIRTY may remain for
874 					   freeing inode */
875 		}
876 		list_del(&ii->i_dirty);
877 		list_add_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
878 		set_bit(NILFS_I_QUEUED, &ii->i_state);
879 	}
880 	spin_unlock(&nilfs->ns_inode_lock);
881 	return 0;
882 }
883 
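/*
 * Write the in-core inode into its ifile block and mark both the block
 * and the ifile dirty so that they are included in the next segment.
 */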
884 int nilfs_mark_inode_dirty(struct inode *inode)
885 {
886 	struct buffer_head *ibh;
887 	int err;
888 
889 	err = nilfs_load_inode_block(inode, &ibh);
890 	if (unlikely(err)) {
891 		nilfs_warning(inode->i_sb, __func__,
892 			      "failed to reget inode block.\n");
893 		return err;
894 	}
895 	nilfs_update_inode(inode, ibh);
896 	nilfs_mdt_mark_buffer_dirty(ibh);
897 	nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
898 	brelse(ibh);
899 	return 0;
900 }
901 
902 /**
903  * nilfs_dirty_inode - reflect changes on given inode to an inode block.
904  * @inode: inode of the file to be registered.
905  *
906  * nilfs_dirty_inode() loads an inode block containing the specified
907  * @inode and copies its data into the corresponding nilfs_inode entry
908  * in that block. This operation is excluded from the segment
909  * construction. This function can be called both as a single operation
910  * and as a part of indivisible file operations.
911  */
912 void nilfs_dirty_inode(struct inode *inode)
913 {
914 	struct nilfs_transaction_info ti;
915 	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
916 
917 	if (is_bad_inode(inode)) {
918 		nilfs_warning(inode->i_sb, __func__,
919 			      "tried to mark bad_inode dirty. ignored.\n");
920 		dump_stack();
921 		return;
922 	}
923 	if (mdi) {
924 		nilfs_mdt_mark_dirty(inode);
925 		return;
926 	}
927 	nilfs_transaction_begin(inode->i_sb, &ti, 0);
928 	nilfs_mark_inode_dirty(inode);
929 	nilfs_transaction_commit(inode->i_sb); /* never fails */
930 }
931 
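/*
 * fiemap walks the file extent by extent: uncommitted (delayed
 * allocation) ranges found in the page cache are reported as DELALLOC
 * extents, and committed ranges are looked up contiguously in the bmap.
 */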
932 int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
933 		 __u64 start, __u64 len)
934 {
935 	struct the_nilfs *nilfs = NILFS_I_NILFS(inode);
936 	__u64 logical = 0, phys = 0, size = 0;
937 	__u32 flags = 0;
938 	loff_t isize;
939 	sector_t blkoff, end_blkoff;
940 	sector_t delalloc_blkoff;
941 	unsigned long delalloc_blklen;
942 	unsigned int blkbits = inode->i_blkbits;
943 	int ret, n;
944 
945 	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
946 	if (ret)
947 		return ret;
948 
949 	mutex_lock(&inode->i_mutex);
950 
951 	isize = i_size_read(inode);
952 
953 	blkoff = start >> blkbits;
954 	end_blkoff = (start + len - 1) >> blkbits;
955 
956 	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
957 							&delalloc_blkoff);
958 
959 	do {
960 		__u64 blkphy;
961 		unsigned int maxblocks;
962 
963 		if (delalloc_blklen && blkoff == delalloc_blkoff) {
964 			if (size) {
965 				/* End of the current extent */
966 				ret = fiemap_fill_next_extent(
967 					fieinfo, logical, phys, size, flags);
968 				if (ret)
969 					break;
970 			}
971 			if (blkoff > end_blkoff)
972 				break;
973 
974 			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
975 			logical = blkoff << blkbits;
976 			phys = 0;
977 			size = delalloc_blklen << blkbits;
978 
979 			blkoff = delalloc_blkoff + delalloc_blklen;
980 			delalloc_blklen = nilfs_find_uncommitted_extent(
981 				inode, blkoff, &delalloc_blkoff);
982 			continue;
983 		}
984 
985 		/*
986 		 * Limit the number of blocks that we look up so as
987 		 * not to get into the next delayed allocation extent.
988 		 */
989 		maxblocks = INT_MAX;
990 		if (delalloc_blklen)
991 			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
992 					  maxblocks);
993 		blkphy = 0;
994 
995 		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
996 		n = nilfs_bmap_lookup_contig(
997 			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
998 		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
999 
1000 		if (n < 0) {
1001 			int past_eof;
1002 
1003 			if (unlikely(n != -ENOENT))
1004 				break; /* error */
1005 
1006 			/* HOLE */
1007 			blkoff++;
1008 			past_eof = ((blkoff << blkbits) >= isize);
1009 
1010 			if (size) {
1011 				/* End of the current extent */
1012 
1013 				if (past_eof)
1014 					flags |= FIEMAP_EXTENT_LAST;
1015 
1016 				ret = fiemap_fill_next_extent(
1017 					fieinfo, logical, phys, size, flags);
1018 				if (ret)
1019 					break;
1020 				size = 0;
1021 			}
1022 			if (blkoff > end_blkoff || past_eof)
1023 				break;
1024 		} else {
1025 			if (size) {
1026 				if (phys && blkphy << blkbits == phys + size) {
1027 					/* The current extent goes on */
1028 					size += n << blkbits;
1029 				} else {
1030 					/* Terminate the current extent */
1031 					ret = fiemap_fill_next_extent(
1032 						fieinfo, logical, phys, size,
1033 						flags);
1034 					if (ret || blkoff > end_blkoff)
1035 						break;
1036 
1037 					/* Start another extent */
1038 					flags = FIEMAP_EXTENT_MERGED;
1039 					logical = blkoff << blkbits;
1040 					phys = blkphy << blkbits;
1041 					size = n << blkbits;
1042 				}
1043 			} else {
1044 				/* Start a new extent */
1045 				flags = FIEMAP_EXTENT_MERGED;
1046 				logical = blkoff << blkbits;
1047 				phys = blkphy << blkbits;
1048 				size = n << blkbits;
1049 			}
1050 			blkoff += n;
1051 		}
1052 		cond_resched();
1053 	} while (true);
1054 
1055 	/* If ret is 1 then we just hit the end of the extent array */
1056 	if (ret == 1)
1057 		ret = 0;
1058 
1059 	mutex_unlock(&inode->i_mutex);
1060 	return ret;
1061 }
1062