xref: /openbmc/linux/fs/nilfs2/inode.c (revision c6e49e3f)
/*
 * inode.c - NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"

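/**
 * struct nilfs_iget_args - arguments used during comparison between inodes
 * @ino: inode number
 * @cno: checkpoint number (used only when looking up a GC inode)
 * @root: pointer to the NILFS root object (mounted checkpoint), or NULL
 *	for GC inodes
 * @for_gc: inode for GC flag
 */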
struct nilfs_iget_args {
	u64 ino;
	__u64 cno;
	struct nilfs_root *root;
	int for_gc;
};

void nilfs_inode_add_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_add_bytes(inode, (1 << inode->i_blkbits) * n);
	if (root)
		atomic_add(n, &root->blocks_count);
}

void nilfs_inode_sub_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_sub_bytes(inode, (1 << inode->i_blkbits) * n);
	if (root)
		atomic_sub(n, &root->blocks_count);
}

/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode: inode struct of the target file
 * @blkoff: file block number
 * @bh_result: buffer head to be mapped on
 * @create: whether to allocate the block if it has not been allocated yet
 *
 * This function does not issue an actual read request for the specified
 * data block; that is done by the VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 blknum = 0;
	int err = 0, ret;
	unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	if (ret >= 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		if (ret > 0)
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * The get_block() function could be called
				 * from multiple callers for an inode.
				 * However, the page having this block must
				 * be locked in this case.
				 */
				printk(KERN_WARNING
				       "nilfs_get_block: a race condition "
				       "while inserting a data block. "
				       "(inode number=%lu, file block "
				       "offset=%llu)\n",
				       inode->i_ino,
				       (unsigned long long)blkoff);
				err = 0;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_mark_inode_dirty(inode);
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		set_buffer_delay(bh_result);
		map_bh(bh_result, inode->i_sb, 0);
		/* The disk block number must be changed to the proper value */
	} else if (ret == -ENOENT) {
		/*
		 * A hole is not an error; return without setting the
		 * mapped state flag.
		 */
		;
	} else {
		err = ret;
	}

 out:
	return err;
}
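
/*
 * nilfs_get_block() is the shared get_block callback for the address
 * space operations below (readpage, readpages, write_begin, direct_IO).
 * Callers encode the number of blocks they are willing to map in
 * bh_result->b_size.  A minimal sketch of the calling convention (the
 * local variables here are illustrative, not part of this file):
 *
 *	struct buffer_head bh = { .b_size = 1 << inode->i_blkbits };
 *
 *	if (!nilfs_get_block(inode, blkoff, &bh, 0) && buffer_mapped(&bh))
 *		... bh.b_blocknr now holds the disk block number ...
 */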

/**
 * nilfs_readpage() - implement readpage() method of nilfs_aops
 * address_space_operations.
 * @file: file struct of the file to be read
 * @page: the page to be read
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, nilfs_get_block);
}

/**
 * nilfs_readpages() - implement readpages() method of nilfs_aops
 * address_space_operations.
 * @file: file struct of the file to be read
 * @mapping: address_space struct used for reading multiple pages
 * @pages: the pages to be read
 * @nr_pages: number of pages to be read
 */
static int nilfs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
}

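/*
 * NILFS is log-structured: dirty pages are normally written out by the
 * segment constructor rather than by the generic writeback path, so
 * ->writepages only needs to act when the caller requires the data to
 * be synchronized (WB_SYNC_ALL); other requests are deferred to segment
 * construction.
 */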
static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err = 0;

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
						    wbc->range_start,
						    wbc->range_end);
	return err;
}

static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim) {
		nilfs_flush_segment(inode->i_sb, inode->i_ino);
	}

	return 0;
}

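/*
 * When a clean page is dirtied, all of its buffers are dirtied at once,
 * so the number of newly dirtied file blocks to account for is the
 * number of filesystem blocks per page.
 */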
static int nilfs_set_page_dirty(struct page *page)
{
	int ret = __set_page_dirty_buffers(page);

	if (ret) {
		struct inode *inode = page->mapping->host;
		unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);

		nilfs_set_file_dirty(inode, nr_dirty);
	}
	return ret;
}

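/*
 * ->write_begin and ->write_end bracket each buffered write in a NILFS
 * transaction so that block allocation done by nilfs_get_block() and
 * the inode update are committed together.  In sketch form:
 *
 *	nilfs_transaction_begin(sb, NULL, 1);
 *	block_write_begin(...) and the data copy by the caller;
 *	generic_write_end(...);
 *	nilfs_transaction_commit(sb);  (or nilfs_transaction_abort() if
 *	write_begin fails)
 */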
static int nilfs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

	if (unlikely(err))
		return err;

	err = block_write_begin(mapping, pos, len, flags, pagep,
				nilfs_get_block);
	if (unlikely(err)) {
		loff_t isize = mapping->host->i_size;

		if (pos + len > isize)
			vmtruncate(mapping->host, isize);

		nilfs_transaction_abort(inode->i_sb);
	}
	return err;
}

static int nilfs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
	unsigned nr_dirty;
	int err;

	nr_dirty = nilfs_page_count_clean_buffers(page, start,
						  start + copied);
	copied = generic_write_end(file, mapping, pos, len, copied, page,
				   fsdata);
	nilfs_set_file_dirty(inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
	return err ? : copied;
}

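/*
 * Direct I/O is supported for reads only: returning 0 for a write makes
 * the generic direct-I/O path fall back to buffered writing.
 */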
static ssize_t
nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
		loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t size;

	if (rw == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	size = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
				  nilfs_get_block);

	/*
	 * In case of error, an extending write may have instantiated a few
	 * blocks outside i_size.  Trim these off again.
	 */
	if (unlikely((rw & WRITE) && size < 0)) {
		loff_t isize = i_size_read(inode);
		loff_t end = offset + iov_length(iov, nr_segs);

		if (end > isize)
			vmtruncate(inode, isize);
	}

	return size;
}

const struct address_space_operations nilfs_aops = {
	.writepage		= nilfs_writepage,
	.readpage		= nilfs_readpage,
	.writepages		= nilfs_writepages,
	.set_page_dirty		= nilfs_set_page_dirty,
	.readpages		= nilfs_readpages,
	.write_begin		= nilfs_write_begin,
	.write_end		= nilfs_write_end,
	/* .releasepage		= nilfs_releasepage, */
	.invalidatepage		= block_invalidatepage,
	.direct_IO		= nilfs_direct_IO,
	.is_partially_uptodate  = block_is_partially_uptodate,
};

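/*
 * Sketch of a typical nilfs_new_inode() caller (modeled on the
 * directory operations in namei.c; details there may differ):
 *
 *	struct nilfs_transaction_info ti;
 *
 *	err = nilfs_transaction_begin(dir->i_sb, &ti, 1);
 *	if (!err) {
 *		inode = nilfs_new_inode(dir, mode);
 *		if (!IS_ERR(inode)) {
 *			... link the inode into the directory ...
 *			err = nilfs_transaction_commit(dir->i_sb);
 *		} else {
 *			err = PTR_ERR(inode);
 *			nilfs_transaction_abort(dir->i_sb);
 *		}
 *	}
 */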
struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = 1 << NILFS_I_NEW;
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic_inc(&root->inodes_count);
	inode_init_owner(inode, dir, mode);
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_bmap;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = nilfs_mask_flags(
		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&nilfs->ns_next_gen_lock);
	inode->i_generation = nilfs->ns_next_generation++;
	spin_unlock(&nilfs->ns_next_gen_lock);
	insert_inode_hash(inode);

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		goto failed_acl; /* never occurs.  When nilfs_init_acl() is
				    supported, proper cancellation of the
				    above jobs should be considered. */

	return inode;

 failed_acl:
 failed_bmap:
	clear_nlink(inode);
	iput(inode);  /* raw_inode will be deleted through
			 generic_delete_inode() */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
			 called */
 failed:
	return ERR_PTR(err);
}

void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;

	inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
			    S_DIRSYNC);
	if (flags & FS_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & FS_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & FS_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
	if (flags & FS_NOATIME_FL)
		inode->i_flags |= S_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
}

int nilfs_read_inode_common(struct inode *inode,
			    struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	inode->i_uid = (uid_t)le32_to_cpu(raw_inode->i_uid);
	inode->i_gid = (gid_t)le32_to_cpu(raw_inode->i_gid);
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
	inode->i_size = le64_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	if (inode->i_nlink == 0 && inode->i_mode == 0)
		return -EINVAL; /* this inode is deleted */

	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
		0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
	ii->i_dir_start_lookup = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
		if (err < 0)
			return err;
		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}
	return 0;
}

static int __nilfs_read_inode(struct super_block *sb,
			      struct nilfs_root *root, unsigned long ino,
			      struct inode *inode)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	nilfs_set_inode_flags(inode);
	return 0;

 failed_unmap:
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);

 bad_inode:
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	return err;
}

static int nilfs_iget_test(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;
	struct nilfs_inode_info *ii;

	if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
		return 0;

	ii = NILFS_I(inode);
	if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
		return !args->for_gc;

	return args->for_gc && args->cno == ii->i_cno;
}

static int nilfs_iget_set(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;

	inode->i_ino = args->ino;
	if (args->for_gc) {
		NILFS_I(inode)->i_state = 1 << NILFS_I_GCINODE;
		NILFS_I(inode)->i_cno = args->cno;
		NILFS_I(inode)->i_root = NULL;
	} else {
		if (args->root && args->ino == NILFS_ROOT_INO)
			nilfs_get_root(args->root);
		NILFS_I(inode)->i_root = args->root;
	}
	return 0;
}

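/*
 * Inode lookup uses iget5 with a compound key: the same inode number
 * can be instantiated once per mounted checkpoint (keyed by @root) and,
 * for the garbage collector, once per checkpoint number (keyed by
 * @cno), so nilfs_iget_test() above compares more than i_ino.
 */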
struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
			    unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return ilookup5(sb, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
				unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}

struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
			 unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = nilfs_iget_locked(sb, root, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = __nilfs_read_inode(sb, root, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
				__u64 cno)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = NULL, .cno = cno, .for_gc = 1
	};
	struct inode *inode;
	int err;

	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = nilfs_init_gcinode(inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode, int has_bmap)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(inode->i_uid);
	raw_inode->i_gid = cpu_to_le32(inode->i_gid);
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
		struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

		/* zero-fill unused portion in the case of super root block */
		raw_inode->i_xattr = 0;
		raw_inode->i_pad = 0;
		memset((void *)raw_inode + sizeof(*raw_inode), 0,
		       nilfs->ns_inode_size - sizeof(*raw_inode));
	}

	if (has_bmap)
		nilfs_bmap_write(ii->i_bmap, raw_inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(huge_encode_dev(inode->i_rdev));
	/*
	 * When extending the on-disk inode, nilfs->ns_inode_size should be
	 * checked for substitutions of appended fields.
	 */
}

void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *ifile = ii->i_root->ifile;
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
	set_bit(NILFS_I_INODE_DIRTY, &ii->i_state);

	/*
	 * XXX: calling with has_bmap = 0 is a workaround to avoid a
	 * deadlock of bmap.  This delays the update of i_bmap to just
	 * before writing.
	 */
	nilfs_write_inode_common(inode, raw_inode, 0);
	nilfs_ifile_unmap_inode(ifile, ino, ibh);
}

#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */

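/*
 * Truncation proceeds from the tail of the file in chunks of at most
 * NILFS_MAX_TRUNCATE_BLOCKS so that a single pass over the bmap stays
 * bounded; nilfs_relax_pressure_in_lock() gives the segment constructor
 * a chance to reclaim space between passes.
 */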
static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	unsigned long b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

failed:
	nilfs_warning(ii->vfs_inode.i_sb, __func__,
		      "failed to truncate bmap (ino=%lu, err=%d)",
		      ii->vfs_inode.i_ino, ret);
}

void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_mark_inode_dirty(inode);
	nilfs_set_file_dirty(inode, 0);
	nilfs_transaction_commit(sb);
	/*
	 * May construct a logical segment and may fail in sync mode.
	 * But truncate has no return value.
	 */
}

static void nilfs_clear_inode(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	/*
	 * Free resources allocated in nilfs_read_inode(), here.
	 */
	BUG_ON(!list_empty(&ii->i_dirty));
	brelse(ii->i_bh);
	ii->i_bh = NULL;

	if (mdi && mdi->mi_palloc_cache)
		nilfs_palloc_destroy_cache(inode);

	if (test_bit(NILFS_I_BMAP, &ii->i_state))
		nilfs_bmap_clear(ii->i_bmap);

	nilfs_btnode_cache_clear(&ii->i_btnode_cache);

	if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
		nilfs_put_root(ii->i_root);
}

void nilfs_evict_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int ret;

	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
		if (inode->i_data.nrpages)
			truncate_inode_pages(&inode->i_data, 0);
		end_writeback(inode);
		nilfs_clear_inode(inode);
		return;
	}
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	if (inode->i_data.nrpages)
		truncate_inode_pages(&inode->i_data, 0);

	/* TODO: some of the following operations may fail.  */
	nilfs_truncate_bmap(ii, 0);
	nilfs_mark_inode_dirty(inode);
	end_writeback(inode);

	ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
	if (!ret)
		atomic_dec(&ii->i_root->inodes_count);

	nilfs_clear_inode(inode);

	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	nilfs_transaction_commit(sb);
	/*
	 * May construct a logical segment and may fail in sync mode.
	 * But delete_inode has no return value.
	 */
}

int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct nilfs_transaction_info ti;
	struct inode *inode = dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	int err;

	err = inode_change_ok(inode, iattr);
	if (err)
		return err;

	err = nilfs_transaction_begin(sb, &ti, 0);
	if (unlikely(err))
		return err;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		inode_dio_wait(inode);

		err = vmtruncate(inode, iattr->ia_size);
		if (unlikely(err))
			goto out_err;
	}

	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);

	if (iattr->ia_valid & ATTR_MODE) {
		err = nilfs_acl_chmod(inode);
		if (unlikely(err))
			goto out_err;
	}

	return nilfs_transaction_commit(sb);

out_err:
	nilfs_transaction_abort(sb);
	return err;
}

int nilfs_permission(struct inode *inode, int mask)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	if ((mask & MAY_WRITE) && root &&
	    root->cno != NILFS_CPTREE_CURRENT_CNO)
		return -EROFS; /* snapshot is not writable */

	return generic_permission(inode, mask);
}

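/*
 * nilfs_load_inode_block() caches the buffer head of the on-disk inode
 * in ii->i_bh using a double-checked pattern: ns_inode_lock is dropped
 * while the block is read in, then retaken to see whether another task
 * installed the buffer first.
 */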
int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	spin_lock(&nilfs->ns_inode_lock);
	if (ii->i_bh == NULL) {
		spin_unlock(&nilfs->ns_inode_lock);
		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
						  inode->i_ino, pbh);
		if (unlikely(err))
			return err;
		spin_lock(&nilfs->ns_inode_lock);
		if (ii->i_bh == NULL) {
			ii->i_bh = *pbh;
		} else {
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else {
		*pbh = ii->i_bh;
	}

	get_bh(*pbh);
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

int nilfs_inode_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	int ret = 0;

	if (!list_empty(&ii->i_dirty)) {
		spin_lock(&nilfs->ns_inode_lock);
		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
			test_bit(NILFS_I_BUSY, &ii->i_state);
		spin_unlock(&nilfs->ns_inode_lock);
	}
	return ret;
}

int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

	atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);

	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/*
		 * Because this routine may race with nilfs_dispose_list(),
		 * we have to check NILFS_I_QUEUED here, too.
		 */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/*
			 * This will happen when somebody is freeing
			 * this inode.
			 */
			nilfs_warning(inode->i_sb, __func__,
				      "cannot get inode (ino=%lu)\n",
				      inode->i_ino);
			spin_unlock(&nilfs->ns_inode_lock);
			return -EINVAL; /* NILFS_I_DIRTY may remain for
					   freeing inode */
		}
		list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

int nilfs_mark_inode_dirty(struct inode *inode)
{
	struct buffer_head *ibh;
	int err;

	err = nilfs_load_inode_block(inode, &ibh);
	if (unlikely(err)) {
		nilfs_warning(inode->i_sb, __func__,
			      "failed to reget inode block.\n");
		return err;
	}
	nilfs_update_inode(inode, ibh);
	mark_buffer_dirty(ibh);
	nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
	brelse(ibh);
	return 0;
}

/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 * @flags: flags to mark inode dirty (unused here)
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from a nilfs_inode to a corresponding inode
 * entry in the inode block. This operation is excluded from the segment
 * construction. This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode, int flags)
{
	struct nilfs_transaction_info ti;
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	if (is_bad_inode(inode)) {
		nilfs_warning(inode->i_sb, __func__,
			      "tried to mark bad_inode dirty. ignored.\n");
		dump_stack();
		return;
	}
	if (mdi) {
		nilfs_mdt_mark_dirty(inode);
		return;
	}
	nilfs_transaction_begin(inode->i_sb, &ti, 0);
	nilfs_mark_inode_dirty(inode);
	nilfs_transaction_commit(inode->i_sb); /* never fails */
}

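/*
 * nilfs_fiemap() walks the file in block units, coalescing contiguous
 * bmap lookups into extents and reporting delayed-allocation ranges
 * (dirty pages not yet assigned disk blocks) as separate
 * FIEMAP_EXTENT_DELALLOC extents.
 */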
int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 __u64 start, __u64 len)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 logical = 0, phys = 0, size = 0;
	__u32 flags = 0;
	loff_t isize;
	sector_t blkoff, end_blkoff;
	sector_t delalloc_blkoff;
	unsigned long delalloc_blklen;
	unsigned int blkbits = inode->i_blkbits;
	int ret, n;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);

	blkoff = start >> blkbits;
	end_blkoff = (start + len - 1) >> blkbits;

	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
							&delalloc_blkoff);

	do {
		__u64 blkphy;
		unsigned int maxblocks;

		if (delalloc_blklen && blkoff == delalloc_blkoff) {
			if (size) {
				/* End of the current extent */
				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
			}
			if (blkoff > end_blkoff)
				break;

			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
			logical = blkoff << blkbits;
			phys = 0;
			size = delalloc_blklen << blkbits;

			blkoff = delalloc_blkoff + delalloc_blklen;
			delalloc_blklen = nilfs_find_uncommitted_extent(
				inode, blkoff, &delalloc_blkoff);
			continue;
		}

		/*
		 * Limit the number of blocks that we look up so as
		 * not to get into the next delayed allocation extent.
		 */
		maxblocks = INT_MAX;
		if (delalloc_blklen)
			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
					  maxblocks);
		blkphy = 0;

		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
		n = nilfs_bmap_lookup_contig(
			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

		if (n < 0) {
			int past_eof;

			if (unlikely(n != -ENOENT))
				break; /* error */

			/* HOLE */
			blkoff++;
			past_eof = ((blkoff << blkbits) >= isize);

			if (size) {
				/* End of the current extent */

				if (past_eof)
					flags |= FIEMAP_EXTENT_LAST;

				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
				size = 0;
			}
			if (blkoff > end_blkoff || past_eof)
				break;
		} else {
			if (size) {
				if (phys && blkphy << blkbits == phys + size) {
					/* The current extent goes on */
					size += n << blkbits;
				} else {
					/* Terminate the current extent */
					ret = fiemap_fill_next_extent(
						fieinfo, logical, phys, size,
						flags);
					if (ret || blkoff > end_blkoff)
						break;

					/* Start another extent */
					flags = FIEMAP_EXTENT_MERGED;
					logical = blkoff << blkbits;
					phys = blkphy << blkbits;
					size = n << blkbits;
				}
			} else {
				/* Start a new extent */
				flags = FIEMAP_EXTENT_MERGED;
				logical = blkoff << blkbits;
				phys = blkphy << blkbits;
				size = n << blkbits;
			}
			blkoff += n;
		}
		cond_resched();
	} while (true);

	/* If ret is 1 then we just hit the end of the extent array */
	if (ret == 1)
		ret = 0;

	mutex_unlock(&inode->i_mutex);
	return ret;
}
1067