/*
 * inode.c - NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Written by Ryusuke Konishi.
 *
 */

#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"

/**
 * struct nilfs_iget_args - arguments used during comparison between inodes
 * @ino: inode number
 * @cno: checkpoint number
 * @root: pointer to NILFS root object (mounted checkpoint)
 * @for_gc: inode for GC flag
 */
struct nilfs_iget_args {
	u64 ino;
	__u64 cno;
	struct nilfs_root *root;
	int for_gc;
};

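/*
 * Regular lookups initialize these arguments with .cno = 0 and
 * .for_gc = 0 (see nilfs_iget_locked() below), while the garbage
 * collector instantiates a separate in-memory inode per (ino, cno)
 * pair with .for_gc = 1 (see nilfs_iget_for_gc()).
 */
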
static int nilfs_iget_test(struct inode *inode, void *opaque);

void nilfs_inode_add_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_add_bytes(inode, (1 << inode->i_blkbits) * n);
	if (root)
		atomic64_add(n, &root->blocks_count);
}

void nilfs_inode_sub_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_sub_bytes(inode, (1 << inode->i_blkbits) * n);
	if (root)
		atomic64_sub(n, &root->blocks_count);
}

/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode: inode struct of the target file
 * @blkoff: file block number
 * @bh_result: buffer head to be mapped on
 * @create: indicate whether to allocate the block if it has not been
 *	allocated yet
 *
 * This function does not issue an actual read request for the specified
 * data block; that is done by the VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 blknum = 0;
	int err = 0, ret;
	unsigned int maxblocks = bh_result->b_size >> inode->i_blkbits;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	if (ret >= 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		if (ret > 0)
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * The get_block() function could be called
				 * from multiple callers for an inode.
				 * However, the page having this block must
				 * be locked in this case.
				 */
				printk(KERN_WARNING
				       "nilfs_get_block: a race condition while inserting a data block. (inode number=%lu, file block offset=%llu)\n",
				       inode->i_ino,
				       (unsigned long long)blkoff);
				err = 0;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_mark_inode_dirty_sync(inode);
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		set_buffer_delay(bh_result);
		/* The disk block number must be changed to a proper value */
		map_bh(bh_result, inode->i_sb, 0);
	} else if (ret == -ENOENT) {
		/*
		 * Not found is not an error (e.g. a hole); must return
		 * without the mapped state flag.
		 */
		;
	} else {
		err = ret;
	}

 out:
	return err;
}

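/*
 * Usage sketch: nilfs_get_block() is the get_block_t callback handed to
 * the generic helpers below.  Read paths (mpage_readpage(), readpages)
 * call it with create=0 and only consult the mapped state, while write
 * paths (block_write_begin() via nilfs_write_begin()) call it with
 * create=1, so an unallocated block gets registered in the bmap and
 * comes back as a new, delayed-allocation buffer.
 */
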
/**
 * nilfs_readpage() - implement the readpage() method of nilfs_aops
 * @file: file struct of the file to be read
 * @page: the page to be read
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, nilfs_get_block);
}

/**
 * nilfs_readpages() - implement the readpages() method of nilfs_aops
 * @file: file struct of the file to be read
 * @mapping: address_space struct used for reading multiple pages
 * @pages: the pages to be read
 * @nr_pages: number of pages to be read
 */
static int nilfs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned int nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
}

static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err = 0;

	if (inode->i_sb->s_flags & MS_RDONLY) {
		nilfs_clear_dirty_pages(mapping, false);
		return -EROFS;
	}

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
						    wbc->range_start,
						    wbc->range_end);
	return err;
}

static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	if (inode->i_sb->s_flags & MS_RDONLY) {
		/*
		 * This means that the filesystem was remounted read-only
		 * because of an error or metadata corruption, while dirty
		 * pages still try to get flushed in the background.  So,
		 * here we simply discard this dirty page.
		 */
		nilfs_clear_dirty_page(page, false);
		unlock_page(page);
		return -EROFS;
	}

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim) {
		nilfs_flush_segment(inode->i_sb, inode->i_ino);
	}

	return 0;
}

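/*
 * Design note: NILFS2 is log-structured, so per-page writeback is
 * delegated to the segment constructor.  nilfs_writepage() therefore
 * redirties the page and, depending on the writeback mode, either
 * constructs a segment synchronously (WB_SYNC_ALL) or just kicks the
 * flusher for this inode; the segment constructor performs the actual
 * block writes later.
 */
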
static int nilfs_set_page_dirty(struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = __set_page_dirty_nobuffers(page);

	if (page_has_buffers(page)) {
		unsigned int nr_dirty = 0;
		struct buffer_head *bh, *head;

		/*
		 * This page is locked by callers, and no other thread
		 * concurrently marks its buffers dirty, since they are
		 * only dirtied through routines in fs/buffer.c, where the
		 * call sites of mark_buffer_dirty() are protected by the
		 * page lock.
		 */
		bh = head = page_buffers(page);
		do {
			/* Do not mark hole blocks dirty */
			if (buffer_dirty(bh) || !buffer_mapped(bh))
				continue;

			set_buffer_dirty(bh);
			nr_dirty++;
		} while (bh = bh->b_this_page, bh != head);

		if (nr_dirty)
			nilfs_set_file_dirty(inode, nr_dirty);
	} else if (ret) {
		unsigned int nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);

		nilfs_set_file_dirty(inode, nr_dirty);
	}
	return ret;
}

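/*
 * Worked example for the bufferless case above: with 4KB pages
 * (PAGE_SHIFT == 12) and a 1KB block size (i_blkbits == 10), the page
 * covers 1 << (12 - 10) = 4 file blocks, so the whole page is accounted
 * as four dirty blocks.
 */
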
void nilfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		nilfs_truncate(inode);
	}
}

static int nilfs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

	if (unlikely(err))
		return err;

	err = block_write_begin(mapping, pos, len, flags, pagep,
				nilfs_get_block);
	if (unlikely(err)) {
		nilfs_write_failed(mapping, pos + len);
		nilfs_transaction_abort(inode->i_sb);
	}
	return err;
}

static int nilfs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned int start = pos & (PAGE_SIZE - 1);
	unsigned int nr_dirty;
	int err;

	nr_dirty = nilfs_page_count_clean_buffers(page, start,
						  start + copied);
	copied = generic_write_end(file, mapping, pos, len, copied, page,
				   fsdata);
	nilfs_set_file_dirty(inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
	return err ? : copied;
}

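/*
 * Note on the buffered write path: nilfs_write_begin() opens a
 * transaction and nilfs_write_end() commits it, so each write is
 * bracketed by a begin/commit pair.  If block_write_begin() fails, the
 * transaction is aborted and nilfs_write_failed() truncates any blocks
 * instantiated beyond i_size.
 */
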
static ssize_t
nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iov_iter_rw(iter) == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	return blockdev_direct_IO(iocb, inode, iter, nilfs_get_block);
}

const struct address_space_operations nilfs_aops = {
	.writepage		= nilfs_writepage,
	.readpage		= nilfs_readpage,
	.writepages		= nilfs_writepages,
	.set_page_dirty		= nilfs_set_page_dirty,
	.readpages		= nilfs_readpages,
	.write_begin		= nilfs_write_begin,
	.write_end		= nilfs_write_end,
	/* .releasepage		= nilfs_releasepage, */
	.invalidatepage		= block_invalidatepage,
	.direct_IO		= nilfs_direct_IO,
	.is_partially_uptodate  = block_is_partially_uptodate,
};

static int nilfs_insert_inode_locked(struct inode *inode,
				     struct nilfs_root *root,
				     unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			   mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = 1 << NILFS_I_NEW;
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic64_inc(&root->inodes_count);
	inode_init_owner(inode, dir, mode);
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_after_creation;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = nilfs_mask_flags(
		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&nilfs->ns_next_gen_lock);
	inode->i_generation = nilfs->ns_next_generation++;
	spin_unlock(&nilfs->ns_next_gen_lock);
	if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
		err = -EIO;
		goto failed_after_creation;
	}

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		/*
		 * Never occurs.  When nilfs_init_acl() is actually
		 * supported, proper cancellation of the above jobs
		 * should be considered.
		 */
		goto failed_after_creation;

	return inode;

 failed_after_creation:
	clear_nlink(inode);
	unlock_new_inode(inode);
	/* raw_inode will be deleted through nilfs_evict_inode() */
	iput(inode);
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	/* if i_nlink == 1, generic_forget_inode() will be called */
	iput(inode);
 failed:
	return ERR_PTR(err);
}

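/*
 * Usage sketch (based on how directory operations, e.g. in namei.c,
 * typically drive this function): nilfs_new_inode() is called inside a
 * transaction, the caller then adds a directory entry and finishes with
 * unlock_new_inode(); on failure the caller drops its reference so that
 * nilfs_evict_inode() reclaims the on-disk entry.
 */
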
void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	inode_set_flags(inode, new_fl, S_SYNC | S_APPEND | S_IMMUTABLE |
			S_NOATIME | S_DIRSYNC);
}

int nilfs_read_inode_common(struct inode *inode,
			    struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
	i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
	inode->i_size = le64_to_cpu(raw_inode->i_size);
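	/*
	 * The on-disk nilfs_inode carries no atime field, so atime is
	 * initialized from the mtime fields below.
	 */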
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	if (inode->i_nlink == 0)
		return -ESTALE; /* this inode is deleted */

	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
		0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
	ii->i_dir_start_lookup = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
		if (err < 0)
			return err;
		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}
	return 0;
}

static int __nilfs_read_inode(struct super_block *sb,
			      struct nilfs_root *root, unsigned long ino,
			      struct inode *inode)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	nilfs_set_inode_flags(inode);
	mapping_set_gfp_mask(inode->i_mapping,
			   mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
	return 0;

 failed_unmap:
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);

 bad_inode:
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	return err;
}

static int nilfs_iget_test(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;
	struct nilfs_inode_info *ii;

	if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
		return 0;

	ii = NILFS_I(inode);
	if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
		return !args->for_gc;

	return args->for_gc && args->cno == ii->i_cno;
}

static int nilfs_iget_set(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;

	inode->i_ino = args->ino;
	if (args->for_gc) {
		NILFS_I(inode)->i_state = 1 << NILFS_I_GCINODE;
		NILFS_I(inode)->i_cno = args->cno;
		NILFS_I(inode)->i_root = NULL;
	} else {
		if (args->root && args->ino == NILFS_ROOT_INO)
			nilfs_get_root(args->root);
		NILFS_I(inode)->i_root = args->root;
	}
	return 0;
}

struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
			    unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return ilookup5(sb, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
				unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}

struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
			 unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = nilfs_iget_locked(sb, root, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = __nilfs_read_inode(sb, root, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

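/*
 * Usage sketch for callers of nilfs_iget():
 *
 *	struct inode *inode = nilfs_iget(sb, root, ino);
 *
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *
 * The returned inode is either found in the inode cache or freshly
 * read from the ifile via __nilfs_read_inode(); on failure an
 * ERR_PTR() is returned instead of NULL.
 */
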
struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
				__u64 cno)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = NULL, .cno = cno, .for_gc = 1
	};
	struct inode *inode;
	int err;

	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = nilfs_init_gcinode(inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode, int has_bmap)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
	raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
		struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

		/* zero-fill unused portion in the case of super root block */
		raw_inode->i_xattr = 0;
		raw_inode->i_pad = 0;
		memset((void *)raw_inode + sizeof(*raw_inode), 0,
		       nilfs->ns_inode_size - sizeof(*raw_inode));
	}

	if (has_bmap)
		nilfs_bmap_write(ii->i_bmap, raw_inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(huge_encode_dev(inode->i_rdev));
	/*
	 * When extending the inode, nilfs->ns_inode_size should be
	 * checked for substitutions of appended fields.
	 */
}

void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *ifile = ii->i_root->ifile;
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
	if (flags & I_DIRTY_DATASYNC)
		set_bit(NILFS_I_INODE_SYNC, &ii->i_state);

	/*
	 * XXX: calling with has_bmap = 0 is a workaround to avoid a
	 * deadlock on the bmap; this delays the update of i_bmap until
	 * just before writing.
	 */
	nilfs_write_inode_common(inode, raw_inode, 0);
	nilfs_ifile_unmap_inode(ifile, ino, ibh);
}

#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */

static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	__u64 b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	b -= min_t(__u64, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

failed:
	nilfs_warning(ii->vfs_inode.i_sb, __func__,
		      "failed to truncate bmap (ino=%lu, err=%d)",
		      ii->vfs_inode.i_ino, ret);
}

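/*
 * Design note: nilfs_truncate_bmap() shortens the bmap in chunks of at
 * most NILFS_MAX_TRUNCATE_BLOCKS (16384 blocks, i.e. 64MB with a 4KB
 * block size) per pass, calling nilfs_relax_pressure_in_lock() between
 * passes so that a large truncation does not hold filesystem locks for
 * too long.
 */
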
void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_mark_inode_dirty(inode);
	nilfs_set_file_dirty(inode, 0);
	nilfs_transaction_commit(sb);
	/*
	 * May construct a logical segment and may fail in sync mode.
	 * But truncate has no return value.
	 */
}

static void nilfs_clear_inode(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	/*
	 * Free resources allocated in nilfs_read_inode(), here.
	 */
	BUG_ON(!list_empty(&ii->i_dirty));
	brelse(ii->i_bh);
	ii->i_bh = NULL;

	if (nilfs_is_metadata_file_inode(inode))
		nilfs_mdt_clear(inode);

	if (test_bit(NILFS_I_BMAP, &ii->i_state))
		nilfs_bmap_clear(ii->i_bmap);

	nilfs_btnode_cache_clear(&ii->i_btnode_cache);

	if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
		nilfs_put_root(ii->i_root);
}

void nilfs_evict_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int ret;

	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
		nilfs_clear_inode(inode);
		return;
	}
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	truncate_inode_pages_final(&inode->i_data);

	/* TODO: some of the following operations may fail.  */
	nilfs_truncate_bmap(ii, 0);
	nilfs_mark_inode_dirty(inode);
	clear_inode(inode);

	ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
	if (!ret)
		atomic64_dec(&ii->i_root->inodes_count);

	nilfs_clear_inode(inode);

	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	nilfs_transaction_commit(sb);
	/*
	 * May construct a logical segment and may fail in sync mode.
	 * But delete_inode has no return value.
	 */
}

int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct nilfs_transaction_info ti;
	struct inode *inode = d_inode(dentry);
	struct super_block *sb = inode->i_sb;
	int err;

	err = inode_change_ok(inode, iattr);
	if (err)
		return err;

	err = nilfs_transaction_begin(sb, &ti, 0);
	if (unlikely(err))
		return err;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		inode_dio_wait(inode);
		truncate_setsize(inode, iattr->ia_size);
		nilfs_truncate(inode);
	}

	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);

	if (iattr->ia_valid & ATTR_MODE) {
		err = nilfs_acl_chmod(inode);
		if (unlikely(err))
			goto out_err;
	}

	return nilfs_transaction_commit(sb);

out_err:
	nilfs_transaction_abort(sb);
	return err;
}

int nilfs_permission(struct inode *inode, int mask)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	if ((mask & MAY_WRITE) && root &&
	    root->cno != NILFS_CPTREE_CURRENT_CNO)
		return -EROFS; /* snapshot is not writable */

	return generic_permission(inode, mask);
}

int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	spin_lock(&nilfs->ns_inode_lock);
	if (ii->i_bh == NULL) {
		spin_unlock(&nilfs->ns_inode_lock);
		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
						  inode->i_ino, pbh);
		if (unlikely(err))
			return err;
		spin_lock(&nilfs->ns_inode_lock);
		if (ii->i_bh == NULL) {
			ii->i_bh = *pbh;
		} else {
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else {
		*pbh = ii->i_bh;
	}

	get_bh(*pbh);
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

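/*
 * Design note: nilfs_load_inode_block() uses a double-checked pattern
 * around ns_inode_lock.  The lock is dropped while the inode block is
 * read in (which may sleep), and i_bh is re-checked after reacquiring
 * the lock; if another task installed a buffer head in the meantime,
 * the freshly read one is released and the cached one is reused.
 */
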
int nilfs_inode_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	int ret = 0;

	if (!list_empty(&ii->i_dirty)) {
		spin_lock(&nilfs->ns_inode_lock);
		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
			test_bit(NILFS_I_BUSY, &ii->i_state);
		spin_unlock(&nilfs->ns_inode_lock);
	}
	return ret;
}

int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

	atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);

	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/*
		 * Because this routine may race with nilfs_dispose_list(),
		 * we have to check NILFS_I_QUEUED here, too.
		 */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/*
			 * This will happen when somebody is freeing
			 * this inode.
			 */
			nilfs_warning(inode->i_sb, __func__,
				      "cannot get inode (ino=%lu)\n",
				      inode->i_ino);
			spin_unlock(&nilfs->ns_inode_lock);
			/* NILFS_I_DIRTY may remain for freeing inode */
			return -EINVAL;
		}
		list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
{
	struct buffer_head *ibh;
	int err;

	err = nilfs_load_inode_block(inode, &ibh);
	if (unlikely(err)) {
		nilfs_warning(inode->i_sb, __func__,
			      "failed to reget inode block.\n");
		return err;
	}
	nilfs_update_inode(inode, ibh, flags);
	mark_buffer_dirty(ibh);
	nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
	brelse(ibh);
	return 0;
}
953 
954 /**
955  * nilfs_dirty_inode - reflect changes on given inode to an inode block.
956  * @inode: inode of the file to be registered.
957  *
958  * nilfs_dirty_inode() loads a inode block containing the specified
959  * @inode and copies data from a nilfs_inode to a corresponding inode
960  * entry in the inode block. This operation is excluded from the segment
961  * construction. This function can be called both as a single operation
962  * and as a part of indivisible file operations.
963  */
void nilfs_dirty_inode(struct inode *inode, int flags)
{
	struct nilfs_transaction_info ti;
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	if (is_bad_inode(inode)) {
		nilfs_warning(inode->i_sb, __func__,
			      "tried to mark bad_inode dirty. ignored.\n");
		dump_stack();
		return;
	}
	if (mdi) {
		nilfs_mdt_mark_dirty(inode);
		return;
	}
	nilfs_transaction_begin(inode->i_sb, &ti, 0);
	__nilfs_mark_inode_dirty(inode, flags);
	nilfs_transaction_commit(inode->i_sb); /* never fails */
}

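/*
 * nilfs_fiemap() below walks the file in extents, alternating between
 * uncommitted (delayed-allocation) ranges found in the page cache via
 * nilfs_find_uncommitted_extent() and committed ranges resolved through
 * the bmap with nilfs_bmap_lookup_contig(), emitting one fiemap extent
 * for each maximal contiguous run.
 */
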
int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 __u64 start, __u64 len)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 logical = 0, phys = 0, size = 0;
	__u32 flags = 0;
	loff_t isize;
	sector_t blkoff, end_blkoff;
	sector_t delalloc_blkoff;
	unsigned long delalloc_blklen;
	unsigned int blkbits = inode->i_blkbits;
	int ret, n;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	inode_lock(inode);

	isize = i_size_read(inode);

	blkoff = start >> blkbits;
	end_blkoff = (start + len - 1) >> blkbits;

	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
							&delalloc_blkoff);

	do {
		__u64 blkphy;
		unsigned int maxblocks;

		if (delalloc_blklen && blkoff == delalloc_blkoff) {
			if (size) {
				/* End of the current extent */
				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
			}
			if (blkoff > end_blkoff)
				break;

			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
			logical = blkoff << blkbits;
			phys = 0;
			size = delalloc_blklen << blkbits;

			blkoff = delalloc_blkoff + delalloc_blklen;
			delalloc_blklen = nilfs_find_uncommitted_extent(
				inode, blkoff, &delalloc_blkoff);
			continue;
		}

		/*
		 * Limit the number of blocks that we look up so as
		 * not to get into the next delayed allocation extent.
		 */
		maxblocks = INT_MAX;
		if (delalloc_blklen)
			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
					  maxblocks);
		blkphy = 0;

		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
		n = nilfs_bmap_lookup_contig(
			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

		if (n < 0) {
			int past_eof;

			if (unlikely(n != -ENOENT))
				break; /* error */

			/* HOLE */
			blkoff++;
			past_eof = ((blkoff << blkbits) >= isize);

			if (size) {
				/* End of the current extent */

				if (past_eof)
					flags |= FIEMAP_EXTENT_LAST;

				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
				size = 0;
			}
			if (blkoff > end_blkoff || past_eof)
				break;
		} else {
			if (size) {
				if (phys && blkphy << blkbits == phys + size) {
					/* The current extent goes on */
					size += n << blkbits;
				} else {
					/* Terminate the current extent */
					ret = fiemap_fill_next_extent(
						fieinfo, logical, phys, size,
						flags);
					if (ret || blkoff > end_blkoff)
						break;

					/* Start another extent */
					flags = FIEMAP_EXTENT_MERGED;
					logical = blkoff << blkbits;
					phys = blkphy << blkbits;
					size = n << blkbits;
				}
			} else {
				/* Start a new extent */
				flags = FIEMAP_EXTENT_MERGED;
				logical = blkoff << blkbits;
				phys = blkphy << blkbits;
				size = n << blkbits;
			}
			blkoff += n;
		}
		cond_resched();
	} while (true);

	/* If ret is 1 then we just hit the end of the extent array */
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}
1114