/*
 * inode.c - NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"

/**
 * struct nilfs_iget_args - arguments used during comparison between inodes
 * @ino: inode number
 * @cno: checkpoint number
 * @root: pointer to NILFS root object (mounted checkpoint)
 * @for_gc: inode for GC flag
 */
struct nilfs_iget_args {
	u64 ino;
	__u64 cno;
	struct nilfs_root *root;
	int for_gc;
};

void nilfs_inode_add_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_add_bytes(inode, (1 << inode->i_blkbits) * n);
	if (root)
		atomic_add(n, &root->blocks_count);
}

void nilfs_inode_sub_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_sub_bytes(inode, (1 << inode->i_blkbits) * n);
	if (root)
		atomic_sub(n, &root->blocks_count);
}

/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode: inode struct of the target file
 * @blkoff: file block number
 * @bh_result: buffer head to be mapped on
 * @create: indicate whether to allocate the block if it has not been
 *      allocated yet.
 *
 * This function does not issue an actual read request for the specified
 * data block; that is done by the VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 blknum = 0;
	int err = 0, ret;
	unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	if (ret >= 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		if (ret > 0)
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * The get_block() function could be called
				 * from multiple callers for an inode.
				 * However, the page having this block must
				 * be locked in this case.
				 */
				printk(KERN_WARNING
				       "nilfs_get_block: a race condition "
				       "while inserting a data block. "
				       "(inode number=%lu, file block "
				       "offset=%llu)\n",
				       inode->i_ino,
				       (unsigned long long)blkoff);
				err = 0;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_mark_inode_dirty(inode);
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		set_buffer_delay(bh_result);
		map_bh(bh_result, inode->i_sb, 0); /* dbn must be changed
						      to proper value */
	} else if (ret == -ENOENT) {
		/* Not finding the block is not an error (e.g. a hole);
		   return without the mapped state flag. */
		;
	} else {
		err = ret;
	}

 out:
	return err;
}
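
/*
 * Note on the create path above: a newly inserted block is mapped to
 * block number 0 and carries the "new" and "delay" buffer flags.  Since
 * NILFS2 is log-structured, the real disk block number is assigned only
 * later, when the segment constructor writes the block out; until then
 * the mapping is a placeholder (see the "dbn must be changed" comment).
 */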

/**
 * nilfs_readpage() - implement readpage() method of nilfs_aops {}
 * address_space_operations.
 * @file: file struct of the file to be read
 * @page: the page to be read
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, nilfs_get_block);
}

/**
 * nilfs_readpages() - implement readpages() method of nilfs_aops {}
 * address_space_operations.
 * @file: file struct of the file to be read
 * @mapping: address_space struct used for reading multiple pages
 * @pages: the pages to be read
 * @nr_pages: number of pages to be read
 */
static int nilfs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
}

static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err = 0;

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
						    wbc->range_start,
						    wbc->range_end);
	return err;
}

static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}

static int nilfs_set_page_dirty(struct page *page)
{
	int ret = __set_page_dirty_buffers(page);

	if (ret) {
		struct inode *inode = page->mapping->host;
		unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);

		nilfs_set_file_dirty(inode, nr_dirty);
	}
	return ret;
}
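
/*
 * Data pages are never written back directly from ->writepage(); the
 * page is redirtied and the work is handed to the NILFS segment
 * constructor, synchronously through nilfs_construct_segment() for
 * WB_SYNC_ALL writeback or asynchronously through nilfs_flush_segment().
 * nilfs_set_page_dirty() additionally reports the number of newly
 * dirtied blocks to nilfs_set_file_dirty() so the log writer can keep
 * track of them.
 */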

void nilfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, to, inode->i_size);
		nilfs_truncate(inode);
	}
}

static int nilfs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)

{
	struct inode *inode = mapping->host;
	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

	if (unlikely(err))
		return err;

	err = block_write_begin(mapping, pos, len, flags, pagep,
				nilfs_get_block);
	if (unlikely(err)) {
		nilfs_write_failed(mapping, pos + len);
		nilfs_transaction_abort(inode->i_sb);
	}
	return err;
}

static int nilfs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
	unsigned nr_dirty;
	int err;

	nr_dirty = nilfs_page_count_clean_buffers(page, start,
						  start + copied);
	copied = generic_write_end(file, mapping, pos, len, copied, page,
				   fsdata);
	nilfs_set_file_dirty(inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
	return err ? : copied;
}
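
/*
 * Each buffered write is bracketed by a NILFS transaction:
 * nilfs_write_begin() opens it, nilfs_write_end() counts the buffers
 * that became dirty and commits, and a failed block_write_begin()
 * aborts the transaction after trimming blocks instantiated beyond
 * i_size via nilfs_write_failed().
 */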

static ssize_t
nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
		loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = file->f_mapping->host;
	ssize_t size;

	if (rw == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	size = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
				  nilfs_get_block);

	/*
	 * In case of error extending write may have instantiated a few
	 * blocks outside i_size. Trim these off again.
	 */
	if (unlikely((rw & WRITE) && size < 0)) {
		loff_t isize = i_size_read(inode);
		loff_t end = offset + iov_length(iov, nr_segs);

		if (end > isize)
			nilfs_write_failed(mapping, end);
	}

	return size;
}

const struct address_space_operations nilfs_aops = {
	.writepage		= nilfs_writepage,
	.readpage		= nilfs_readpage,
	.writepages		= nilfs_writepages,
	.set_page_dirty		= nilfs_set_page_dirty,
	.readpages		= nilfs_readpages,
	.write_begin		= nilfs_write_begin,
	.write_end		= nilfs_write_end,
	/* .releasepage		= nilfs_releasepage, */
	.invalidatepage		= block_invalidatepage,
	.direct_IO		= nilfs_direct_IO,
	.is_partially_uptodate  = block_is_partially_uptodate,
};
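
/*
 * Summary of the address space operations above: reads go through the
 * generic mpage helpers with nilfs_get_block() as the mapping callback,
 * dirtying is tracked per block for the log writer, and direct I/O is
 * only honoured for reads (direct writes return 0 and fall back to the
 * buffered path).
 */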

struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = 1 << NILFS_I_NEW;
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic_inc(&root->inodes_count);
	inode_init_owner(inode, dir, mode);
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_bmap;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = nilfs_mask_flags(
		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&nilfs->ns_next_gen_lock);
	inode->i_generation = nilfs->ns_next_generation++;
	spin_unlock(&nilfs->ns_next_gen_lock);
	insert_inode_hash(inode);

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		goto failed_acl; /* never occurs. When nilfs_init_acl() is
				    supported, proper cancellation of the
				    above jobs should be considered. */

	return inode;

 failed_acl:
 failed_bmap:
	clear_nlink(inode);
	iput(inode);  /* raw_inode will be deleted through
			 generic_delete_inode() */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
			 called */
 failed:
	return ERR_PTR(err);
}

void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;

	inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
			    S_DIRSYNC);
	if (flags & FS_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & FS_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & FS_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
	if (flags & FS_NOATIME_FL)
		inode->i_flags |= S_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
}
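
/*
 * Clearing __GFP_FS on the page-cache mapping (both here and in
 * nilfs_new_inode()) keeps page allocations for this mapping from
 * recursing into filesystem reclaim; presumably this avoids re-entering
 * NILFS while the segment constructor may already be at work.
 */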

int nilfs_read_inode_common(struct inode *inode,
			    struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
	i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
	inode->i_size = le64_to_cpu(raw_inode->i_size);
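	/* The on-disk nilfs_inode carries no separate atime field, so
	   atime is loaded from the mtime values below. */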
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	if (inode->i_nlink == 0 && inode->i_mode == 0)
		return -EINVAL; /* this inode is deleted */

	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
		0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
	ii->i_dir_start_lookup = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
		if (err < 0)
			return err;
		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}
	return 0;
}

static int __nilfs_read_inode(struct super_block *sb,
			      struct nilfs_root *root, unsigned long ino,
			      struct inode *inode)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	nilfs_set_inode_flags(inode);
	return 0;

 failed_unmap:
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);

 bad_inode:
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	return err;
}

static int nilfs_iget_test(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;
	struct nilfs_inode_info *ii;

	if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
		return 0;

	ii = NILFS_I(inode);
	if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
		return !args->for_gc;

	return args->for_gc && args->cno == ii->i_cno;
}

static int nilfs_iget_set(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;

	inode->i_ino = args->ino;
	if (args->for_gc) {
		NILFS_I(inode)->i_state = 1 << NILFS_I_GCINODE;
		NILFS_I(inode)->i_cno = args->cno;
		NILFS_I(inode)->i_root = NULL;
	} else {
		if (args->root && args->ino == NILFS_ROOT_INO)
			nilfs_get_root(args->root);
		NILFS_I(inode)->i_root = args->root;
	}
	return 0;
}
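
/*
 * The same inode number can be reached through several mounted
 * checkpoints and through GC-only inodes, so the icache lookups below
 * key an inode by (ino, root) for normal use and by (ino, cno) for GC
 * inodes, with nilfs_iget_test()/nilfs_iget_set() as the iget5
 * callbacks.
 */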

struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
			    unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return ilookup5(sb, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
				unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}

struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
			 unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = nilfs_iget_locked(sb, root, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = __nilfs_read_inode(sb, root, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
				__u64 cno)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = NULL, .cno = cno, .for_gc = 1
	};
	struct inode *inode;
	int err;

	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = nilfs_init_gcinode(inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode, int has_bmap)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
	raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
		struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

		/* zero-fill unused portion in the case of super root block */
		raw_inode->i_xattr = 0;
		raw_inode->i_pad = 0;
		memset((void *)raw_inode + sizeof(*raw_inode), 0,
		       nilfs->ns_inode_size - sizeof(*raw_inode));
	}

	if (has_bmap)
		nilfs_bmap_write(ii->i_bmap, raw_inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(huge_encode_dev(inode->i_rdev));
	/* When extending inode, nilfs->ns_inode_size should be checked
	   for substitutions of appended fields */
}

void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *ifile = ii->i_root->ifile;
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
	set_bit(NILFS_I_INODE_DIRTY, &ii->i_state);

	nilfs_write_inode_common(inode, raw_inode, 0);
		/* XXX: call with has_bmap = 0 is a workaround to avoid
		   deadlock of bmap. This delays update of i_bmap to just
		   before writing */
	nilfs_ifile_unmap_inode(ifile, ino, ibh);
}

#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */

static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	unsigned long b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

failed:
	nilfs_warning(ii->vfs_inode.i_sb, __func__,
		      "failed to truncate bmap (ino=%lu, err=%d)",
		      ii->vfs_inode.i_ino, ret);
}
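
/*
 * The bmap is shrunk from its last key downward in chunks of at most
 * NILFS_MAX_TRUNCATE_BLOCKS, calling nilfs_relax_pressure_in_lock()
 * between passes; this bounds the amount of work done per pass and
 * gives the segment constructor a chance to relieve memory pressure
 * before the next chunk is truncated.
 */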

void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_mark_inode_dirty(inode);
	nilfs_set_file_dirty(inode, 0);
	nilfs_transaction_commit(sb);
	/* May construct a logical segment and may fail in sync mode.
	   But truncate has no return value. */
}

static void nilfs_clear_inode(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	/*
	 * Free resources allocated in nilfs_read_inode(), here.
	 */
	BUG_ON(!list_empty(&ii->i_dirty));
	brelse(ii->i_bh);
	ii->i_bh = NULL;

	if (mdi && mdi->mi_palloc_cache)
		nilfs_palloc_destroy_cache(inode);

	if (test_bit(NILFS_I_BMAP, &ii->i_state))
		nilfs_bmap_clear(ii->i_bmap);

	nilfs_btnode_cache_clear(&ii->i_btnode_cache);

	if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
		nilfs_put_root(ii->i_root);
}

void nilfs_evict_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int ret;

	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
		if (inode->i_data.nrpages)
			truncate_inode_pages(&inode->i_data, 0);
		clear_inode(inode);
		nilfs_clear_inode(inode);
		return;
	}
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	if (inode->i_data.nrpages)
		truncate_inode_pages(&inode->i_data, 0);

	/* TODO: some of the following operations may fail.  */
	nilfs_truncate_bmap(ii, 0);
	nilfs_mark_inode_dirty(inode);
	clear_inode(inode);

	ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
	if (!ret)
		atomic_dec(&ii->i_root->inodes_count);

	nilfs_clear_inode(inode);

	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	nilfs_transaction_commit(sb);
	/* May construct a logical segment and may fail in sync mode.
	   But delete_inode has no return value. */
}
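
/*
 * Eviction of an unlinked inode above is wrapped in a transaction: the
 * whole bmap is truncated, the raw inode is marked dirty, the ifile
 * entry is released and the per-checkpoint inode count is decremented.
 * Inodes that are still linked (or have no root) only get their page
 * cache and in-memory state torn down through nilfs_clear_inode().
 */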

int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct nilfs_transaction_info ti;
	struct inode *inode = dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	int err;

	err = inode_change_ok(inode, iattr);
	if (err)
		return err;

	err = nilfs_transaction_begin(sb, &ti, 0);
	if (unlikely(err))
		return err;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		inode_dio_wait(inode);
		truncate_setsize(inode, iattr->ia_size);
		nilfs_truncate(inode);
	}

	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);

	if (iattr->ia_valid & ATTR_MODE) {
		err = nilfs_acl_chmod(inode);
		if (unlikely(err))
			goto out_err;
	}

	return nilfs_transaction_commit(sb);

out_err:
	nilfs_transaction_abort(sb);
	return err;
}

int nilfs_permission(struct inode *inode, int mask)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;
	if ((mask & MAY_WRITE) && root &&
	    root->cno != NILFS_CPTREE_CURRENT_CNO)
		return -EROFS; /* snapshot is not writable */

	return generic_permission(inode, mask);
}

int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	spin_lock(&nilfs->ns_inode_lock);
	if (ii->i_bh == NULL) {
		spin_unlock(&nilfs->ns_inode_lock);
		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
						  inode->i_ino, pbh);
		if (unlikely(err))
			return err;
		spin_lock(&nilfs->ns_inode_lock);
		if (ii->i_bh == NULL)
			ii->i_bh = *pbh;
		else {
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else
		*pbh = ii->i_bh;

	get_bh(*pbh);
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

int nilfs_inode_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	int ret = 0;

	if (!list_empty(&ii->i_dirty)) {
		spin_lock(&nilfs->ns_inode_lock);
		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
			test_bit(NILFS_I_BUSY, &ii->i_state);
		spin_unlock(&nilfs->ns_inode_lock);
	}
	return ret;
}

int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

	atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);

	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/* Because this routine may race with nilfs_dispose_list(),
		   we have to check NILFS_I_QUEUED here, too. */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/* This will happen when somebody is freeing
			   this inode. */
			nilfs_warning(inode->i_sb, __func__,
				      "cannot get inode (ino=%lu)\n",
				      inode->i_ino);
			spin_unlock(&nilfs->ns_inode_lock);
			return -EINVAL; /* NILFS_I_DIRTY may remain for
					   freeing inode */
		}
		list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

int nilfs_mark_inode_dirty(struct inode *inode)
{
	struct buffer_head *ibh;
	int err;

	err = nilfs_load_inode_block(inode, &ibh);
	if (unlikely(err)) {
		nilfs_warning(inode->i_sb, __func__,
			      "failed to reget inode block.\n");
		return err;
	}
	nilfs_update_inode(inode, ibh);
	mark_buffer_dirty(ibh);
	nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
	brelse(ibh);
	return 0;
}

/**
 * nilfs_dirty_inode - reflect changes on the given inode to an inode block.
 * @inode: inode of the file to be registered.
 * @flags: dirty state flags (unused here)
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from a nilfs_inode to a corresponding inode
 * entry in the inode block. This operation is excluded from the segment
 * construction. This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode, int flags)
{
	struct nilfs_transaction_info ti;
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	if (is_bad_inode(inode)) {
		nilfs_warning(inode->i_sb, __func__,
			      "tried to mark bad_inode dirty. ignored.\n");
		dump_stack();
		return;
	}
	if (mdi) {
		nilfs_mdt_mark_dirty(inode);
		return;
	}
	nilfs_transaction_begin(inode->i_sb, &ti, 0);
	nilfs_mark_inode_dirty(inode);
	nilfs_transaction_commit(inode->i_sb); /* never fails */
}

int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 __u64 start, __u64 len)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 logical = 0, phys = 0, size = 0;
	__u32 flags = 0;
	loff_t isize;
	sector_t blkoff, end_blkoff;
	sector_t delalloc_blkoff;
	unsigned long delalloc_blklen;
	unsigned int blkbits = inode->i_blkbits;
	int ret, n;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);

	blkoff = start >> blkbits;
	end_blkoff = (start + len - 1) >> blkbits;

	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
							&delalloc_blkoff);

	do {
		__u64 blkphy;
		unsigned int maxblocks;

		if (delalloc_blklen && blkoff == delalloc_blkoff) {
			if (size) {
				/* End of the current extent */
				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
			}
			if (blkoff > end_blkoff)
				break;

			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
			logical = blkoff << blkbits;
			phys = 0;
			size = delalloc_blklen << blkbits;

			blkoff = delalloc_blkoff + delalloc_blklen;
			delalloc_blklen = nilfs_find_uncommitted_extent(
				inode, blkoff, &delalloc_blkoff);
			continue;
		}

		/*
		 * Limit the number of blocks that we look up so as
		 * not to get into the next delayed allocation extent.
		 */
		maxblocks = INT_MAX;
		if (delalloc_blklen)
			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
					  maxblocks);
		blkphy = 0;

		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
		n = nilfs_bmap_lookup_contig(
			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

		if (n < 0) {
			int past_eof;

			if (unlikely(n != -ENOENT))
				break; /* error */

			/* HOLE */
			blkoff++;
			past_eof = ((blkoff << blkbits) >= isize);

			if (size) {
				/* End of the current extent */

				if (past_eof)
					flags |= FIEMAP_EXTENT_LAST;

				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
				size = 0;
			}
			if (blkoff > end_blkoff || past_eof)
				break;
		} else {
			if (size) {
				if (phys && blkphy << blkbits == phys + size) {
					/* The current extent goes on */
					size += n << blkbits;
				} else {
					/* Terminate the current extent */
					ret = fiemap_fill_next_extent(
						fieinfo, logical, phys, size,
						flags);
					if (ret || blkoff > end_blkoff)
						break;

					/* Start another extent */
					flags = FIEMAP_EXTENT_MERGED;
					logical = blkoff << blkbits;
					phys = blkphy << blkbits;
					size = n << blkbits;
				}
			} else {
				/* Start a new extent */
				flags = FIEMAP_EXTENT_MERGED;
				logical = blkoff << blkbits;
				phys = blkphy << blkbits;
				size = n << blkbits;
			}
			blkoff += n;
		}
		cond_resched();
	} while (true);

	/* If ret is 1 then we just hit the end of the extent array */
	if (ret == 1)
		ret = 0;

	mutex_unlock(&inode->i_mutex);
	return ret;
}