/*
 * fs/f2fs/inode.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"

#include <trace/events/f2fs.h>

void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
{
	if (f2fs_inode_dirtied(inode, sync))
		return;

	mark_inode_dirty_sync(inode);
}

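/*
 * Translate the on-disk FS_*_FL attribute flags cached in f2fs_inode_info
 * into the generic in-core S_* flags, so that common VFS code observes
 * the per-inode attributes (sync, append-only, immutable, etc.).
 */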
void f2fs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = F2FS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	inode_set_flags(inode, new_fl,
			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
}

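/*
 * For special inodes the device number lives in the first i_addr slots
 * (past any extra attribute area): slot [extra] holds the old 16-bit
 * encoding and is used when non-zero; otherwise slot [extra + 1] holds
 * the new 32-bit encoding.
 */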
static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		if (ri->i_addr[extra_size])
			inode->i_rdev = old_decode_dev(
				le32_to_cpu(ri->i_addr[extra_size]));
		else
			inode->i_rdev = new_decode_dev(
				le32_to_cpu(ri->i_addr[extra_size + 1]));
	}
}

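/*
 * True if the inode's first data block has actually been written, i.e.
 * its block address is neither NULL_ADDR (unallocated) nor NEW_ADDR
 * (allocated but not yet written back).
 */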
static bool __written_first_block(struct f2fs_inode *ri)
{
	block_t addr = le32_to_cpu(ri->i_addr[offset_in_addr(ri)]);

	if (addr != NEW_ADDR && addr != NULL_ADDR)
		return true;
	return false;
}

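/*
 * Counterpart of __get_inode_rdev(): store the device number in the old
 * encoding when it fits, otherwise zero slot [extra] and use the new
 * 32-bit encoding in slot [extra + 1].
 */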
static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			ri->i_addr[extra_size] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 1] = 0;
		} else {
			ri->i_addr[extra_size] = 0;
			ri->i_addr[extra_size + 1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 2] = 0;
		}
	}
}

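/*
 * The FI_DATA_EXIST flag can disagree with the inline data area itself
 * (presumably after a sudden power-off); rescan the area and, if any
 * non-zero word is found, set the flag and dirty the node page.
 */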
static void __recover_inline_status(struct inode *inode, struct page *ipage)
{
	void *inline_data = inline_data_addr(inode, ipage);
	__le32 *start = inline_data;
	__le32 *end = start + MAX_INLINE_DATA(inode) / sizeof(__le32);

	while (start < end) {
		if (*start++) {
			f2fs_wait_on_page_writeback(ipage, NODE, true);

			set_inode_flag(inode, FI_DATA_EXIST);
			set_raw_inline(inode, F2FS_INODE(ipage));
			set_page_dirty(ipage);
			return;
		}
	}
}

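/*
 * Inode checksumming applies only when the superblock feature is enabled,
 * the node page really is an inode carrying the extra attribute area, and
 * i_extra_isize is large enough to cover the i_inode_checksum field.
 */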
static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;
	int extra_isize = le16_to_cpu(ri->i_extra_isize);

	if (!f2fs_sb_has_inode_chksum(sbi->sb))
		return false;

	if (!RAW_IS_INODE(F2FS_NODE(page)) || !(ri->i_inline & F2FS_EXTRA_ATTR))
		return false;

	if (!F2FS_FITS_IN_INODE(ri, extra_isize, i_inode_checksum))
		return false;

	return true;
}

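/*
 * Compute the inode checksum: seed it with footer.ino and i_generation,
 * then checksum the whole inode block while substituting zero for the
 * i_inode_checksum field itself.
 */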
static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_node *node = F2FS_NODE(page);
	struct f2fs_inode *ri = &node->i;
	__le32 ino = node->footer.ino;
	__le32 gen = ri->i_generation;
	__u32 chksum, chksum_seed;
	__u32 dummy_cs = 0;
	unsigned int offset = offsetof(struct f2fs_inode, i_inode_checksum);
	unsigned int cs_size = sizeof(dummy_cs);

	chksum = f2fs_chksum(sbi, sbi->s_chksum_seed, (__u8 *)&ino,
							sizeof(ino));
	chksum_seed = f2fs_chksum(sbi, chksum, (__u8 *)&gen, sizeof(gen));

	chksum = f2fs_chksum(sbi, chksum_seed, (__u8 *)ri, offset);
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)&dummy_cs, cs_size);
	offset += cs_size;
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)ri + offset,
						F2FS_BLKSIZE - offset);
	return chksum;
}

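/*
 * Returns true when the stored checksum matches (or cannot be checked).
 * Dirty and in-writeback pages are skipped, since their contents may
 * legitimately differ from the checksum computed at the last write.
 */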
bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri;
	__u32 provided, calculated;

	if (!f2fs_enable_inode_chksum(sbi, page) ||
			PageDirty(page) || PageWriteback(page))
		return true;

	ri = &F2FS_NODE(page)->i;
	provided = le32_to_cpu(ri->i_inode_checksum);
	calculated = f2fs_inode_chksum(sbi, page);

	if (provided != calculated)
		f2fs_msg(sbi->sb, KERN_WARNING,
			"checksum invalid, ino = %x, %x vs. %x",
			ino_of_node(page), provided, calculated);

	return provided == calculated;
}

void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_enable_inode_chksum(sbi, page))
		return;

	ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
}

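/*
 * Fill the in-core inode and f2fs_inode_info from the raw f2fs_inode in
 * the inode's node page. Returns 0 on success or a negative errno, e.g.
 * -EINVAL for an out-of-range inode number.
 */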
static int do_read_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct page *node_page;
	struct f2fs_inode *ri;
	projid_t i_projid;

	/* Check if ino is within scope */
	if (check_nid_range(sbi, inode->i_ino)) {
		f2fs_msg(inode->i_sb, KERN_ERR, "bad inode number: %lu",
			 (unsigned long) inode->i_ino);
		WARN_ON(1);
		return -EINVAL;
	}

	node_page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	ri = F2FS_INODE(node_page);

	inode->i_mode = le16_to_cpu(ri->i_mode);
	i_uid_write(inode, le32_to_cpu(ri->i_uid));
	i_gid_write(inode, le32_to_cpu(ri->i_gid));
	set_nlink(inode, le32_to_cpu(ri->i_links));
	inode->i_size = le64_to_cpu(ri->i_size);
	inode->i_blocks = SECTOR_FROM_BLOCK(le64_to_cpu(ri->i_blocks) - 1);

	inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	inode->i_generation = le32_to_cpu(ri->i_generation);

	fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
	fi->i_flags = le32_to_cpu(ri->i_flags);
	fi->flags = 0;
	fi->i_advise = ri->i_advise;
	fi->i_pino = le32_to_cpu(ri->i_pino);
	fi->i_dir_level = ri->i_dir_level;

	if (f2fs_init_extent_tree(inode, &ri->i_ext))
		set_page_dirty(node_page);

	get_inline_info(inode, ri);

	fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
					le16_to_cpu(ri->i_extra_isize) : 0;

	/* check data exist */
	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
		__recover_inline_status(inode, node_page);

	/* get rdev by using inline_info */
	__get_inode_rdev(inode, ri);

	if (__written_first_block(ri))
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);

	if (!need_inode_block_update(sbi, inode->i_ino))
		fi->last_disk_size = inode->i_size;

	if (fi->i_flags & FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi->sb) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
		i_projid = (projid_t)le32_to_cpu(ri->i_projid);
	else
		i_projid = F2FS_DEF_PROJID;
	fi->i_projid = make_kprojid(&init_user_ns, i_projid);

	f2fs_put_page(node_page, 1);

	stat_inc_inline_xattr(inode);
	stat_inc_inline_inode(inode);
	stat_inc_inline_dir(inode);

	return 0;
}

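/*
 * Look up, or instantiate and read, the in-core inode for @ino. The
 * internal node/meta inodes skip do_read_inode(); all others are read
 * from disk and have their operation tables wired up by file type.
 */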
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;
	int ret = 0;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW)) {
		trace_f2fs_iget(inode);
		return inode;
	}
	if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
		goto make_now;

	ret = do_read_inode(inode);
	if (ret)
		goto bad_inode;
make_now:
	if (ino == F2FS_NODE_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_node_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
	} else if (ino == F2FS_META_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
	} else if (S_ISREG(inode->i_mode)) {
		inode->i_op = &f2fs_file_inode_operations;
		inode->i_fop = &f2fs_file_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &f2fs_dir_inode_operations;
		inode->i_fop = &f2fs_dir_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_HIGH_ZERO);
	} else if (S_ISLNK(inode->i_mode)) {
		if (f2fs_encrypted_inode(inode))
			inode->i_op = &f2fs_encrypted_symlink_inode_operations;
		else
			inode->i_op = &f2fs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &f2fs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	} else {
		ret = -EIO;
		goto bad_inode;
	}
	f2fs_set_inode_flags(inode);
	unlock_new_inode(inode);
	trace_f2fs_iget(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	trace_f2fs_iget_exit(inode, ret);
	return ERR_PTR(ret);
}

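/*
 * Same as f2fs_iget(), except that on -ENOMEM it waits for block-layer
 * congestion to ease and retries indefinitely rather than failing.
 */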
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
retry:
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode)) {
		if (PTR_ERR(inode) == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}
	}
	return inode;
}

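/*
 * Copy the in-core inode state back into the raw f2fs_inode in its node
 * page and mark the page dirty. Returns the set_page_dirty() result, so
 * callers can tell whether the page was newly dirtied.
 */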
int update_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_inode *ri;
	struct extent_tree *et = F2FS_I(inode)->extent_tree;

	f2fs_inode_synced(inode);

	f2fs_wait_on_page_writeback(node_page, NODE, true);

	ri = F2FS_INODE(node_page);

	ri->i_mode = cpu_to_le16(inode->i_mode);
	ri->i_advise = F2FS_I(inode)->i_advise;
	ri->i_uid = cpu_to_le32(i_uid_read(inode));
	ri->i_gid = cpu_to_le32(i_gid_read(inode));
	ri->i_links = cpu_to_le32(inode->i_nlink);
	ri->i_size = cpu_to_le64(i_size_read(inode));
	ri->i_blocks = cpu_to_le64(SECTOR_TO_BLOCK(inode->i_blocks) + 1);

	if (et) {
		read_lock(&et->lock);
		set_raw_extent(&et->largest, &ri->i_ext);
		read_unlock(&et->lock);
	} else {
		memset(&ri->i_ext, 0, sizeof(ri->i_ext));
	}
	set_raw_inline(inode, ri);

	ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	ri->i_current_depth = cpu_to_le32(F2FS_I(inode)->i_current_depth);
	ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
	ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
	ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
	ri->i_generation = cpu_to_le32(inode->i_generation);
	ri->i_dir_level = F2FS_I(inode)->i_dir_level;

	if (f2fs_has_extra_attr(inode)) {
		ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize);

		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)->sb) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_projid)) {
			projid_t i_projid;

			i_projid = from_kprojid(&init_user_ns,
						F2FS_I(inode)->i_projid);
			ri->i_projid = cpu_to_le32(i_projid);
		}
	}

	__set_inode_rdev(inode, ri);
	set_cold_node(inode, node_page);

	/* deleted inode */
	if (inode->i_nlink == 0)
		clear_inline_node(node_page);

	return set_page_dirty(node_page);
}

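/*
 * Flush the inode into its node page, retrying the page lookup on
 * -ENOMEM; any other lookup error except -ENOENT is treated as fatal
 * and stops checkpointing.
 */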
int update_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *node_page;
	int ret = 0;
retry:
	node_page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page)) {
		int err = PTR_ERR(node_page);
		if (err == -ENOMEM) {
			cond_resched();
			goto retry;
		} else if (err != -ENOENT) {
			f2fs_stop_checkpoint(sbi, false);
		}
		return 0;
	}
	ret = update_inode(inode, node_page);
	f2fs_put_page(node_page, 1);
	return ret;
}

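/*
 * ->write_inode callback: the internal node/meta inodes and inodes that
 * are not flagged FI_DIRTY_INODE are skipped; otherwise the inode is
 * flushed to its node page and, when writeback asked for actual work
 * (wbc->nr_to_write), the filesystem is rebalanced.
 */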
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return 0;

	if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
		return 0;

	/*
	 * We need to balance fs here to prevent producing dirty node pages
	 * during the urgent cleaning time when running out of free sections.
	 */
	update_inode_page(inode);
	if (wbc && wbc->nr_to_write)
		f2fs_balance_fs(sbi, true);
	return 0;
}

/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	int err = 0;

	/* some remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		drop_inmem_pages(inode);

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages_final(&inode->i_data);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		goto out_clear;

	f2fs_bug_on(sbi, get_dirty_pages(inode));
	remove_dirty_inode(inode);

	f2fs_destroy_extent_tree(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	dquot_initialize(inode);

	remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
	remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);

	sb_start_intwrite(inode->i_sb);
	set_inode_flag(inode, FI_NO_ALLOC);
	i_size_write(inode, 0);
retry:
	if (F2FS_HAS_BLOCKS(inode))
		err = f2fs_truncate(inode);

#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(sbi, FAULT_EVICT_INODE)) {
		f2fs_show_injection_info(FAULT_EVICT_INODE);
		err = -EIO;
	}
#endif
	if (!err) {
		f2fs_lock_op(sbi);
		err = remove_inode_page(inode);
		f2fs_unlock_op(sbi);
		if (err == -ENOENT)
			err = 0;
	}

	/* give more chances, if ENOMEM case */
	if (err == -ENOMEM) {
		err = 0;
		goto retry;
	}

	if (err)
		update_inode_page(inode);
	dquot_free_inode(inode);
	sb_end_intwrite(inode->i_sb);
no_delete:
	dquot_drop(inode);

	stat_dec_inline_xattr(inode);
	stat_dec_inline_dir(inode);
	stat_dec_inline_inode(inode);

	if (!is_set_ckpt_flags(sbi, CP_ERROR_FLAG))
		f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));

	/* ino == 0, if f2fs_new_inode() failed */
	if (inode->i_ino)
		invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino,
							inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (inode->i_nlink) {
		if (is_inode_flag_set(inode, FI_APPEND_WRITE))
			add_ino_entry(sbi, inode->i_ino, APPEND_INO);
		if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
			add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	}
	if (is_inode_flag_set(inode, FI_FREE_NID)) {
		alloc_nid_failed(sbi, inode->i_ino);
		clear_inode_flag(inode, FI_FREE_NID);
	} else {
		f2fs_bug_on(sbi, err &&
			!exist_written_data(sbi, inode->i_ino, ORPHAN_INO));
	}
out_clear:
	fscrypt_put_encryption_info(inode, NULL);
	clear_inode(inode);
}

/* caller should call f2fs_lock_op() */
void handle_failed_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct node_info ni;

	/*
	 * clear nlink of the inode in order to release its resources
	 * immediately.
	 */
	clear_nlink(inode);

	/*
	 * we must call this to avoid the inode remaining dirty, which would
	 * cause a panic when flushing dirty inodes in gdirty_list.
	 */
	update_inode_page(inode);
	f2fs_inode_synced(inode);

	/* don't mark the inode bad, since it becomes a regular file. */
	unlock_new_inode(inode);

	/*
	 * Note: we should add the inode to the orphan list before
	 * f2fs_unlock_op(), so we can prevent losing this orphan when
	 * encountering a checkpoint followed by a sudden power-off.
	 */
	get_node_info(sbi, inode->i_ino, &ni);

	if (ni.blk_addr != NULL_ADDR) {
		int err = acquire_orphan_inode(sbi);
		if (err) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_msg(sbi->sb, KERN_WARNING,
				"Too many orphan inodes, run fsck to fix.");
		} else {
			add_orphan_inode(inode);
		}
		alloc_nid_done(sbi, inode->i_ino);
	} else {
		set_inode_flag(inode, FI_FREE_NID);
	}

	f2fs_unlock_op(sbi);

	/* iput will drop the inode object */
	iput(inode);
}
597