// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/list_sort.h>
#include <linux/iversion.h>
#include "misc.h"
#include "ctree.h"
#include "tree-log.h"
#include "disk-io.h"
#include "locking.h"
#include "print-tree.h"
#include "backref.h"
#include "compression.h"
#include "qgroup.h"
#include "block-group.h"
#include "space-info.h"
#include "zoned.h"

/* magic values for the inode_only field in btrfs_log_inode:
 *
 * LOG_INODE_ALL means to log everything
 * LOG_INODE_EXISTS means to log just enough to recreate the inode
 * during log replay
 */
enum {
	LOG_INODE_ALL,
	LOG_INODE_EXISTS,
	LOG_OTHER_INODE,
	LOG_OTHER_INODE_ALL,
};

/*
 * directory trouble cases
 *
 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
 * log, we must force a full commit before doing an fsync of the directory
 * where the unlink was done.
 * ---> record transid of last unlink/rename per directory
 *
 * mkdir foo/some_dir
 * normal commit
 * rename foo/some_dir foo2/some_dir
 * mkdir foo/some_dir
 * fsync foo/some_dir/some_file
 *
 * The fsync above will unlink the original some_dir without recording
 * it in its new location (foo2).  After a crash, some_dir will be gone
 * unless the fsync of some_file forces a full commit
 *
 * 2) we must log any new names for any file or dir that is in the fsync
 * log. ---> check inode while renaming/linking.
 *
 * 2a) we must log any new names for any file or dir during rename
 * when the directory they are being removed from was logged.
 * ---> check inode and old parent dir during rename
 *
 *  2a is actually the more important variant.  Without the extra logging
 *  a crash might unlink the old name without recreating the new one
 *
 * 3) after a crash, we must go through any directories with a link count
 * of zero and redo the rm -rf
 *
 * mkdir f1/foo
 * normal commit
 * rm -rf f1/foo
 * fsync(f1)
 *
 * The directory f1 was fully removed from the FS, but fsync was never
 * called on f1, only its parent dir.  After a crash the rm -rf must
 * be replayed.  This must be able to recurse down the entire
 * directory tree.  The inode link count fixup code takes care of the
 * ugly details.
 */
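/*
 * Illustrative timeline for case 2 (a sketch, not an exhaustive trace):
 *
 * touch foo/bar
 * fsync foo/bar        <- bar's inode is now in the fsync log
 * ln foo/bar foo/baz   <- new name for a logged inode, must be logged too
 * <crash>
 *
 * If the new name were not logged, replay would restore bar but lose the
 * baz link, leaving the inode's link count out of sync with the directory
 * entries.  The link count fixup code cannot recreate a name it never saw.
 */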

/*
 * stages for the tree walking.  The first
 * stage (0) is to only pin down the blocks we find,
 * the second stage (1) is to make sure that all the inodes
 * we find in the log are created in the subvolume.
 *
 * The last stage is to deal with directories and links and extents
 * and all the other fun semantics
 */
enum {
	LOG_WALK_PIN_ONLY,
	LOG_WALK_REPLAY_INODES,
	LOG_WALK_REPLAY_DIR_INDEX,
	LOG_WALK_REPLAY_ALL,
};
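/*
 * Sketch of how replay advances through these stages; this is roughly
 * what the replay entry point (btrfs_recover_log_trees(), later in this
 * file) does with a walk_control (struct defined further down):
 *
 *	wc.stage = LOG_WALK_PIN_ONLY; wc.pin = 1;
 *	walk the log tree		(pin all extents the log uses)
 *	wc.pin = 0;
 *	wc.stage = LOG_WALK_REPLAY_INODES;
 *	walk the log tree again		(create every logged inode)
 *	while (wc.stage < LOG_WALK_REPLAY_ALL) {
 *		wc.stage++;
 *		walk the log tree again	(dir index keys, then the rest)
 *	}
 */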

static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_inode *inode,
			   int inode_only,
			   struct btrfs_log_ctx *ctx);
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid);
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all);
static void wait_log_commit(struct btrfs_root *root, int transid);

/*
 * tree logging is a special write ahead log used to make sure that
 * fsyncs and O_SYNCs can happen without doing full tree commits.
 *
 * Full tree commits are expensive because they require commonly
 * modified blocks to be recowed, creating many dirty pages in the
 * extent tree and a 4x-6x higher write load than ext3.
 *
 * Instead of doing a tree commit on every fsync, we use the
 * key ranges and transaction ids to find items for a given file or directory
 * that have changed in this transaction.  Those items are copied into
 * a special tree (one per subvolume root), that tree is written to disk
 * and then the fsync is considered complete.
 *
 * After a crash, items are copied out of the log-tree back into the
 * subvolume tree.  Any file data extents found are recorded in the extent
 * allocation tree, and the log-tree freed.
 *
 * The log tree is read three times: once to pin down all the extents it is
 * using in ram, once to create all the inodes logged in the tree
 * and once to do all the other items.
 */
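/*
 * For orientation, a sketch of how an fsync reaches this machinery.  The
 * callers live in file.c and are recalled from memory here, so treat the
 * exact chain as approximate:
 *
 *	btrfs_sync_file()		fsync(2) / O_SYNC entry point
 *	    btrfs_log_dentry_safe()
 *	        btrfs_log_inode_parent()
 *	            start_log_trans()	join or create the log tree
 *	            btrfs_log_inode()	copy the changed items
 *	    btrfs_sync_log()		write the log tree to disk
 */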

/*
 * start a sub transaction and setup the log tree
 * this increments the log tree writer count to make the people
 * syncing the tree wait for us to finish
 */
static int start_log_trans(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *tree_root = fs_info->tree_root;
	const bool zoned = btrfs_is_zoned(fs_info);
	int ret = 0;
	bool created = false;

	/*
	 * First check if the log root tree was already created. If not, create
	 * it before locking the root's log_mutex, just to keep lockdep happy.
	 */
	if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &tree_root->state)) {
		mutex_lock(&tree_root->log_mutex);
		if (!fs_info->log_root_tree) {
			ret = btrfs_init_log_root_tree(trans, fs_info);
			if (!ret) {
				set_bit(BTRFS_ROOT_HAS_LOG_TREE, &tree_root->state);
				created = true;
			}
		}
		mutex_unlock(&tree_root->log_mutex);
		if (ret)
			return ret;
	}

	mutex_lock(&root->log_mutex);

again:
	if (root->log_root) {
		int index = (root->log_transid + 1) % 2;

		if (btrfs_need_log_full_commit(trans)) {
			ret = -EAGAIN;
			goto out;
		}

		if (zoned && atomic_read(&root->log_commit[index])) {
			wait_log_commit(root, root->log_transid - 1);
			goto again;
		}

		if (!root->log_start_pid) {
			clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
			root->log_start_pid = current->pid;
		} else if (root->log_start_pid != current->pid) {
			set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		}
	} else {
		/*
		 * This means fs_info->log_root_tree was already created
		 * for some other FS trees. Do the full commit not to mix
		 * nodes from multiple log transactions to do sequential
		 * writing.
		 */
		if (zoned && !created) {
			ret = -EAGAIN;
			goto out;
		}

		ret = btrfs_add_log_tree(trans, root);
		if (ret)
			goto out;

		set_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state);
		clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		root->log_start_pid = current->pid;
	}

	atomic_inc(&root->log_writers);
	if (!ctx->logging_new_name) {
		int index = root->log_transid % 2;
		list_add_tail(&ctx->list, &root->log_ctxs[index]);
		ctx->log_transid = root->log_transid;
	}

out:
	mutex_unlock(&root->log_mutex);
	return ret;
}

/*
 * returns 0 if there was a log transaction running and we were able
 * to join, or returns -ENOENT if there was no transaction
 * in progress
 */
static int join_running_log_trans(struct btrfs_root *root)
{
	const bool zoned = btrfs_is_zoned(root->fs_info);
	int ret = -ENOENT;

	if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state))
		return ret;

	mutex_lock(&root->log_mutex);
again:
	if (root->log_root) {
		int index = (root->log_transid + 1) % 2;

		ret = 0;
		if (zoned && atomic_read(&root->log_commit[index])) {
			wait_log_commit(root, root->log_transid - 1);
			goto again;
		}
		atomic_inc(&root->log_writers);
	}
	mutex_unlock(&root->log_mutex);
	return ret;
}

/*
 * This either makes the current running log transaction wait
 * until you call btrfs_end_log_trans() or it makes any future
 * log transactions wait until you call btrfs_end_log_trans()
 */
void btrfs_pin_log_trans(struct btrfs_root *root)
{
	atomic_inc(&root->log_writers);
}

/*
 * indicate we're done making changes to the log tree
 * and wake up anyone waiting to do a sync
 */
void btrfs_end_log_trans(struct btrfs_root *root)
{
	if (atomic_dec_and_test(&root->log_writers)) {
		/* atomic_dec_and_test implies a barrier */
		cond_wake_up_nomb(&root->log_writer_wait);
	}
}
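/*
 * The log_writers protocol in a nutshell (a sketch; the callers shown are
 * elsewhere in btrfs): every path that bumps root->log_writers must be
 * balanced by btrfs_end_log_trans(), and the log commit waits for the
 * count to drain before it writes the tree out.
 *
 *	if (join_running_log_trans(root) == 0) {
 *		... remove dir entries / inode refs from the log ...
 *		btrfs_end_log_trans(root);
 *	}
 *
 *	btrfs_pin_log_trans(root);	e.g. around a rename
 *	... directory and inode updates ...
 *	btrfs_end_log_trans(root);
 */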

static int btrfs_write_tree_block(struct extent_buffer *buf)
{
	return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
					buf->start + buf->len - 1);
}

static void btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
	filemap_fdatawait_range(buf->pages[0]->mapping,
				buf->start, buf->start + buf->len - 1);
}

/*
 * the walk control struct is used to pass state down the chain when
 * processing the log tree.  The stage field tells us which part
 * of the log tree processing we are currently doing.  The others
 * are state fields used for that specific part
 */
struct walk_control {
	/* should we free the extent on disk when done?  This is used
	 * at transaction commit time while freeing a log tree
	 */
	int free;

	/* should we write out the extent buffer?  This is used
	 * while flushing the log tree to disk during a sync
	 */
	int write;

	/* should we wait for the extent buffer io to finish?  Also used
	 * while flushing the log tree to disk for a sync
	 */
	int wait;

	/* pin only walk, we record which extents on disk belong to the
	 * log trees
	 */
	int pin;

	/* what stage of the replay code we're currently in */
	int stage;

	/*
	 * Ignore any items from the inode currently being processed. Needs
	 * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in
	 * the LOG_WALK_REPLAY_INODES stage.
	 */
	bool ignore_cur_inode;

	/* the root we are currently replaying */
	struct btrfs_root *replay_dest;

	/* the trans handle for the current replay */
	struct btrfs_trans_handle *trans;

	/* the function that gets used to process blocks we find in the
	 * tree.  Note the extent_buffer might not be up to date when it is
	 * passed in, and it must be checked or read if you need the data
	 * inside it
	 */
	int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
			    struct walk_control *wc, u64 gen, int level);
};
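/*
 * Two configurations that appear later in this file (a sketch; both feed
 * process_one_buffer() below as the process_func):
 *
 *	free the blocks of a log tree at transaction commit:
 *		struct walk_control wc = {
 *			.free = 1,
 *			.process_func = process_one_buffer
 *		};
 *
 *	pin down extents at the start of log replay:
 *		struct walk_control wc = {
 *			.pin = 1,
 *			.process_func = process_one_buffer,
 *			.stage = LOG_WALK_PIN_ONLY
 *		};
 */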

/*
 * process_func used to pin down extents, write them or wait on them
 */
static int process_one_buffer(struct btrfs_root *log,
			      struct extent_buffer *eb,
			      struct walk_control *wc, u64 gen, int level)
{
	struct btrfs_fs_info *fs_info = log->fs_info;
	int ret = 0;

	/*
	 * If this fs is mixed then we need to be able to process the leaves to
	 * pin down any logged extents, so we have to read the block.
	 */
	if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
		ret = btrfs_read_buffer(eb, gen, level, NULL);
		if (ret)
			return ret;
	}

	if (wc->pin)
		ret = btrfs_pin_extent_for_log_replay(wc->trans, eb->start,
						      eb->len);

	if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
		if (wc->pin && btrfs_header_level(eb) == 0)
			ret = btrfs_exclude_logged_extents(eb);
		if (wc->write)
			btrfs_write_tree_block(eb);
		if (wc->wait)
			btrfs_wait_tree_block_writeback(eb);
	}
	return ret;
}

static int do_overwrite_item(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path,
			     struct extent_buffer *eb, int slot,
			     struct btrfs_key *key)
{
	int ret;
	u32 item_size;
	u64 saved_i_size = 0;
	int save_old_i_size = 0;
	unsigned long src_ptr;
	unsigned long dst_ptr;
	int overwrite_root = 0;
	bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
		overwrite_root = 1;

	item_size = btrfs_item_size_nr(eb, slot);
	src_ptr = btrfs_item_ptr_offset(eb, slot);

	/* Our caller must have done a search for the key for us. */
	ASSERT(path->nodes[0] != NULL);

	/*
	 * And the slot must point to the exact key or the slot where the key
	 * should be at (the first item with a key greater than 'key')
	 */
	if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
		struct btrfs_key found_key;

		btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
		ret = btrfs_comp_cpu_keys(&found_key, key);
		ASSERT(ret >= 0);
	} else {
		ret = 1;
	}

	if (ret == 0) {
		char *src_copy;
		char *dst_copy;
		u32 dst_size = btrfs_item_size_nr(path->nodes[0],
						  path->slots[0]);
		if (dst_size != item_size)
			goto insert;

		if (item_size == 0) {
			btrfs_release_path(path);
			return 0;
		}
		dst_copy = kmalloc(item_size, GFP_NOFS);
		src_copy = kmalloc(item_size, GFP_NOFS);
		if (!dst_copy || !src_copy) {
			btrfs_release_path(path);
			kfree(dst_copy);
			kfree(src_copy);
			return -ENOMEM;
		}

		read_extent_buffer(eb, src_copy, src_ptr, item_size);

		dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
				   item_size);
		ret = memcmp(dst_copy, src_copy, item_size);

		kfree(dst_copy);
		kfree(src_copy);
		/*
		 * they have the same contents, just return, this saves
		 * us from cowing blocks in the destination tree and doing
		 * extra writes that may not have been done by a previous
		 * sync
		 */
		if (ret == 0) {
			btrfs_release_path(path);
			return 0;
		}

		/*
		 * We need to load the old nbytes into the inode so when we
		 * replay the extents we've logged we get the right nbytes.
		 */
		if (inode_item) {
			struct btrfs_inode_item *item;
			u64 nbytes;
			u32 mode;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			nbytes = btrfs_inode_nbytes(path->nodes[0], item);
			item = btrfs_item_ptr(eb, slot,
					      struct btrfs_inode_item);
			btrfs_set_inode_nbytes(eb, item, nbytes);

			/*
			 * If this is a directory we need to reset the i_size to
			 * 0 so that we can set it up properly when replaying
			 * the rest of the items in this log.
			 */
			mode = btrfs_inode_mode(eb, item);
			if (S_ISDIR(mode))
				btrfs_set_inode_size(eb, item, 0);
		}
	} else if (inode_item) {
		struct btrfs_inode_item *item;
		u32 mode;

		/*
		 * New inode, set nbytes to 0 so that the nbytes comes out
		 * properly when we replay the extents.
		 */
		item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
		btrfs_set_inode_nbytes(eb, item, 0);

		/*
		 * If this is a directory we need to reset the i_size to 0 so
		 * that we can set it up properly when replaying the rest of
		 * the items in this log.
		 */
		mode = btrfs_inode_mode(eb, item);
		if (S_ISDIR(mode))
			btrfs_set_inode_size(eb, item, 0);
	}
insert:
	btrfs_release_path(path);
	/* try to insert the key into the destination tree */
	path->skip_release_on_error = 1;
	ret = btrfs_insert_empty_item(trans, root, path,
				      key, item_size);
	path->skip_release_on_error = 0;

	/* make sure any existing item is the correct size */
	if (ret == -EEXIST || ret == -EOVERFLOW) {
		u32 found_size;
		found_size = btrfs_item_size_nr(path->nodes[0],
						path->slots[0]);
		if (found_size > item_size)
			btrfs_truncate_item(path, item_size, 1);
		else if (found_size < item_size)
			btrfs_extend_item(path, item_size - found_size);
	} else if (ret) {
		return ret;
	}
	dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
					path->slots[0]);

	/* don't overwrite an existing inode if the generation number
	 * was logged as zero.  This is done when the tree logging code
	 * is just logging an inode to make sure it exists after recovery.
	 *
	 * Also, don't overwrite i_size on directories during replay.
	 * log replay inserts and removes directory items based on the
	 * state of the tree found in the subvolume, and i_size is modified
	 * as it goes
	 */
	if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
		struct btrfs_inode_item *src_item;
		struct btrfs_inode_item *dst_item;

		src_item = (struct btrfs_inode_item *)src_ptr;
		dst_item = (struct btrfs_inode_item *)dst_ptr;

		if (btrfs_inode_generation(eb, src_item) == 0) {
			struct extent_buffer *dst_eb = path->nodes[0];
			const u64 ino_size = btrfs_inode_size(eb, src_item);

			/*
			 * For regular files an ino_size == 0 is used only when
			 * logging that an inode exists, as part of a directory
			 * fsync, and the inode wasn't fsynced before. In this
			 * case don't set the size of the inode in the fs/subvol
			 * tree, otherwise we would be throwing valid data away.
			 */
			if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
			    S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
			    ino_size != 0)
				btrfs_set_inode_size(dst_eb, dst_item, ino_size);
			goto no_copy;
		}

		if (overwrite_root &&
		    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
		    S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
			save_old_i_size = 1;
			saved_i_size = btrfs_inode_size(path->nodes[0],
							dst_item);
		}
	}

	copy_extent_buffer(path->nodes[0], eb, dst_ptr,
			   src_ptr, item_size);

	if (save_old_i_size) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
	}

	/* make sure the generation is filled in */
	if (key->type == BTRFS_INODE_ITEM_KEY) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
			btrfs_set_inode_generation(path->nodes[0], dst_item,
						   trans->transid);
		}
	}
no_copy:
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
	return 0;
}

/*
 * Item overwrite used by replay and tree logging.  eb, slot and key all refer
 * to the src data we are copying out.
 *
 * root is the tree we are copying into, and path is a scratch
 * path for use in this function (it should be released on entry and
 * will be released on exit).
 *
 * If the key is already in the destination tree the existing item is
 * overwritten.  If the existing item isn't big enough, it is extended.
 * If it is too large, it is truncated.
 *
 * If the key isn't in the destination yet, a new item is inserted.
 */
static int overwrite_item(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  struct btrfs_path *path,
			  struct extent_buffer *eb, int slot,
			  struct btrfs_key *key)
{
	int ret;

	/* Look for the key in the destination tree. */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		return ret;

	return do_overwrite_item(trans, root, path, eb, slot, key);
}
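/*
 * Summary of the cases do_overwrite_item() handles, derived from the code
 * above (sizes compared between the log copy and the destination item):
 *
 *	key absent			insert a new item, copy contents
 *	same size, same bytes		nothing to do (avoids needless COW)
 *	same size, different bytes	copy over the existing item
 *	destination larger		btrfs_truncate_item(), then copy
 *	destination smaller		btrfs_extend_item(), then copy
 *
 * Inode items get extra care: nbytes is carried over so extent replay
 * accounts correctly, directory i_size is never blindly overwritten, and
 * a generation of zero marks an "exists only" inode that must not clobber
 * a real one.
 */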

/*
 * simple helper to read an inode off the disk from a given root
 * This can only be called for subvolume roots and not for the log
 */
static noinline struct inode *read_one_inode(struct btrfs_root *root,
					     u64 objectid)
{
	struct inode *inode;

	inode = btrfs_iget(root->fs_info->sb, objectid, root);
	if (IS_ERR(inode))
		inode = NULL;
	return inode;
}

/* replays a single extent in 'eb' at 'slot' with 'key' into the
 * subvolume 'root'.  path is released on entry and should be released
 * on exit.
 *
 * extents in the log tree have not been allocated out of the extent
 * tree yet.  So, this completes the allocation, taking a reference
 * as required if the extent already exists or creating a new extent
 * if it isn't in the extent allocation tree yet.
 *
 * The extent is inserted into the file, dropping any existing extents
 * from the file that overlap the new one.
 */
static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct extent_buffer *eb, int slot,
				      struct btrfs_key *key)
{
	struct btrfs_drop_extents_args drop_args = { 0 };
	struct btrfs_fs_info *fs_info = root->fs_info;
	int found_type;
	u64 extent_end;
	u64 start = key->offset;
	u64 nbytes = 0;
	struct btrfs_file_extent_item *item;
	struct inode *inode = NULL;
	unsigned long size;
	int ret = 0;

	item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(eb, item);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		nbytes = btrfs_file_extent_num_bytes(eb, item);
		extent_end = start + nbytes;

		/*
		 * We don't add to the inodes nbytes if we are prealloc or a
		 * hole.
		 */
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			nbytes = 0;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size = btrfs_file_extent_ram_bytes(eb, item);
		nbytes = btrfs_file_extent_ram_bytes(eb, item);
		extent_end = ALIGN(start + size,
				   fs_info->sectorsize);
	} else {
		ret = 0;
		goto out;
	}

	inode = read_one_inode(root, key->objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	/*
	 * first check to see if we already have this extent in the
	 * file.  This must be done before the btrfs_drop_extents run
	 * so we don't try to drop this extent.
	 */
	ret = btrfs_lookup_file_extent(trans, root, path,
			btrfs_ino(BTRFS_I(inode)), start, 0);

	if (ret == 0 &&
	    (found_type == BTRFS_FILE_EXTENT_REG ||
	     found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
		struct btrfs_file_extent_item cmp1;
		struct btrfs_file_extent_item cmp2;
		struct btrfs_file_extent_item *existing;
		struct extent_buffer *leaf;

		leaf = path->nodes[0];
		existing = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_file_extent_item);

		read_extent_buffer(eb, &cmp1, (unsigned long)item,
				   sizeof(cmp1));
		read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
				   sizeof(cmp2));

		/*
		 * we already have a pointer to this exact extent,
		 * we don't have to do anything
		 */
		if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
			btrfs_release_path(path);
			goto out;
		}
	}
	btrfs_release_path(path);

	/* drop any overlapping extents */
	drop_args.start = start;
	drop_args.end = extent_end;
	drop_args.drop_cache = true;
	ret = btrfs_drop_extents(trans, root, BTRFS_I(inode), &drop_args);
	if (ret)
		goto out;

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		u64 offset;
		unsigned long dest_offset;
		struct btrfs_key ins;

		if (btrfs_file_extent_disk_bytenr(eb, item) == 0 &&
		    btrfs_fs_incompat(fs_info, NO_HOLES))
			goto update_inode;

		ret = btrfs_insert_empty_item(trans, root, path, key,
					      sizeof(*item));
		if (ret)
			goto out;
		dest_offset = btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
		copy_extent_buffer(path->nodes[0], eb, dest_offset,
				(unsigned long)item,  sizeof(*item));

		ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		ins.type = BTRFS_EXTENT_ITEM_KEY;
		offset = key->offset - btrfs_file_extent_offset(eb, item);

		/*
		 * Manually record dirty extent, as here we did a shallow
		 * file extent item copy and skip normal backref update,
		 * but modifying extent tree all by ourselves.
		 * So need to manually record dirty extent for qgroup,
		 * as the owner of the file extent changed from log tree
		 * (doesn't affect qgroup) to fs/file tree (affects qgroup)
		 */
		ret = btrfs_qgroup_trace_extent(trans,
				btrfs_file_extent_disk_bytenr(eb, item),
				btrfs_file_extent_disk_num_bytes(eb, item),
				GFP_NOFS);
		if (ret < 0)
			goto out;

		if (ins.objectid > 0) {
			struct btrfs_ref ref = { 0 };
			u64 csum_start;
			u64 csum_end;
			LIST_HEAD(ordered_sums);

			/*
			 * is this extent already allocated in the extent
			 * allocation tree?  If so, just add a reference
			 */
			ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
						ins.offset);
			if (ret < 0) {
				goto out;
			} else if (ret == 0) {
				btrfs_init_generic_ref(&ref,
						BTRFS_ADD_DELAYED_REF,
						ins.objectid, ins.offset, 0);
				btrfs_init_data_ref(&ref,
						root->root_key.objectid,
						key->objectid, offset, 0, false);
				ret = btrfs_inc_extent_ref(trans, &ref);
				if (ret)
					goto out;
			} else {
				/*
				 * insert the extent pointer in the extent
				 * allocation tree
				 */
				ret = btrfs_alloc_logged_file_extent(trans,
						root->root_key.objectid,
						key->objectid, offset, &ins);
				if (ret)
					goto out;
			}
			btrfs_release_path(path);

			if (btrfs_file_extent_compression(eb, item)) {
				csum_start = ins.objectid;
				csum_end = csum_start + ins.offset;
			} else {
				csum_start = ins.objectid +
					btrfs_file_extent_offset(eb, item);
				csum_end = csum_start +
					btrfs_file_extent_num_bytes(eb, item);
			}

			ret = btrfs_lookup_csums_range(root->log_root,
						csum_start, csum_end - 1,
						&ordered_sums, 0);
			if (ret)
				goto out;
			/*
			 * Now delete all existing csums in the csum root that
			 * cover our range. We do this because we can have an
			 * extent that is completely referenced by one file
			 * extent item and partially referenced by another
			 * file extent item (like after using the clone or
			 * extent_same ioctls). In this case if we end up doing
			 * the replay of the one that partially references the
			 * extent first, and we do not do the csum deletion
			 * below, we can get 2 csum items in the csum tree that
			 * overlap each other. For example, imagine our log has
			 * the two following file extent items:
			 *
			 * key (257 EXTENT_DATA 409600)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 20480 nr 20480 ram 102400
			 *
			 * key (257 EXTENT_DATA 819200)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 0 nr 102400 ram 102400
			 *
			 * Where the second one fully references the 100K extent
			 * that starts at disk byte 12845056, and the log tree
			 * has a single csum item that covers the entire range
			 * of the extent:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 *
			 * After the first file extent item is replayed, the
			 * csum tree gets the following csum item:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which covers the 20K sub-range starting at offset 20K
			 * of our extent. Now when we replay the second file
			 * extent item, if we do not delete existing csum items
			 * that cover any of its blocks, we end up getting two
			 * csum items in our csum tree that overlap each other:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which is a problem, because after this anyone trying
			 * to look up the checksum of any block of our
			 * extent starting at an offset of 40K or higher, will
			 * end up looking at the second csum item only, which
			 * does not contain the checksum for any block starting
			 * at offset 40K or higher of our extent.
			 */
			while (!list_empty(&ordered_sums)) {
				struct btrfs_ordered_sum *sums;
				sums = list_entry(ordered_sums.next,
						struct btrfs_ordered_sum,
						list);
				if (!ret)
					ret = btrfs_del_csums(trans,
							      fs_info->csum_root,
							      sums->bytenr,
							      sums->len);
				if (!ret)
					ret = btrfs_csum_file_blocks(trans,
						fs_info->csum_root, sums);
				list_del(&sums->list);
				kfree(sums);
			}
			if (ret)
				goto out;
		} else {
			btrfs_release_path(path);
		}
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		/* inline extents are easy, we just overwrite them */
		ret = overwrite_item(trans, root, path, eb, slot, key);
		if (ret)
			goto out;
	}

	ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), start,
						extent_end - start);
	if (ret)
		goto out;

update_inode:
	btrfs_update_inode_bytes(BTRFS_I(inode), nbytes, drop_args.bytes_found);
	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
out:
	if (inode)
		iput(inode);
	return ret;
}

/*
 * when cleaning up conflicts between the directory names in the
 * subvolume, directory names in the log and directory names in the
 * inode back references, we may have to unlink inodes from directories.
 *
 * This is a helper function to do the unlink of a specific directory
 * item
 */
static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_inode *dir,
				      struct btrfs_dir_item *di)
{
	struct btrfs_root *root = dir->root;
	struct inode *inode;
	char *name;
	int name_len;
	struct extent_buffer *leaf;
	struct btrfs_key location;
	int ret;

	leaf = path->nodes[0];

	btrfs_dir_item_key_to_cpu(leaf, di, &location);
	name_len = btrfs_dir_name_len(leaf, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name)
		return -ENOMEM;

	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
	btrfs_release_path(path);

	inode = read_one_inode(root, location.objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	ret = link_to_fixup_dir(trans, root, path, location.objectid);
	if (ret)
		goto out;

	ret = btrfs_unlink_inode(trans, dir, BTRFS_I(inode), name,
			name_len);
	if (ret)
		goto out;
	else
		ret = btrfs_run_delayed_items(trans);
out:
	kfree(name);
	iput(inode);
	return ret;
}

/*
 * See if a given name and sequence number found in an inode back reference are
 * already in a directory and correctly point to this inode.
 *
 * Returns: < 0 on error, 0 if the directory entry does not exist and 1 if it
 * exists.
 */
static noinline int inode_in_dir(struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 dirid, u64 objectid, u64 index,
				 const char *name, int name_len)
{
	struct btrfs_dir_item *di;
	struct btrfs_key location;
	int ret = 0;

	di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
					 index, name, name_len, 0);
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto out;
	} else if (di) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else {
		goto out;
	}

	btrfs_release_path(path);
	di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto out;
	} else if (di) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid == objectid)
			ret = 1;
	}
out:
	btrfs_release_path(path);
	return ret;
}

/*
 * helper function to check a log tree for a named back reference in
 * an inode.  This is used to decide if a back reference that is
 * found in the subvolume conflicts with what we find in the log.
 *
 * inode backreferences may have multiple refs in a single item,
 * during replay we process one reference at a time, and we don't
 * want to delete valid links to a file from the subvolume if that
 * link is also in the log.
 */
static noinline int backref_in_log(struct btrfs_root *log,
				   struct btrfs_key *key,
				   u64 ref_objectid,
				   const char *name, int namelen)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
	if (ret < 0) {
		goto out;
	} else if (ret == 1) {
		ret = 0;
		goto out;
	}

	if (key->type == BTRFS_INODE_EXTREF_KEY)
		ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
						       path->slots[0],
						       ref_objectid,
						       name, namelen);
	else
		ret = !!btrfs_find_name_in_backref(path->nodes[0],
						   path->slots[0],
						   name, namelen);
out:
	btrfs_free_path(path);
	return ret;
}

static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_root *log_root,
				  struct btrfs_inode *dir,
				  struct btrfs_inode *inode,
				  u64 inode_objectid, u64 parent_objectid,
				  u64 ref_index, char *name, int namelen,
				  int *search_done)
{
	int ret;
	char *victim_name;
	int victim_name_len;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key search_key;
	struct btrfs_inode_extref *extref;

again:
	/* Search old style refs */
	search_key.objectid = inode_objectid;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = parent_objectid;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret == 0) {
		struct btrfs_inode_ref *victim_ref;
		unsigned long ptr;
		unsigned long ptr_end;

		leaf = path->nodes[0];

		/* are we trying to overwrite a back ref for the root directory?
		 * if so, just jump out, we're done
		 */
		if (search_key.objectid == search_key.offset)
			return 1;

		/* check all the names in this back reference to see
		 * if they are in the log.  if so, we allow them to stay
		 * otherwise they must be unlinked as a conflict
		 */
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
		while (ptr < ptr_end) {
			victim_ref = (struct btrfs_inode_ref *)ptr;
			victim_name_len = btrfs_inode_ref_name_len(leaf,
								   victim_ref);
			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;

			read_extent_buffer(leaf, victim_name,
					   (unsigned long)(victim_ref + 1),
					   victim_name_len);

			ret = backref_in_log(log_root, &search_key,
					     parent_objectid, victim_name,
					     victim_name_len);
			if (ret < 0) {
				kfree(victim_name);
				return ret;
			} else if (!ret) {
				inc_nlink(&inode->vfs_inode);
				btrfs_release_path(path);

				ret = btrfs_unlink_inode(trans, dir, inode,
						victim_name, victim_name_len);
				kfree(victim_name);
				if (ret)
					return ret;
				ret = btrfs_run_delayed_items(trans);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);

			ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
		}

		/*
		 * NOTE: we have searched root tree and checked the
		 * corresponding ref, it does not need to check again.
		 */
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* Same search but for extended refs */
	extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
					   inode_objectid, parent_objectid, 0,
					   0);
	if (!IS_ERR_OR_NULL(extref)) {
		u32 item_size;
		u32 cur_offset = 0;
		unsigned long base;
		struct inode *victim_parent;

		leaf = path->nodes[0];

		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		base = btrfs_item_ptr_offset(leaf, path->slots[0]);

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *)(base + cur_offset);

			victim_name_len = btrfs_inode_extref_name_len(leaf, extref);

			if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
				goto next;

			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;
			read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
					   victim_name_len);

			search_key.objectid = inode_objectid;
			search_key.type = BTRFS_INODE_EXTREF_KEY;
			search_key.offset = btrfs_extref_hash(parent_objectid,
							      victim_name,
							      victim_name_len);
			ret = backref_in_log(log_root, &search_key,
					     parent_objectid, victim_name,
					     victim_name_len);
			if (ret < 0) {
				/* don't leak the name copied out above */
				kfree(victim_name);
				return ret;
			} else if (!ret) {
				ret = -ENOENT;
				victim_parent = read_one_inode(root,
						parent_objectid);
				if (victim_parent) {
					inc_nlink(&inode->vfs_inode);
					btrfs_release_path(path);

					ret = btrfs_unlink_inode(trans,
							BTRFS_I(victim_parent),
							inode,
							victim_name,
							victim_name_len);
					if (!ret)
						ret = btrfs_run_delayed_items(
								  trans);
				}
				iput(victim_parent);
				kfree(victim_name);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);
next:
			cur_offset += victim_name_len + sizeof(*extref);
		}
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* look for a conflicting sequence number */
	di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
					 ref_index, name, namelen, 0);
	if (IS_ERR(di)) {
		return PTR_ERR(di);
	} else if (di) {
		ret = drop_one_dir_item(trans, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	/* look for a conflicting name */
	di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
				   name, namelen, 0);
	if (IS_ERR(di)) {
		return PTR_ERR(di);
	} else if (di) {
		ret = drop_one_dir_item(trans, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	return 0;
}

static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			     u32 *namelen, char **name, u64 *index,
			     u64 *parent_objectid)
{
	struct btrfs_inode_extref *extref;

	extref = (struct btrfs_inode_extref *)ref_ptr;

	*namelen = btrfs_inode_extref_name_len(eb, extref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)&extref->name,
			   *namelen);

	if (index)
		*index = btrfs_inode_extref_index(eb, extref);
	if (parent_objectid)
		*parent_objectid = btrfs_inode_extref_parent(eb, extref);

	return 0;
}

static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			  u32 *namelen, char **name, u64 *index)
{
	struct btrfs_inode_ref *ref;

	ref = (struct btrfs_inode_ref *)ref_ptr;

	*namelen = btrfs_inode_ref_name_len(eb, ref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);

	if (index)
		*index = btrfs_inode_ref_index(eb, ref);

	return 0;
}
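/*
 * On-disk layout decoded by the two helpers above: both ref and extref
 * items pack a variable length name directly after the fixed struct, and
 * one item can hold several such records back to back:
 *
 *	[btrfs_inode_ref][name][btrfs_inode_ref][name]...
 *	[btrfs_inode_extref][name][btrfs_inode_extref][name]...
 *
 * which is why every iteration over these items advances the cursor by
 * sizeof(struct) + name_len per record.
 */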

/*
 * Take an inode reference item from the log tree and iterate all names from the
 * inode reference item in the subvolume tree with the same key (if it exists).
 * For any name that is not in the inode reference item from the log tree, do a
 * proper unlink of that name (that is, remove its entry from the inode
 * reference item and both dir index keys).
 */
static int unlink_old_inode_refs(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_inode *inode,
				 struct extent_buffer *log_eb,
				 int log_slot,
				 struct btrfs_key *key)
{
	int ret;
	unsigned long ref_ptr;
	unsigned long ref_end;
	struct extent_buffer *eb;

again:
	btrfs_release_path(path);
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret > 0) {
		ret = 0;
		goto out;
	}
	if (ret < 0)
		goto out;

	eb = path->nodes[0];
	ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, path->slots[0]);
	while (ref_ptr < ref_end) {
		char *name = NULL;
		int namelen;
		u64 parent_id;

		if (key->type == BTRFS_INODE_EXTREF_KEY) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						NULL, &parent_id);
		} else {
			parent_id = key->offset;
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
					     NULL);
		}
		if (ret)
			goto out;

		if (key->type == BTRFS_INODE_EXTREF_KEY)
			ret = !!btrfs_find_name_in_ext_backref(log_eb, log_slot,
							       parent_id, name,
							       namelen);
		else
			ret = !!btrfs_find_name_in_backref(log_eb, log_slot,
							   name, namelen);

		if (!ret) {
			struct inode *dir;

			btrfs_release_path(path);
			dir = read_one_inode(root, parent_id);
			if (!dir) {
				ret = -ENOENT;
				kfree(name);
				goto out;
			}
			ret = btrfs_unlink_inode(trans, BTRFS_I(dir),
						 inode, name, namelen);
			kfree(name);
			iput(dir);
			if (ret)
				goto out;
			goto again;
		}

		kfree(name);
		ref_ptr += namelen;
		if (key->type == BTRFS_INODE_EXTREF_KEY)
			ref_ptr += sizeof(struct btrfs_inode_extref);
		else
			ref_ptr += sizeof(struct btrfs_inode_ref);
	}
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}

static int btrfs_inode_ref_exists(struct inode *inode, struct inode *dir,
				  const u8 ref_type, const char *name,
				  const int namelen)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	const u64 parent_id = btrfs_ino(BTRFS_I(dir));
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = btrfs_ino(BTRFS_I(inode));
	key.type = ref_type;
	if (key.type == BTRFS_INODE_REF_KEY)
		key.offset = parent_id;
	else
		key.offset = btrfs_extref_hash(parent_id, name, namelen);

	ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = 0;
		goto out;
	}
	if (key.type == BTRFS_INODE_EXTREF_KEY)
		ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
				path->slots[0], parent_id, name, namelen);
	else
		ret = !!btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
						   name, namelen);

out:
	btrfs_free_path(path);
	return ret;
}

static int add_link(struct btrfs_trans_handle *trans,
		    struct inode *dir, struct inode *inode, const char *name,
		    int namelen, u64 ref_index)
{
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_dir_item *dir_item;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct inode *other_inode = NULL;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	dir_item = btrfs_lookup_dir_item(NULL, root, path,
					 btrfs_ino(BTRFS_I(dir)),
					 name, namelen, 0);
	if (!dir_item) {
		btrfs_release_path(path);
		goto add_link;
	} else if (IS_ERR(dir_item)) {
		ret = PTR_ERR(dir_item);
		goto out;
	}

	/*
	 * Our inode's dentry collides with the dentry of another inode which is
	 * in the log but not yet processed since it has a higher inode number.
	 * So delete that other dentry.
	 */
	btrfs_dir_item_key_to_cpu(path->nodes[0], dir_item, &key);
	btrfs_release_path(path);
	other_inode = read_one_inode(root, key.objectid);
	if (!other_inode) {
		ret = -ENOENT;
		goto out;
	}
	ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(other_inode),
				 name, namelen);
	if (ret)
		goto out;
	/*
	 * If we dropped the link count to 0, bump it so that later the iput()
	 * on the inode will not free it. We will fixup the link count later.
	 */
	if (other_inode->i_nlink == 0)
		inc_nlink(other_inode);

	ret = btrfs_run_delayed_items(trans);
	if (ret)
		goto out;
add_link:
	ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
			     name, namelen, 0, ref_index);
out:
	iput(other_inode);
	btrfs_free_path(path);

	return ret;
}

/*
 * replay one inode back reference item found in the log tree.
 * eb, slot and key refer to the buffer and key found in the log tree.
 * root is the destination we are replaying into, and path is for temp
 * use by this function.  (it should be released on return).
 */
static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_root *log,
				  struct btrfs_path *path,
				  struct extent_buffer *eb, int slot,
				  struct btrfs_key *key)
{
	struct inode *dir = NULL;
	struct inode *inode = NULL;
	unsigned long ref_ptr;
	unsigned long ref_end;
	char *name = NULL;
	int namelen;
	int ret;
	int search_done = 0;
	int log_ref_ver = 0;
	u64 parent_objectid;
	u64 inode_objectid;
	u64 ref_index = 0;
	int ref_struct_size;

	ref_ptr = btrfs_item_ptr_offset(eb, slot);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *r;

		ref_struct_size = sizeof(struct btrfs_inode_extref);
		log_ref_ver = 1;
		r = (struct btrfs_inode_extref *)ref_ptr;
		parent_objectid = btrfs_inode_extref_parent(eb, r);
	} else {
		ref_struct_size = sizeof(struct btrfs_inode_ref);
		parent_objectid = key->offset;
	}
	inode_objectid = key->objectid;

	/*
	 * it is possible that we didn't log all the parent directories
	 * for a given inode.  If we don't find the dir, just don't
	 * copy the back ref in.  The link count fixup code will take
	 * care of the rest
	 */
	dir = read_one_inode(root, parent_objectid);
	if (!dir) {
		ret = -ENOENT;
		goto out;
	}

	inode = read_one_inode(root, inode_objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	while (ref_ptr < ref_end) {
		if (log_ref_ver) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						&ref_index, &parent_objectid);
			/*
			 * parent object can change from one array
			 * item to another.
			 */
			if (!dir)
				dir = read_one_inode(root, parent_objectid);
			if (!dir) {
				ret = -ENOENT;
				goto out;
			}
		} else {
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
					     &ref_index);
		}
		if (ret)
			goto out;

		ret = inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
				   btrfs_ino(BTRFS_I(inode)), ref_index,
				   name, namelen);
		if (ret < 0) {
			goto out;
		} else if (ret == 0) {
			/*
			 * look for a conflicting back reference in the
			 * metadata. if we find one we have to unlink that name
			 * of the file before we add our new link.  Later on, we
			 * overwrite any existing back reference, and we don't
			 * want to create dangling pointers in the directory.
			 */

			if (!search_done) {
				ret = __add_inode_ref(trans, root, path, log,
						      BTRFS_I(dir),
						      BTRFS_I(inode),
						      inode_objectid,
						      parent_objectid,
						      ref_index, name, namelen,
						      &search_done);
				if (ret) {
					if (ret == 1)
						ret = 0;
					goto out;
				}
			}

			/*
			 * If a reference item already exists for this inode
			 * with the same parent and name, but different index,
			 * drop it and the corresponding directory index entries
			 * from the parent before adding the new reference item
			 * and dir index entries, otherwise we would fail with
			 * -EEXIST returned from btrfs_add_link() below.
			 */
			ret = btrfs_inode_ref_exists(inode, dir, key->type,
						     name, namelen);
			if (ret > 0) {
				ret = btrfs_unlink_inode(trans,
							 BTRFS_I(dir),
							 BTRFS_I(inode),
							 name, namelen);
				/*
				 * If we dropped the link count to 0, bump it so
				 * that later the iput() on the inode will not
				 * free it. We will fixup the link count later.
				 */
				if (!ret && inode->i_nlink == 0)
					inc_nlink(inode);
			}
			if (ret < 0)
				goto out;

			/* insert our name */
			ret = add_link(trans, dir, inode, name, namelen,
				       ref_index);
			if (ret)
				goto out;

			ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
			if (ret)
				goto out;
		}
		/* Else, ret == 1, we already have a perfect match, we're done. */

		ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
		kfree(name);
		name = NULL;
		if (log_ref_ver) {
			iput(dir);
			dir = NULL;
		}
	}

	/*
	 * Before we overwrite the inode reference item in the subvolume tree
	 * with the item from the log tree, we must unlink all names from the
	 * parent directory that are in the subvolume's tree inode reference
	 * item, otherwise we end up with an inconsistent subvolume tree where
	 * dir index entries exist for a name but there is no inode reference
	 * item with the same name.
	 */
	ret = unlink_old_inode_refs(trans, root, path, BTRFS_I(inode), eb, slot,
				    key);
	if (ret)
		goto out;

	/* finally write the back reference in the inode */
	ret = overwrite_item(trans, root, path, eb, slot, key);
out:
	btrfs_release_path(path);
	kfree(name);
	iput(dir);
	iput(inode);
	return ret;
}

static int count_inode_extrefs(struct btrfs_root *root,
		struct btrfs_inode *inode, struct btrfs_path *path)
{
	int ret = 0;
	int name_len;
	unsigned int nlink = 0;
	u32 item_size;
	u32 cur_offset = 0;
	u64 inode_objectid = btrfs_ino(inode);
	u64 offset = 0;
	unsigned long ptr;
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;

	while (1) {
		ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
					    &extref, &offset);
		if (ret)
			break;

		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		cur_offset = 0;

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
			name_len = btrfs_inode_extref_name_len(leaf, extref);

			nlink++;

			cur_offset += name_len + sizeof(*extref);
		}

		offset++;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	if (ret < 0 && ret != -ENOENT)
		return ret;
	return nlink;
}

static int count_inode_refs(struct btrfs_root *root,
			struct btrfs_inode *inode, struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	unsigned int nlink = 0;
	unsigned long ptr;
	unsigned long ptr_end;
	int name_len;
	u64 ino = btrfs_ino(inode);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
process_slot:
		btrfs_item_key_to_cpu(path->nodes[0], &key,
				      path->slots[0]);
		if (key.objectid != ino ||
		    key.type != BTRFS_INODE_REF_KEY)
			break;
		ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
						   path->slots[0]);
		while (ptr < ptr_end) {
			struct btrfs_inode_ref *ref;

			ref = (struct btrfs_inode_ref *)ptr;
			name_len = btrfs_inode_ref_name_len(path->nodes[0],
							    ref);
			ptr = (unsigned long)(ref + 1) + name_len;
			nlink++;
		}

		if (key.offset == 0)
			break;
		if (path->slots[0] > 0) {
			path->slots[0]--;
			goto process_slot;
		}
		key.offset--;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	return nlink;
}

/*
 * There are a few corners where the link count of the file can't
 * be properly maintained during replay.  So, instead of adding
 * lots of complexity to the log code, we just scan the backrefs
 * for any file that has been through replay.
 *
 * The scan will update the link count on the inode to reflect the
 * number of back refs found.  If it goes down to zero, the iput
 * will free the inode.
 */
static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct inode *inode)
{
	struct btrfs_path *path;
	int ret;
	u64 nlink = 0;
	u64 ino = btrfs_ino(BTRFS_I(inode));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = count_inode_refs(root, BTRFS_I(inode), path);
	if (ret < 0)
		goto out;

	nlink = ret;

	ret = count_inode_extrefs(root, BTRFS_I(inode), path);
	if (ret < 0)
		goto out;

	nlink += ret;

	ret = 0;

	if (nlink != inode->i_nlink) {
		set_nlink(inode, nlink);
		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
		if (ret)
			goto out;
	}
	BTRFS_I(inode)->index_cnt = (u64)-1;

	if (inode->i_nlink == 0) {
		if (S_ISDIR(inode->i_mode)) {
			ret = replay_dir_deletes(trans, root, NULL, path,
						 ino, 1);
			if (ret)
				goto out;
		}
		ret = btrfs_insert_orphan_item(trans, root, ino);
		if (ret == -EEXIST)
			ret = 0;
	}

out:
	btrfs_free_path(path);
	return ret;
}
1821 
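/*
 * Walk all the orphan items stored under BTRFS_TREE_LOG_FIXUP_OBJECTID
 * (inserted by link_to_fixup_dir() during replay).  The key offset of
 * each item is the objectid of an inode whose link count needs to be
 * recomputed: delete the item and run the link count fixup on that
 * inode.
 */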
1822 static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
1823 					    struct btrfs_root *root,
1824 					    struct btrfs_path *path)
1825 {
1826 	int ret;
1827 	struct btrfs_key key;
1828 	struct inode *inode;
1829 
1830 	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1831 	key.type = BTRFS_ORPHAN_ITEM_KEY;
1832 	key.offset = (u64)-1;
1833 	while (1) {
1834 		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1835 		if (ret < 0)
1836 			break;
1837 
1838 		if (ret == 1) {
1839 			ret = 0;
1840 			if (path->slots[0] == 0)
1841 				break;
1842 			path->slots[0]--;
1843 		}
1844 
1845 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1846 		if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
1847 		    key.type != BTRFS_ORPHAN_ITEM_KEY)
1848 			break;
1849 
1850 		ret = btrfs_del_item(trans, root, path);
1851 		if (ret)
1852 			break;
1853 
1854 		btrfs_release_path(path);
1855 		inode = read_one_inode(root, key.offset);
1856 		if (!inode) {
1857 			ret = -EIO;
1858 			break;
1859 		}
1860 
1861 		ret = fixup_inode_link_count(trans, root, inode);
1862 		iput(inode);
1863 		if (ret)
1864 			break;
1865 
1866 		/*
1867 		 * fixup on a directory may create new entries,
1868 		 * make sure we always look for the highest possible
1869 		 * offset
1870 		 */
1871 		key.offset = (u64)-1;
1872 	}
1873 	btrfs_release_path(path);
1874 	return ret;
1875 }
1876 
1877 
1878 /*
1879  * record a given inode in the fixup dir so we can check its link
1880  * count when replay is done.  The link count is incremented here
1881  * so the inode won't go away until we check it
1882  */
1883 static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
1884 				      struct btrfs_root *root,
1885 				      struct btrfs_path *path,
1886 				      u64 objectid)
1887 {
1888 	struct btrfs_key key;
1889 	int ret = 0;
1890 	struct inode *inode;
1891 
1892 	inode = read_one_inode(root, objectid);
1893 	if (!inode)
1894 		return -EIO;
1895 
1896 	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1897 	key.type = BTRFS_ORPHAN_ITEM_KEY;
1898 	key.offset = objectid;
1899 
1900 	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1901 
1902 	btrfs_release_path(path);
1903 	if (ret == 0) {
1904 		if (!inode->i_nlink)
1905 			set_nlink(inode, 1);
1906 		else
1907 			inc_nlink(inode);
1908 		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
1909 	} else if (ret == -EEXIST) {
1910 		ret = 0;
1911 	}
1912 	iput(inode);
1913 
1914 	return ret;
1915 }
1916 
1917 /*
1918  * when replaying the log for a directory, we only insert names
1919  * for inodes that actually exist.  This means an fsync on a directory
1920  * does not implicitly fsync all the new files in it
1921  */
1922 static noinline int insert_one_name(struct btrfs_trans_handle *trans,
1923 				    struct btrfs_root *root,
1924 				    u64 dirid, u64 index,
1925 				    char *name, int name_len,
1926 				    struct btrfs_key *location)
1927 {
1928 	struct inode *inode;
1929 	struct inode *dir;
1930 	int ret;
1931 
1932 	inode = read_one_inode(root, location->objectid);
1933 	if (!inode)
1934 		return -ENOENT;
1935 
1936 	dir = read_one_inode(root, dirid);
1937 	if (!dir) {
1938 		iput(inode);
1939 		return -EIO;
1940 	}
1941 
1942 	ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
1943 			name_len, 1, index);
1944 
1945 	/* FIXME, put inode into FIXUP list */
1946 
1947 	iput(inode);
1948 	iput(dir);
1949 	return ret;
1950 }
1951 
1952 /*
1953  * take a single entry in a log directory item and replay it into
1954  * the subvolume.
1955  *
1956  * if a conflicting item exists in the subdirectory already,
1957  * the inode it points to is unlinked and put into the link count
1958  * fix up tree.
1959  *
1960  * If a name from the log points to a file or directory that does
1961  * not exist in the FS, it is skipped.  fsyncs on directories
1962  * do not force down inodes inside that directory, just changes to the
1963  * names or unlinks in a directory.
1964  *
1965  * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a
1966  * non-existing inode) and 1 if the name was replayed.
1967  */
1968 static noinline int replay_one_name(struct btrfs_trans_handle *trans,
1969 				    struct btrfs_root *root,
1970 				    struct btrfs_path *path,
1971 				    struct extent_buffer *eb,
1972 				    struct btrfs_dir_item *di,
1973 				    struct btrfs_key *key)
1974 {
1975 	char *name;
1976 	int name_len;
1977 	struct btrfs_dir_item *dst_di;
1978 	struct btrfs_key found_key;
1979 	struct btrfs_key log_key;
1980 	struct inode *dir;
1981 	u8 log_type;
1982 	bool exists;
1983 	int ret;
1984 	bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
1985 	bool name_added = false;
1986 
1987 	dir = read_one_inode(root, key->objectid);
1988 	if (!dir)
1989 		return -EIO;
1990 
1991 	name_len = btrfs_dir_name_len(eb, di);
1992 	name = kmalloc(name_len, GFP_NOFS);
1993 	if (!name) {
1994 		ret = -ENOMEM;
1995 		goto out;
1996 	}
1997 
1998 	log_type = btrfs_dir_type(eb, di);
1999 	read_extent_buffer(eb, name, (unsigned long)(di + 1),
2000 		   name_len);
2001 
2002 	btrfs_dir_item_key_to_cpu(eb, di, &log_key);
2003 	ret = btrfs_lookup_inode(trans, root, path, &log_key, 0);
2004 	btrfs_release_path(path);
2005 	if (ret < 0)
2006 		goto out;
2007 	exists = (ret == 0);
2008 	ret = 0;
2009 
2010 	if (key->type == BTRFS_DIR_ITEM_KEY) {
2011 		dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
2012 				       name, name_len, 1);
2013 	} else if (key->type == BTRFS_DIR_INDEX_KEY) {
2014 		dst_di = btrfs_lookup_dir_index_item(trans, root, path,
2015 						     key->objectid,
2016 						     key->offset, name,
2017 						     name_len, 1);
2018 	} else {
2019 		/* Corruption */
2020 		ret = -EINVAL;
2021 		goto out;
2022 	}
2023 
2024 	if (IS_ERR(dst_di)) {
2025 		ret = PTR_ERR(dst_di);
2026 		goto out;
2027 	} else if (!dst_di) {
2028 		/* we need a sequence number to insert, so we only
2029 		 * do inserts for the BTRFS_DIR_INDEX_KEY types
2030 		 */
2031 		if (key->type != BTRFS_DIR_INDEX_KEY)
2032 			goto out;
2033 		goto insert;
2034 	}
2035 
2036 	btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
2037 	/* the existing item matches the logged item */
2038 	if (found_key.objectid == log_key.objectid &&
2039 	    found_key.type == log_key.type &&
2040 	    found_key.offset == log_key.offset &&
2041 	    btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
2042 		update_size = false;
2043 		goto out;
2044 	}
2045 
2046 	/*
2047 	 * don't drop the conflicting directory entry if the inode
2048 	 * for the new entry doesn't exist
2049 	 */
2050 	if (!exists)
2051 		goto out;
2052 
2053 	ret = drop_one_dir_item(trans, path, BTRFS_I(dir), dst_di);
2054 	if (ret)
2055 		goto out;
2056 
2057 	if (key->type == BTRFS_DIR_INDEX_KEY)
2058 		goto insert;
2059 out:
2060 	btrfs_release_path(path);
2061 	if (!ret && update_size) {
2062 		btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name_len * 2);
2063 		ret = btrfs_update_inode(trans, root, BTRFS_I(dir));
2064 	}
2065 	kfree(name);
2066 	iput(dir);
2067 	if (!ret && name_added)
2068 		ret = 1;
2069 	return ret;
2070 
2071 insert:
2072 	/*
2073 	 * Check if the inode reference exists in the log for the given name,
2074 	 * inode and parent inode
2075 	 */
2076 	found_key.objectid = log_key.objectid;
2077 	found_key.type = BTRFS_INODE_REF_KEY;
2078 	found_key.offset = key->objectid;
2079 	ret = backref_in_log(root->log_root, &found_key, 0, name, name_len);
2080 	if (ret < 0) {
2081 	        goto out;
2082 		goto out;
2083 	} else if (ret) {
2084 		/* The dentry will be added later. */
2085 		ret = 0;
2086 		update_size = false;
2087 		goto out;
2088 
2089 	found_key.objectid = log_key.objectid;
2090 	found_key.type = BTRFS_INODE_EXTREF_KEY;
2091 	found_key.offset = key->objectid;
2092 	ret = backref_in_log(root->log_root, &found_key, key->objectid, name,
2093 			     name_len);
2094 	if (ret < 0) {
2095 		goto out;
2096 	} else if (ret) {
2097 		/* The dentry will be added later. */
2098 		ret = 0;
2099 		update_size = false;
2100 		goto out;
2101 	}
2102 	btrfs_release_path(path);
2103 	ret = insert_one_name(trans, root, key->objectid, key->offset,
2104 			      name, name_len, &log_key);
2105 	if (ret && ret != -ENOENT && ret != -EEXIST)
2106 		goto out;
2107 	if (!ret)
2108 		name_added = true;
2109 	update_size = false;
2110 	ret = 0;
2111 	goto out;
2112 }
2113 
2114 /*
2115  * find all the names in a directory item and reconcile them into
2116  * the subvolume.  Only BTRFS_DIR_ITEM_KEY types will have more than
2117  * one name in a directory item, but the same code gets used for
2118  * both directory index types
2119  */
2120 static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
2121 					struct btrfs_root *root,
2122 					struct btrfs_path *path,
2123 					struct extent_buffer *eb, int slot,
2124 					struct btrfs_key *key)
2125 {
2126 	int ret = 0;
2127 	u32 item_size = btrfs_item_size_nr(eb, slot);
2128 	struct btrfs_dir_item *di;
2129 	int name_len;
2130 	unsigned long ptr;
2131 	unsigned long ptr_end;
2132 	struct btrfs_path *fixup_path = NULL;
2133 
2134 	ptr = btrfs_item_ptr_offset(eb, slot);
2135 	ptr_end = ptr + item_size;
2136 	while (ptr < ptr_end) {
2137 		di = (struct btrfs_dir_item *)ptr;
2138 		name_len = btrfs_dir_name_len(eb, di);
2139 		ret = replay_one_name(trans, root, path, eb, di, key);
2140 		if (ret < 0)
2141 			break;
2142 		ptr = (unsigned long)(di + 1);
2143 		ptr += name_len;
2144 
2145 		/*
2146 		 * If this entry refers to a non-directory (directories can not
2147 		 * have a link count > 1) and it was added in the transaction
2148 		 * that was not committed, make sure we fixup the link count of
2149 		 * the inode the entry points to. Otherwise something like
2150 		 * the following would result in a directory pointing to an
2151 		 * inode with a wrong link count that does not account for this
2152 		 * dir entry:
2153 		 *
2154 		 * mkdir testdir
2155 		 * touch testdir/foo
2156 		 * touch testdir/bar
2157 		 * sync
2158 		 *
2159 		 * ln testdir/bar testdir/bar_link
2160 		 * ln testdir/foo testdir/foo_link
2161 		 * xfs_io -c "fsync" testdir/bar
2162 		 *
2163 		 * <power failure>
2164 		 *
2165 		 * mount fs, log replay happens
2166 		 *
2167 		 * File foo would remain with a link count of 1 when it has two
2168 		 * entries pointing to it in the directory testdir. This would
2169 		 * make it impossible to ever delete the parent directory, as
2170 		 * it would result in stale dentries that can never be deleted.
2171 		 */
2172 		if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) {
2173 			struct btrfs_key di_key;
2174 
2175 			if (!fixup_path) {
2176 				fixup_path = btrfs_alloc_path();
2177 				if (!fixup_path) {
2178 					ret = -ENOMEM;
2179 					break;
2180 				}
2181 			}
2182 
2183 			btrfs_dir_item_key_to_cpu(eb, di, &di_key);
2184 			ret = link_to_fixup_dir(trans, root, fixup_path,
2185 						di_key.objectid);
2186 			if (ret)
2187 				break;
2188 		}
2189 		ret = 0;
2190 	}
2191 	btrfs_free_path(fixup_path);
2192 	return ret;
2193 }
2194 
2195 /*
2196  * directory replay has two parts.  There are the standard directory
2197  * items in the log copied from the subvolume, and range items
2198  * created in the log while the subvolume was logged.
2199  *
2200  * The range items tell us which parts of the key space the log
2201  * is authoritative for.  During replay, if a key in the subvolume
2202  * directory is in a logged range item, but not actually in the log
2203  * that means it was deleted from the directory before the fsync
2204  * and should be removed.
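 *
 * The start of a range is the key offset of the dir log item and the
 * end is stored in the item itself (btrfs_dir_log_end), so a single
 * item describes the whole closed interval [key.offset, dir_log_end].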
2205  */
2206 static noinline int find_dir_range(struct btrfs_root *root,
2207 				   struct btrfs_path *path,
2208 				   u64 dirid, int key_type,
2209 				   u64 *start_ret, u64 *end_ret)
2210 {
2211 	struct btrfs_key key;
2212 	u64 found_end;
2213 	struct btrfs_dir_log_item *item;
2214 	int ret;
2215 	int nritems;
2216 
2217 	if (*start_ret == (u64)-1)
2218 		return 1;
2219 
2220 	key.objectid = dirid;
2221 	key.type = key_type;
2222 	key.offset = *start_ret;
2223 
2224 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2225 	if (ret < 0)
2226 		goto out;
2227 	if (ret > 0) {
2228 		if (path->slots[0] == 0)
2229 			goto out;
2230 		path->slots[0]--;
2231 	}
2232 	if (ret != 0)
2233 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2234 
2235 	if (key.type != key_type || key.objectid != dirid) {
2236 		ret = 1;
2237 		goto next;
2238 	}
2239 	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2240 			      struct btrfs_dir_log_item);
2241 	found_end = btrfs_dir_log_end(path->nodes[0], item);
2242 
2243 	if (*start_ret >= key.offset && *start_ret <= found_end) {
2244 		ret = 0;
2245 		*start_ret = key.offset;
2246 		*end_ret = found_end;
2247 		goto out;
2248 	}
2249 	ret = 1;
2250 next:
2251 	/* check the next slot in the tree to see if it is a valid item */
2252 	nritems = btrfs_header_nritems(path->nodes[0]);
2253 	path->slots[0]++;
2254 	if (path->slots[0] >= nritems) {
2255 		ret = btrfs_next_leaf(root, path);
2256 		if (ret)
2257 			goto out;
2258 	}
2259 
2260 	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2261 
2262 	if (key.type != key_type || key.objectid != dirid) {
2263 		ret = 1;
2264 		goto out;
2265 	}
2266 	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2267 			      struct btrfs_dir_log_item);
2268 	found_end = btrfs_dir_log_end(path->nodes[0], item);
2269 	*start_ret = key.offset;
2270 	*end_ret = found_end;
2271 	ret = 0;
2272 out:
2273 	btrfs_release_path(path);
2274 	return ret;
2275 }
2276 
2277 /*
2278  * this looks for a given directory item in the log.  If the directory
2279  * item is not in the log, the item is removed and the inode it points
2280  * to is unlinked
2281  */
2282 static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
2283 				      struct btrfs_root *log,
2284 				      struct btrfs_path *path,
2285 				      struct btrfs_path *log_path,
2286 				      struct inode *dir,
2287 				      struct btrfs_key *dir_key)
2288 {
2289 	struct btrfs_root *root = BTRFS_I(dir)->root;
2290 	int ret;
2291 	struct extent_buffer *eb;
2292 	int slot;
2293 	u32 item_size;
2294 	struct btrfs_dir_item *di;
2295 	struct btrfs_dir_item *log_di;
2296 	int name_len;
2297 	unsigned long ptr;
2298 	unsigned long ptr_end;
2299 	char *name;
2300 	struct inode *inode;
2301 	struct btrfs_key location;
2302 
2303 again:
2304 	eb = path->nodes[0];
2305 	slot = path->slots[0];
2306 	item_size = btrfs_item_size_nr(eb, slot);
2307 	ptr = btrfs_item_ptr_offset(eb, slot);
2308 	ptr_end = ptr + item_size;
2309 	while (ptr < ptr_end) {
2310 		di = (struct btrfs_dir_item *)ptr;
2311 		name_len = btrfs_dir_name_len(eb, di);
2312 		name = kmalloc(name_len, GFP_NOFS);
2313 		if (!name) {
2314 			ret = -ENOMEM;
2315 			goto out;
2316 		}
2317 		read_extent_buffer(eb, name, (unsigned long)(di + 1),
2318 				  name_len);
2319 		log_di = NULL;
2320 		if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
2321 			log_di = btrfs_lookup_dir_item(trans, log, log_path,
2322 						       dir_key->objectid,
2323 						       name, name_len, 0);
2324 		} else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
2325 			log_di = btrfs_lookup_dir_index_item(trans, log,
2326 						     log_path,
2327 						     dir_key->objectid,
2328 						     dir_key->offset,
2329 						     name, name_len, 0);
2330 		}
2331 		if (!log_di) {
2332 			btrfs_dir_item_key_to_cpu(eb, di, &location);
2333 			btrfs_release_path(path);
2334 			btrfs_release_path(log_path);
2335 			inode = read_one_inode(root, location.objectid);
2336 			if (!inode) {
2337 				kfree(name);
2338 				return -EIO;
2339 			}
2340 
2341 			ret = link_to_fixup_dir(trans, root,
2342 						path, location.objectid);
2343 			if (ret) {
2344 				kfree(name);
2345 				iput(inode);
2346 				goto out;
2347 			}
2348 
2349 			inc_nlink(inode);
2350 			ret = btrfs_unlink_inode(trans, BTRFS_I(dir),
2351 					BTRFS_I(inode), name, name_len);
2352 			if (!ret)
2353 				ret = btrfs_run_delayed_items(trans);
2354 			kfree(name);
2355 			iput(inode);
2356 			if (ret)
2357 				goto out;
2358 
2359 			/* there might still be more names under this key,
2360 			 * check and repeat if required
2361 			 */
2362 			ret = btrfs_search_slot(NULL, root, dir_key, path,
2363 						0, 0);
2364 			if (ret == 0)
2365 				goto again;
2366 			ret = 0;
2367 			goto out;
2368 		} else if (IS_ERR(log_di)) {
2369 			kfree(name);
2370 			return PTR_ERR(log_di);
2371 		}
2372 		btrfs_release_path(log_path);
2373 		kfree(name);
2374 
2375 		ptr = (unsigned long)(di + 1);
2376 		ptr += name_len;
2377 	}
2378 	ret = 0;
2379 out:
2380 	btrfs_release_path(path);
2381 	btrfs_release_path(log_path);
2382 	return ret;
2383 }
2384 
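/*
 * The log is authoritative for the xattrs of any inode it contains, so
 * scan the subvolume tree for the inode's xattrs and delete every one
 * that has no matching entry in the log tree.  This makes sure xattrs
 * deleted before the fsync do not reappear after log replay.
 */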
2385 static int replay_xattr_deletes(struct btrfs_trans_handle *trans,
2386 			      struct btrfs_root *root,
2387 			      struct btrfs_root *log,
2388 			      struct btrfs_path *path,
2389 			      const u64 ino)
2390 {
2391 	struct btrfs_key search_key;
2392 	struct btrfs_path *log_path;
2393 	int i;
2394 	int nritems;
2395 	int ret;
2396 
2397 	log_path = btrfs_alloc_path();
2398 	if (!log_path)
2399 		return -ENOMEM;
2400 
2401 	search_key.objectid = ino;
2402 	search_key.type = BTRFS_XATTR_ITEM_KEY;
2403 	search_key.offset = 0;
2404 again:
2405 	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
2406 	if (ret < 0)
2407 		goto out;
2408 process_leaf:
2409 	nritems = btrfs_header_nritems(path->nodes[0]);
2410 	for (i = path->slots[0]; i < nritems; i++) {
2411 		struct btrfs_key key;
2412 		struct btrfs_dir_item *di;
2413 		struct btrfs_dir_item *log_di;
2414 		u32 total_size;
2415 		u32 cur;
2416 
2417 		btrfs_item_key_to_cpu(path->nodes[0], &key, i);
2418 		if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) {
2419 			ret = 0;
2420 			goto out;
2421 		}
2422 
2423 		di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item);
2424 		total_size = btrfs_item_size_nr(path->nodes[0], i);
2425 		cur = 0;
2426 		while (cur < total_size) {
2427 			u16 name_len = btrfs_dir_name_len(path->nodes[0], di);
2428 			u16 data_len = btrfs_dir_data_len(path->nodes[0], di);
2429 			u32 this_len = sizeof(*di) + name_len + data_len;
2430 			char *name;
2431 
2432 			name = kmalloc(name_len, GFP_NOFS);
2433 			if (!name) {
2434 				ret = -ENOMEM;
2435 				goto out;
2436 			}
2437 			read_extent_buffer(path->nodes[0], name,
2438 					   (unsigned long)(di + 1), name_len);
2439 
2440 			log_di = btrfs_lookup_xattr(NULL, log, log_path, ino,
2441 						    name, name_len, 0);
2442 			btrfs_release_path(log_path);
2443 			if (!log_di) {
2444 				/* Doesn't exist in log tree, so delete it. */
2445 				btrfs_release_path(path);
2446 				di = btrfs_lookup_xattr(trans, root, path, ino,
2447 							name, name_len, -1);
2448 				kfree(name);
2449 				if (IS_ERR(di)) {
2450 					ret = PTR_ERR(di);
2451 					goto out;
2452 				}
2453 				ASSERT(di);
2454 				ret = btrfs_delete_one_dir_name(trans, root,
2455 								path, di);
2456 				if (ret)
2457 					goto out;
2458 				btrfs_release_path(path);
2459 				search_key = key;
2460 				goto again;
2461 			}
2462 			kfree(name);
2463 			if (IS_ERR(log_di)) {
2464 				ret = PTR_ERR(log_di);
2465 				goto out;
2466 			}
2467 			cur += this_len;
2468 			di = (struct btrfs_dir_item *)((char *)di + this_len);
2469 		}
2470 	}
2471 	ret = btrfs_next_leaf(root, path);
2472 	if (ret > 0)
2473 		ret = 0;
2474 	else if (ret == 0)
2475 		goto process_leaf;
2476 out:
2477 	btrfs_free_path(log_path);
2478 	btrfs_release_path(path);
2479 	return ret;
2480 }
2481 
2482 
2483 /*
2484  * deletion replay happens before we copy any new directory items
2485  * out of the log or out of backreferences from inodes.  It
2486  * scans the log to find ranges of keys that log is authoritative for,
2487  * and then scans the directory to find items in those ranges that are
2488  * not present in the log.
2489  *
2490  * Anything we don't find in the log is unlinked and removed from the
2491  * directory.
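 *
 * For example, if the log holds a dir log item covering the index
 * range [2, 10] while the subvolume directory still has an entry at
 * index 5 with no matching name in the log, that entry was removed
 * before the fsync and is unlinked here.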
2492  */
2493 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
2494 				       struct btrfs_root *root,
2495 				       struct btrfs_root *log,
2496 				       struct btrfs_path *path,
2497 				       u64 dirid, int del_all)
2498 {
2499 	u64 range_start;
2500 	u64 range_end;
2501 	int key_type = BTRFS_DIR_LOG_ITEM_KEY;
2502 	int ret = 0;
2503 	struct btrfs_key dir_key;
2504 	struct btrfs_key found_key;
2505 	struct btrfs_path *log_path;
2506 	struct inode *dir;
2507 
2508 	dir_key.objectid = dirid;
2509 	dir_key.type = BTRFS_DIR_ITEM_KEY;
2510 	log_path = btrfs_alloc_path();
2511 	if (!log_path)
2512 		return -ENOMEM;
2513 
2514 	dir = read_one_inode(root, dirid);
2515 	/* it isn't an error if the inode isn't there, that can happen
2516 	 * because we replay the deletes before we copy in the inode item
2517 	 * from the log
2518 	 */
2519 	if (!dir) {
2520 		btrfs_free_path(log_path);
2521 		return 0;
2522 	}
2523 again:
2524 	range_start = 0;
2525 	range_end = 0;
2526 	while (1) {
2527 		if (del_all)
2528 			range_end = (u64)-1;
2529 		else {
2530 			ret = find_dir_range(log, path, dirid, key_type,
2531 					     &range_start, &range_end);
2532 			if (ret < 0)
2533 				goto out;
2534 			else if (ret > 0)
2535 				break;
2536 		}
2537 
2538 		dir_key.offset = range_start;
2539 		while (1) {
2540 			int nritems;
2541 			ret = btrfs_search_slot(NULL, root, &dir_key, path,
2542 						0, 0);
2543 			if (ret < 0)
2544 				goto out;
2545 
2546 			nritems = btrfs_header_nritems(path->nodes[0]);
2547 			if (path->slots[0] >= nritems) {
2548 				ret = btrfs_next_leaf(root, path);
2549 				if (ret == 1)
2550 					break;
2551 				else if (ret < 0)
2552 					goto out;
2553 			}
2554 			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2555 					      path->slots[0]);
2556 			if (found_key.objectid != dirid ||
2557 			    found_key.type != dir_key.type)
2558 				goto next_type;
2559 
2560 			if (found_key.offset > range_end)
2561 				break;
2562 
2563 			ret = check_item_in_log(trans, log, path,
2564 						log_path, dir,
2565 						&found_key);
2566 			if (ret)
2567 				goto out;
2568 			if (found_key.offset == (u64)-1)
2569 				break;
2570 			dir_key.offset = found_key.offset + 1;
2571 		}
2572 		btrfs_release_path(path);
2573 		if (range_end == (u64)-1)
2574 			break;
2575 		range_start = range_end + 1;
2576 	}
2577 
2578 next_type:
2579 	ret = 0;
2580 	if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
2581 		key_type = BTRFS_DIR_LOG_INDEX_KEY;
2582 		dir_key.type = BTRFS_DIR_INDEX_KEY;
2583 		btrfs_release_path(path);
2584 		goto again;
2585 	}
2586 out:
2587 	btrfs_release_path(path);
2588 	btrfs_free_path(log_path);
2589 	iput(dir);
2590 	return ret;
2591 }
2592 
2593 /*
2594  * the process_func used to replay items from the log tree.  This
2595  * gets called in two different stages.  The first stage just looks
2596  * for inodes and makes sure they are all copied into the subvolume.
2597  *
2598  * The second stage copies all the other item types from the log into
2599  * the subvolume.  The two-stage approach is slower, but gets rid of
2600  * lots of complexity around inodes referencing other inodes that exist
2601  * only in the log (references come from either directory items or inode
2602  * back refs).
2603  */
2604 static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
2605 			     struct walk_control *wc, u64 gen, int level)
2606 {
2607 	int nritems;
2608 	struct btrfs_path *path;
2609 	struct btrfs_root *root = wc->replay_dest;
2610 	struct btrfs_key key;
2611 	int i;
2612 	int ret;
2613 
2614 	ret = btrfs_read_buffer(eb, gen, level, NULL);
2615 	if (ret)
2616 		return ret;
2617 
2618 	level = btrfs_header_level(eb);
2619 
2620 	if (level != 0)
2621 		return 0;
2622 
2623 	path = btrfs_alloc_path();
2624 	if (!path)
2625 		return -ENOMEM;
2626 
2627 	nritems = btrfs_header_nritems(eb);
2628 	for (i = 0; i < nritems; i++) {
2629 		btrfs_item_key_to_cpu(eb, &key, i);
2630 
2631 		/* inode keys are done during the first stage */
2632 		if (key.type == BTRFS_INODE_ITEM_KEY &&
2633 		    wc->stage == LOG_WALK_REPLAY_INODES) {
2634 			struct btrfs_inode_item *inode_item;
2635 			u32 mode;
2636 
2637 			inode_item = btrfs_item_ptr(eb, i,
2638 					    struct btrfs_inode_item);
2639 			/*
2640 			 * If we have a tmpfile (O_TMPFILE) that got fsync'ed
2641 			 * and never got linked before the fsync, skip it, as
2642 			 * replaying it is pointless since it would be deleted
2643 			 * later. We skip logging tmpfiles, but it's always
2644 			 * possible we are replaying a log created with a kernel
2645 			 * that used to log tmpfiles.
2646 			 */
2647 			if (btrfs_inode_nlink(eb, inode_item) == 0) {
2648 				wc->ignore_cur_inode = true;
2649 				continue;
2650 			} else {
2651 				wc->ignore_cur_inode = false;
2652 			}
2653 			ret = replay_xattr_deletes(wc->trans, root, log,
2654 						   path, key.objectid);
2655 			if (ret)
2656 				break;
2657 			mode = btrfs_inode_mode(eb, inode_item);
2658 			if (S_ISDIR(mode)) {
2659 				ret = replay_dir_deletes(wc->trans,
2660 					 root, log, path, key.objectid, 0);
2661 				if (ret)
2662 					break;
2663 			}
2664 			ret = overwrite_item(wc->trans, root, path,
2665 					     eb, i, &key);
2666 			if (ret)
2667 				break;
2668 
2669 			/*
2670 			 * Before replaying extents, truncate the inode to its
2671 			 * size. We need to do it now and not after log replay
2672 			 * because before an fsync we can have prealloc extents
2673 			 * added beyond the inode's i_size. If we did it after,
2674 			 * through orphan cleanup for example, we would drop
2675 			 * those prealloc extents just after replaying them.
2676 			 */
2677 			if (S_ISREG(mode)) {
2678 				struct btrfs_drop_extents_args drop_args = { 0 };
2679 				struct inode *inode;
2680 				u64 from;
2681 
2682 				inode = read_one_inode(root, key.objectid);
2683 				if (!inode) {
2684 					ret = -EIO;
2685 					break;
2686 				}
2687 				from = ALIGN(i_size_read(inode),
2688 					     root->fs_info->sectorsize);
2689 				drop_args.start = from;
2690 				drop_args.end = (u64)-1;
2691 				drop_args.drop_cache = true;
2692 				ret = btrfs_drop_extents(wc->trans, root,
2693 							 BTRFS_I(inode),
2694 							 &drop_args);
2695 				if (!ret) {
2696 					inode_sub_bytes(inode,
2697 							drop_args.bytes_found);
2698 					/* Update the inode's nbytes. */
2699 					ret = btrfs_update_inode(wc->trans,
2700 							root, BTRFS_I(inode));
2701 				}
2702 				iput(inode);
2703 				if (ret)
2704 					break;
2705 			}
2706 
2707 			ret = link_to_fixup_dir(wc->trans, root,
2708 						path, key.objectid);
2709 			if (ret)
2710 				break;
2711 		}
2712 
2713 		if (wc->ignore_cur_inode)
2714 			continue;
2715 
2716 		if (key.type == BTRFS_DIR_INDEX_KEY &&
2717 		    wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
2718 			ret = replay_one_dir_item(wc->trans, root, path,
2719 						  eb, i, &key);
2720 			if (ret)
2721 				break;
2722 		}
2723 
2724 		if (wc->stage < LOG_WALK_REPLAY_ALL)
2725 			continue;
2726 
2727 		/* these keys are simply copied */
2728 		if (key.type == BTRFS_XATTR_ITEM_KEY) {
2729 			ret = overwrite_item(wc->trans, root, path,
2730 					     eb, i, &key);
2731 			if (ret)
2732 				break;
2733 		} else if (key.type == BTRFS_INODE_REF_KEY ||
2734 			   key.type == BTRFS_INODE_EXTREF_KEY) {
2735 			ret = add_inode_ref(wc->trans, root, log, path,
2736 					    eb, i, &key);
2737 			if (ret && ret != -ENOENT)
2738 				break;
2739 			ret = 0;
2740 		} else if (key.type == BTRFS_EXTENT_DATA_KEY) {
2741 			ret = replay_one_extent(wc->trans, root, path,
2742 						eb, i, &key);
2743 			if (ret)
2744 				break;
2745 		} else if (key.type == BTRFS_DIR_ITEM_KEY) {
2746 			ret = replay_one_dir_item(wc->trans, root, path,
2747 						  eb, i, &key);
2748 			if (ret)
2749 				break;
2750 		}
2751 	}
2752 	btrfs_free_path(path);
2753 	return ret;
2754 }
2755 
2756 /*
2757  * Correctly adjust the reserved bytes occupied by a log tree extent buffer
2758  */
2759 static void unaccount_log_buffer(struct btrfs_fs_info *fs_info, u64 start)
2760 {
2761 	struct btrfs_block_group *cache;
2762 
2763 	cache = btrfs_lookup_block_group(fs_info, start);
2764 	if (!cache) {
2765 		btrfs_err(fs_info, "unable to find block group for %llu", start);
2766 		return;
2767 	}
2768 
2769 	spin_lock(&cache->space_info->lock);
2770 	spin_lock(&cache->lock);
2771 	cache->reserved -= fs_info->nodesize;
2772 	cache->space_info->bytes_reserved -= fs_info->nodesize;
2773 	spin_unlock(&cache->lock);
2774 	spin_unlock(&cache->space_info->lock);
2775 
2776 	btrfs_put_block_group(cache);
2777 }
2778 
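/*
 * Descend from the current level of the path towards the leaves.  Each
 * leaf (the children at level 1) is handed to wc->process_func, and
 * when wc->free is set the visited blocks are also cleaned and either
 * pinned (when we have a transaction handle) or unaccounted.  Returns
 * 0 once the slots at the current level are exhausted.
 */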
2779 static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
2780 				   struct btrfs_root *root,
2781 				   struct btrfs_path *path, int *level,
2782 				   struct walk_control *wc)
2783 {
2784 	struct btrfs_fs_info *fs_info = root->fs_info;
2785 	u64 bytenr;
2786 	u64 ptr_gen;
2787 	struct extent_buffer *next;
2788 	struct extent_buffer *cur;
2789 	u32 blocksize;
2790 	int ret = 0;
2791 
2792 	while (*level > 0) {
2793 		struct btrfs_key first_key;
2794 
2795 		cur = path->nodes[*level];
2796 
2797 		WARN_ON(btrfs_header_level(cur) != *level);
2798 
2799 		if (path->slots[*level] >=
2800 		    btrfs_header_nritems(cur))
2801 			break;
2802 
2803 		bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2804 		ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2805 		btrfs_node_key_to_cpu(cur, &first_key, path->slots[*level]);
2806 		blocksize = fs_info->nodesize;
2807 
2808 		next = btrfs_find_create_tree_block(fs_info, bytenr,
2809 						    btrfs_header_owner(cur),
2810 						    *level - 1);
2811 		if (IS_ERR(next))
2812 			return PTR_ERR(next);
2813 
2814 		if (*level == 1) {
2815 			ret = wc->process_func(root, next, wc, ptr_gen,
2816 					       *level - 1);
2817 			if (ret) {
2818 				free_extent_buffer(next);
2819 				return ret;
2820 			}
2821 
2822 			path->slots[*level]++;
2823 			if (wc->free) {
2824 				ret = btrfs_read_buffer(next, ptr_gen,
2825 							*level - 1, &first_key);
2826 				if (ret) {
2827 					free_extent_buffer(next);
2828 					return ret;
2829 				}
2830 
2831 				if (trans) {
2832 					btrfs_tree_lock(next);
2833 					btrfs_clean_tree_block(next);
2834 					btrfs_wait_tree_block_writeback(next);
2835 					btrfs_tree_unlock(next);
2836 					ret = btrfs_pin_reserved_extent(trans,
2837 							bytenr, blocksize);
2838 					if (ret) {
2839 						free_extent_buffer(next);
2840 						return ret;
2841 					}
2842 					btrfs_redirty_list_add(
2843 						trans->transaction, next);
2844 				} else {
2845 					if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2846 						clear_extent_buffer_dirty(next);
2847 					unaccount_log_buffer(fs_info, bytenr);
2848 				}
2849 			}
2850 			free_extent_buffer(next);
2851 			continue;
2852 		}
2853 		ret = btrfs_read_buffer(next, ptr_gen, *level - 1, &first_key);
2854 		if (ret) {
2855 			free_extent_buffer(next);
2856 			return ret;
2857 		}
2858 
2859 		if (path->nodes[*level-1])
2860 			free_extent_buffer(path->nodes[*level-1]);
2861 		path->nodes[*level-1] = next;
2862 		*level = btrfs_header_level(next);
2863 		path->slots[*level] = 0;
2864 		cond_resched();
2865 	}
2866 	path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
2867 
2868 	cond_resched();
2869 	return 0;
2870 }
2871 
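/*
 * Walk back up the path after walk_down_log_tree() has exhausted a
 * subtree.  Internal nodes are handed to wc->process_func on the way
 * up (and cleaned or unaccounted when wc->free is set).  Returns 0
 * when a sibling slot was found to descend into again, and 1 when the
 * walk of this tree is complete.
 */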
2872 static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
2873 				 struct btrfs_root *root,
2874 				 struct btrfs_path *path, int *level,
2875 				 struct walk_control *wc)
2876 {
2877 	struct btrfs_fs_info *fs_info = root->fs_info;
2878 	int i;
2879 	int slot;
2880 	int ret;
2881 
2882 	for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
2883 		slot = path->slots[i];
2884 		if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
2885 			path->slots[i]++;
2886 			*level = i;
2887 			WARN_ON(*level == 0);
2888 			return 0;
2889 		} else {
2890 			ret = wc->process_func(root, path->nodes[*level], wc,
2891 				 btrfs_header_generation(path->nodes[*level]),
2892 				 *level);
2893 			if (ret)
2894 				return ret;
2895 
2896 			if (wc->free) {
2897 				struct extent_buffer *next;
2898 
2899 				next = path->nodes[*level];
2900 
2901 				if (trans) {
2902 					btrfs_tree_lock(next);
2903 					btrfs_clean_tree_block(next);
2904 					btrfs_wait_tree_block_writeback(next);
2905 					btrfs_tree_unlock(next);
2906 					ret = btrfs_pin_reserved_extent(trans,
2907 						     path->nodes[*level]->start,
2908 						     path->nodes[*level]->len);
2909 					if (ret)
2910 						return ret;
2911 					btrfs_redirty_list_add(trans->transaction,
2912 							       next);
2913 				} else {
2914 					if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2915 						clear_extent_buffer_dirty(next);
2916 
2917 					unaccount_log_buffer(fs_info,
2918 						path->nodes[*level]->start);
2919 				}
2920 			}
2921 			free_extent_buffer(path->nodes[*level]);
2922 			path->nodes[*level] = NULL;
2923 			*level = i + 1;
2924 		}
2925 	}
2926 	return 1;
2927 }
2928 
2929 /*
2930  * Walk the log tree rooted at 'log', calling wc->process_func on every
2931  * buffer.  When wc->free is set, this also releases (or pins) the
2932  * blocks as the walk visits them.
2933  */
2934 static int walk_log_tree(struct btrfs_trans_handle *trans,
2935 			 struct btrfs_root *log, struct walk_control *wc)
2936 {
2937 	struct btrfs_fs_info *fs_info = log->fs_info;
2938 	int ret = 0;
2939 	int wret;
2940 	int level;
2941 	struct btrfs_path *path;
2942 	int orig_level;
2943 
2944 	path = btrfs_alloc_path();
2945 	if (!path)
2946 		return -ENOMEM;
2947 
2948 	level = btrfs_header_level(log->node);
2949 	orig_level = level;
2950 	path->nodes[level] = log->node;
2951 	atomic_inc(&log->node->refs);
2952 	path->slots[level] = 0;
2953 
2954 	while (1) {
2955 		wret = walk_down_log_tree(trans, log, path, &level, wc);
2956 		if (wret > 0)
2957 			break;
2958 		if (wret < 0) {
2959 			ret = wret;
2960 			goto out;
2961 		}
2962 
2963 		wret = walk_up_log_tree(trans, log, path, &level, wc);
2964 		if (wret > 0)
2965 			break;
2966 		if (wret < 0) {
2967 			ret = wret;
2968 			goto out;
2969 		}
2970 	}
2971 
2972 	/* was the root node processed? if not, catch it here */
2973 	if (path->nodes[orig_level]) {
2974 		ret = wc->process_func(log, path->nodes[orig_level], wc,
2975 			 btrfs_header_generation(path->nodes[orig_level]),
2976 			 orig_level);
2977 		if (ret)
2978 			goto out;
2979 		if (wc->free) {
2980 			struct extent_buffer *next;
2981 
2982 			next = path->nodes[orig_level];
2983 
2984 			if (trans) {
2985 				btrfs_tree_lock(next);
2986 				btrfs_clean_tree_block(next);
2987 				btrfs_wait_tree_block_writeback(next);
2988 				btrfs_tree_unlock(next);
2989 				ret = btrfs_pin_reserved_extent(trans,
2990 						next->start, next->len);
2991 				if (ret)
2992 					goto out;
2993 				btrfs_redirty_list_add(trans->transaction, next);
2994 			} else {
2995 				if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2996 					clear_extent_buffer_dirty(next);
2997 				unaccount_log_buffer(fs_info, next->start);
2998 			}
2999 		}
3000 	}
3001 
3002 out:
3003 	btrfs_free_path(path);
3004 	return ret;
3005 }
3006 
3007 /*
3008  * helper function to update the item for a given subvolumes log root
3009  * in the tree of log roots
3010  */
3011 static int update_log_root(struct btrfs_trans_handle *trans,
3012 			   struct btrfs_root *log,
3013 			   struct btrfs_root_item *root_item)
3014 {
3015 	struct btrfs_fs_info *fs_info = log->fs_info;
3016 	int ret;
3017 
3018 	if (log->log_transid == 1) {
3019 		/* insert root item on the first sync */
3020 		ret = btrfs_insert_root(trans, fs_info->log_root_tree,
3021 				&log->root_key, root_item);
3022 	} else {
3023 		ret = btrfs_update_root(trans, fs_info->log_root_tree,
3024 				&log->root_key, root_item);
3025 	}
3026 	return ret;
3027 }
3028 
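/*
 * Wait until the log transaction 'transid' has been committed by some
 * other task.  Called with root->log_mutex held; the mutex is dropped
 * while sleeping and re-acquired before returning.
 */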
3029 static void wait_log_commit(struct btrfs_root *root, int transid)
3030 {
3031 	DEFINE_WAIT(wait);
3032 	int index = transid % 2;
3033 
3034 	/*
3035 	 * we only allow two pending log transactions at a time,
3036 	 * so we know that if ours is more than 2 older than the
3037 	 * current transaction, we're done
3038 	 */
3039 	for (;;) {
3040 		prepare_to_wait(&root->log_commit_wait[index],
3041 				&wait, TASK_UNINTERRUPTIBLE);
3042 
3043 		if (!(root->log_transid_committed < transid &&
3044 		      atomic_read(&root->log_commit[index])))
3045 			break;
3046 
3047 		mutex_unlock(&root->log_mutex);
3048 		schedule();
3049 		mutex_lock(&root->log_mutex);
3050 	}
3051 	finish_wait(&root->log_commit_wait[index], &wait);
3052 }
3053 
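/*
 * Wait until there are no more tasks writing to the current log tree
 * (root->log_writers reaches zero), dropping root->log_mutex while
 * sleeping.
 */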
3054 static void wait_for_writer(struct btrfs_root *root)
3055 {
3056 	DEFINE_WAIT(wait);
3057 
3058 	for (;;) {
3059 		prepare_to_wait(&root->log_writer_wait, &wait,
3060 				TASK_UNINTERRUPTIBLE);
3061 		if (!atomic_read(&root->log_writers))
3062 			break;
3063 
3064 		mutex_unlock(&root->log_mutex);
3065 		schedule();
3066 		mutex_lock(&root->log_mutex);
3067 	}
3068 	finish_wait(&root->log_writer_wait, &wait);
3069 }
3070 
3071 static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
3072 					struct btrfs_log_ctx *ctx)
3073 {
3074 	mutex_lock(&root->log_mutex);
3075 	list_del_init(&ctx->list);
3076 	mutex_unlock(&root->log_mutex);
3077 }
3078 
3079 /*
3080  * Invoked in log mutex context, or else the caller must ensure that no
3081  * other task can access the list.
3082  */
3083 static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
3084 					     int index, int error)
3085 {
3086 	struct btrfs_log_ctx *ctx;
3087 	struct btrfs_log_ctx *safe;
3088 
3089 	list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
3090 		list_del_init(&ctx->list);
3091 		ctx->log_ret = error;
3092 	}
3093 }
3094 
3095 /*
3096  * btrfs_sync_log sends a given tree log down to the disk and
3097  * updates the super blocks to record it.  When this call is done,
3098  * you know that any inodes previously logged are safely on disk only
3099  * if it returns 0.
3100  *
3101  * Any other return value means you need to call btrfs_commit_transaction.
3102  * Some of the edge cases for fsyncing directories that have had unlinks
3103  * or renames done in the past mean that sometimes the only safe
3104  * fsync is to commit the whole FS.  When btrfs_sync_log returns -EAGAIN,
3105  * that has happened.
3106  */
3107 int btrfs_sync_log(struct btrfs_trans_handle *trans,
3108 		   struct btrfs_root *root, struct btrfs_log_ctx *ctx)
3109 {
3110 	int index1;
3111 	int index2;
3112 	int mark;
3113 	int ret;
3114 	struct btrfs_fs_info *fs_info = root->fs_info;
3115 	struct btrfs_root *log = root->log_root;
3116 	struct btrfs_root *log_root_tree = fs_info->log_root_tree;
3117 	struct btrfs_root_item new_root_item;
3118 	int log_transid = 0;
3119 	struct btrfs_log_ctx root_log_ctx;
3120 	struct blk_plug plug;
3121 	u64 log_root_start;
3122 	u64 log_root_level;
3123 
3124 	mutex_lock(&root->log_mutex);
3125 	log_transid = ctx->log_transid;
3126 	if (root->log_transid_committed >= log_transid) {
3127 		mutex_unlock(&root->log_mutex);
3128 		return ctx->log_ret;
3129 	}
3130 
3131 	index1 = log_transid % 2;
3132 	if (atomic_read(&root->log_commit[index1])) {
3133 		wait_log_commit(root, log_transid);
3134 		mutex_unlock(&root->log_mutex);
3135 		return ctx->log_ret;
3136 	}
3137 	ASSERT(log_transid == root->log_transid);
3138 	atomic_set(&root->log_commit[index1], 1);
3139 
3140 	/* wait for previous tree log sync to complete */
3141 	if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
3142 		wait_log_commit(root, log_transid - 1);
3143 
3144 	while (1) {
3145 		int batch = atomic_read(&root->log_batch);
3146 		/* when we're on an ssd, just kick the log commit out */
3147 		if (!btrfs_test_opt(fs_info, SSD) &&
3148 		    test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
3149 			mutex_unlock(&root->log_mutex);
3150 			schedule_timeout_uninterruptible(1);
3151 			mutex_lock(&root->log_mutex);
3152 		}
3153 		wait_for_writer(root);
3154 		if (batch == atomic_read(&root->log_batch))
3155 			break;
3156 	}
3157 
3158 	/* bail out if we need to do a full commit */
3159 	if (btrfs_need_log_full_commit(trans)) {
3160 		ret = -EAGAIN;
3161 		mutex_unlock(&root->log_mutex);
3162 		goto out;
3163 	}
3164 
3165 	if (log_transid % 2 == 0)
3166 		mark = EXTENT_DIRTY;
3167 	else
3168 		mark = EXTENT_NEW;
3169 
3170 	/* we start IO on all the marked extents here, but we don't actually
3171 	 * wait for them until later.
3172 	 */
3173 	blk_start_plug(&plug);
3174 	ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark);
3175 	/*
3176 	 * -EAGAIN happens when someone, e.g., a concurrent transaction
3177 	 *  commit, writes a dirty extent in this tree-log commit. This
3178 	 *  concurrent write will create a hole writing out the extents,
3179 	 *  and we cannot proceed on a zoned filesystem, requiring
3180 	 *  sequential writing. While we could bail out to a full commit
3181 	 *  here, we instead continue and hope that the concurrent writing
3182 	 *  fills the hole.
3183 	 */
3184 	if (ret == -EAGAIN && btrfs_is_zoned(fs_info))
3185 		ret = 0;
3186 	if (ret) {
3187 		blk_finish_plug(&plug);
3188 		btrfs_abort_transaction(trans, ret);
3189 		btrfs_set_log_full_commit(trans);
3190 		mutex_unlock(&root->log_mutex);
3191 		goto out;
3192 	}
3193 
3194 	/*
3195 	 * We _must_ update under the root->log_mutex in order to make sure we
3196 	 * have a consistent view of the log root we are trying to commit at
3197 	 * this moment.
3198 	 *
3199 	 * We _must_ copy this into a local copy, because we are not holding the
3200 	 * log_root_tree->log_mutex yet.  This is important because when we
3201 	 * commit the log_root_tree we must have a consistent view of the
3202 	 * log_root_tree when we update the super block to point at the
3203 	 * log_root_tree bytenr.  If we update the log_root_tree here we'll race
3204 	 * with the commit and possibly point at the new block which we may not
3205 	 * have written out.
3206 	 */
3207 	btrfs_set_root_node(&log->root_item, log->node);
3208 	memcpy(&new_root_item, &log->root_item, sizeof(new_root_item));
3209 
3210 	root->log_transid++;
3211 	log->log_transid = root->log_transid;
3212 	root->log_start_pid = 0;
3213 	/*
3214 	 * IO has been started, blocks of the log tree have WRITTEN flag set
3215 	 * in their headers. New modifications of the log will be written to
3216 	 * new positions, so it's safe to allow log writers to go in.
3217 	 */
3218 	mutex_unlock(&root->log_mutex);
3219 
3220 	if (btrfs_is_zoned(fs_info)) {
3221 		mutex_lock(&fs_info->tree_root->log_mutex);
3222 		if (!log_root_tree->node) {
3223 			ret = btrfs_alloc_log_tree_node(trans, log_root_tree);
3224 			if (ret) {
3225 				mutex_unlock(&fs_info->tree_root->log_mutex);
3226 				goto out;
3227 			}
3228 		}
3229 		mutex_unlock(&fs_info->tree_root->log_mutex);
3230 	}
3231 
3232 	btrfs_init_log_ctx(&root_log_ctx, NULL);
3233 
3234 	mutex_lock(&log_root_tree->log_mutex);
3235 
3236 	index2 = log_root_tree->log_transid % 2;
3237 	list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
3238 	root_log_ctx.log_transid = log_root_tree->log_transid;
3239 
3240 	/*
3241 	 * Now we are safe to update the log_root_tree because we're under the
3242 	 * log_mutex, and we're a current writer so we're holding the commit
3243 	 * open until we drop the log_mutex.
3244 	 */
3245 	ret = update_log_root(trans, log, &new_root_item);
3246 	if (ret) {
3247 		if (!list_empty(&root_log_ctx.list))
3248 			list_del_init(&root_log_ctx.list);
3249 
3250 		blk_finish_plug(&plug);
3251 		btrfs_set_log_full_commit(trans);
3252 
3253 		if (ret != -ENOSPC) {
3254 			btrfs_abort_transaction(trans, ret);
3255 			mutex_unlock(&log_root_tree->log_mutex);
3256 			goto out;
3257 		}
3258 		btrfs_wait_tree_log_extents(log, mark);
3259 		mutex_unlock(&log_root_tree->log_mutex);
3260 		ret = -EAGAIN;
3261 		goto out;
3262 	}
3263 
3264 	if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
3265 		blk_finish_plug(&plug);
3266 		list_del_init(&root_log_ctx.list);
3267 		mutex_unlock(&log_root_tree->log_mutex);
3268 		ret = root_log_ctx.log_ret;
3269 		goto out;
3270 	}
3271 
3272 	index2 = root_log_ctx.log_transid % 2;
3273 	if (atomic_read(&log_root_tree->log_commit[index2])) {
3274 		blk_finish_plug(&plug);
3275 		ret = btrfs_wait_tree_log_extents(log, mark);
3276 		wait_log_commit(log_root_tree,
3277 				root_log_ctx.log_transid);
3278 		mutex_unlock(&log_root_tree->log_mutex);
3279 		if (!ret)
3280 			ret = root_log_ctx.log_ret;
3281 		goto out;
3282 	}
3283 	ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
3284 	atomic_set(&log_root_tree->log_commit[index2], 1);
3285 
3286 	if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
3287 		wait_log_commit(log_root_tree,
3288 				root_log_ctx.log_transid - 1);
3289 	}
3290 
3291 	/*
3292 	 * now that we've moved on to the tree of log tree roots,
3293 	 * check the full commit flag again
3294 	 */
3295 	if (btrfs_need_log_full_commit(trans)) {
3296 		blk_finish_plug(&plug);
3297 		btrfs_wait_tree_log_extents(log, mark);
3298 		mutex_unlock(&log_root_tree->log_mutex);
3299 		ret = -EAGAIN;
3300 		goto out_wake_log_root;
3301 	}
3302 
3303 	ret = btrfs_write_marked_extents(fs_info,
3304 					 &log_root_tree->dirty_log_pages,
3305 					 EXTENT_DIRTY | EXTENT_NEW);
3306 	blk_finish_plug(&plug);
3307 	/*
3308 	 * As described above, -EAGAIN indicates a hole in the extents. We
3309 	 * cannot wait for these writeouts since the waiting would cause a
3310 	 * deadlock. Bail out to the full commit instead.
3311 	 */
3312 	if (ret == -EAGAIN && btrfs_is_zoned(fs_info)) {
3313 		btrfs_set_log_full_commit(trans);
3314 		btrfs_wait_tree_log_extents(log, mark);
3315 		mutex_unlock(&log_root_tree->log_mutex);
3316 		goto out_wake_log_root;
3317 	} else if (ret) {
3318 		btrfs_set_log_full_commit(trans);
3319 		btrfs_abort_transaction(trans, ret);
3320 		mutex_unlock(&log_root_tree->log_mutex);
3321 		goto out_wake_log_root;
3322 	}
3323 	ret = btrfs_wait_tree_log_extents(log, mark);
3324 	if (!ret)
3325 		ret = btrfs_wait_tree_log_extents(log_root_tree,
3326 						  EXTENT_NEW | EXTENT_DIRTY);
3327 	if (ret) {
3328 		btrfs_set_log_full_commit(trans);
3329 		mutex_unlock(&log_root_tree->log_mutex);
3330 		goto out_wake_log_root;
3331 	}
3332 
3333 	log_root_start = log_root_tree->node->start;
3334 	log_root_level = btrfs_header_level(log_root_tree->node);
3335 	log_root_tree->log_transid++;
3336 	mutex_unlock(&log_root_tree->log_mutex);
3337 
3338 	/*
3339 	 * Here we are guaranteed that nobody is going to write the superblock
3340 	 * for the current transaction before us, and that we will not write
3341 	 * our superblock before the previous transaction finishes its commit
3342 	 * and writes its superblock, because:
3343 	 *
3344 	 * 1) We are holding a handle on the current transaction, so nobody
3345 	 *    can commit it until we release the handle;
3346 	 *
3347 	 * 2) Before writing our superblock we acquire the tree_log_mutex, so
3348 	 *    if the previous transaction is still committing, and hasn't yet
3349 	 *    written its superblock, we wait for it to do it, because a
3350 	 *    transaction commit acquires the tree_log_mutex when the commit
3351 	 *    begins and releases it only after writing its superblock.
3352 	 */
3353 	mutex_lock(&fs_info->tree_log_mutex);
3354 
3355 	/*
3356 	 * The previous transaction writeout phase could have failed, and thus
3357 	 * marked the fs in an error state.  We must not commit here, as we
3358 	 * could have updated our generation in the super_for_commit and
3359 	 * writing the super here would result in transid mismatches.  If there
3360 	 * is an error here just bail.
3361 	 */
3362 	if (BTRFS_FS_ERROR(fs_info)) {
3363 		ret = -EIO;
3364 		btrfs_set_log_full_commit(trans);
3365 		btrfs_abort_transaction(trans, ret);
3366 		mutex_unlock(&fs_info->tree_log_mutex);
3367 		goto out_wake_log_root;
3368 	}
3369 
3370 	btrfs_set_super_log_root(fs_info->super_for_commit, log_root_start);
3371 	btrfs_set_super_log_root_level(fs_info->super_for_commit, log_root_level);
3372 	ret = write_all_supers(fs_info, 1);
3373 	mutex_unlock(&fs_info->tree_log_mutex);
3374 	if (ret) {
3375 		btrfs_set_log_full_commit(trans);
3376 		btrfs_abort_transaction(trans, ret);
3377 		goto out_wake_log_root;
3378 	}
3379 
3380 	/*
3381 	 * We know there can only be one task here, since we have not yet set
3382 	 * root->log_commit[index1] to 0 and any task attempting to sync the
3383 	 * log must wait for the previous log transaction to commit if it's
3384 	 * still in progress or wait for the current log transaction commit if
3385 	 * someone else already started it. We use <= and not < because the
3386 	 * first log transaction has an ID of 0.
3387 	 */
3388 	ASSERT(root->last_log_commit <= log_transid);
3389 	root->last_log_commit = log_transid;
3390 
3391 out_wake_log_root:
3392 	mutex_lock(&log_root_tree->log_mutex);
3393 	btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
3394 
3395 	log_root_tree->log_transid_committed++;
3396 	atomic_set(&log_root_tree->log_commit[index2], 0);
3397 	mutex_unlock(&log_root_tree->log_mutex);
3398 
3399 	/*
3400 	 * The barrier before waitqueue_active (in cond_wake_up) is needed so
3401 	 * all the updates above are seen by the woken threads. It might not be
3402 	 * necessary, but proving that seems to be hard.
3403 	 */
3404 	cond_wake_up(&log_root_tree->log_commit_wait[index2]);
3405 out:
3406 	mutex_lock(&root->log_mutex);
3407 	btrfs_remove_all_log_ctxs(root, index1, ret);
3408 	root->log_transid_committed++;
3409 	atomic_set(&root->log_commit[index1], 0);
3410 	mutex_unlock(&root->log_mutex);
3411 
3412 	/*
3413 	 * The barrier before waitqueue_active (in cond_wake_up) is needed so
3414 	 * all the updates above are seen by the woken threads. It might not be
3415 	 * necessary, but proving that seems to be hard.
3416 	 */
3417 	cond_wake_up(&root->log_commit_wait[index1]);
3418 	return ret;
3419 }
3420 
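/*
 * Free a log tree: walk the whole tree with a freeing walk_control so
 * that every extent buffer is released (or pinned when called from a
 * committing transaction), clear the dirty page state and drop our
 * reference on the root.
 */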
3421 static void free_log_tree(struct btrfs_trans_handle *trans,
3422 			  struct btrfs_root *log)
3423 {
3424 	int ret;
3425 	struct walk_control wc = {
3426 		.free = 1,
3427 		.process_func = process_one_buffer
3428 	};
3429 
3430 	if (log->node) {
3431 		ret = walk_log_tree(trans, log, &wc);
3432 		if (ret) {
3433 			if (trans)
3434 				btrfs_abort_transaction(trans, ret);
3435 			else
3436 				btrfs_handle_fs_error(log->fs_info, ret, NULL);
3437 		}
3438 	}
3439 
3440 	clear_extent_bits(&log->dirty_log_pages, 0, (u64)-1,
3441 			  EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
3442 	extent_io_tree_release(&log->log_csum_range);
3443 
3444 	btrfs_put_root(log);
3445 }
3446 
3447 /*
3448  * free all the extents used by the tree log.  This should be called
3449  * at commit time of the full transaction
3450  */
3451 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
3452 {
3453 	if (root->log_root) {
3454 		free_log_tree(trans, root->log_root);
3455 		root->log_root = NULL;
3456 		clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state);
3457 	}
3458 	return 0;
3459 }
3460 
3461 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
3462 			     struct btrfs_fs_info *fs_info)
3463 {
3464 	if (fs_info->log_root_tree) {
3465 		free_log_tree(trans, fs_info->log_root_tree);
3466 		fs_info->log_root_tree = NULL;
3467 		clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &fs_info->tree_root->state);
3468 	}
3469 	return 0;
3470 }
3471 
3472 /*
3473  * Check if an inode was logged in the current transaction. This may often
3474  * return some false positives, because logged_trans is an in memory only field,
3475  * not persisted anywhere. This is meant to be used in contexts where a false
3476  * positive has no functional consequences.
3477  */
3478 static bool inode_logged(struct btrfs_trans_handle *trans,
3479 			 struct btrfs_inode *inode)
3480 {
3481 	if (inode->logged_trans == trans->transid)
3482 		return true;
3483 
3484 	if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &inode->root->state))
3485 		return false;
3486 
3487 	/*
3488 	 * The inode's logged_trans is always 0 when we load it (because it is
3489 	 * not persisted in the inode item or elsewhere). So if it is 0 and the
3490 	 * inode was last modified in the current transaction, then it may have
3491 	 * been logged before in the current transaction, then evicted and
3492 	 * loaded again in the current transaction - or it may never have been
3493 	 * logged in the current transaction, but since we can not be sure, we
3494 	 * have to assume it was, otherwise our callers can leave an inconsistent log.
3495 	 */
3496 	if (inode->logged_trans == 0 &&
3497 	    inode->last_trans == trans->transid &&
3498 	    !test_bit(BTRFS_FS_LOG_RECOVERING, &trans->fs_info->flags))
3499 		return true;
3500 
3501 	return false;
3502 }
3503 
3504 /*
3505  * If both a file and directory are logged, and unlinks or renames are
3506  * mixed in, we have a few interesting corners:
3507  *
3508  * create file X in dir Y
3509  * link file X to X.link in dir Y
3510  * fsync file X
3511  * unlink file X but leave X.link
3512  * fsync dir Y
3513  *
3514  * After a crash we would expect only X.link to exist.  But file X
3515  * didn't get fsync'd again so the log has back refs for X and X.link.
3516  *
3517  * We solve this by removing directory entries and inode backrefs from the
3518  * log when a file that was logged in the current transaction is
3519  * unlinked.  Any later fsync will include the updated log entries, and
3520  * we'll be able to reconstruct the proper directory items from backrefs.
3521  *
3522  * This optimization allows us to avoid relogging the entire inode
3523  * or the entire directory.
3524  */
3525 void btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
3526 				  struct btrfs_root *root,
3527 				  const char *name, int name_len,
3528 				  struct btrfs_inode *dir, u64 index)
3529 {
3530 	struct btrfs_root *log;
3531 	struct btrfs_dir_item *di;
3532 	struct btrfs_path *path;
3533 	int ret;
3534 	int err = 0;
3535 	u64 dir_ino = btrfs_ino(dir);
3536 
3537 	if (!inode_logged(trans, dir))
3538 		return;
3539 
3540 	ret = join_running_log_trans(root);
3541 	if (ret)
3542 		return;
3543 
3544 	mutex_lock(&dir->log_mutex);
3545 
3546 	log = root->log_root;
3547 	path = btrfs_alloc_path();
3548 	if (!path) {
3549 		err = -ENOMEM;
3550 		goto out_unlock;
3551 	}
3552 
3553 	di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
3554 				   name, name_len, -1);
3555 	if (IS_ERR(di)) {
3556 		err = PTR_ERR(di);
3557 		goto fail;
3558 	}
3559 	if (di) {
3560 		ret = btrfs_delete_one_dir_name(trans, log, path, di);
3561 		if (ret) {
3562 			err = ret;
3563 			goto fail;
3564 		}
3565 	}
3566 	btrfs_release_path(path);
3567 	di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
3568 					 index, name, name_len, -1);
3569 	if (IS_ERR(di)) {
3570 		err = PTR_ERR(di);
3571 		goto fail;
3572 	}
3573 	if (di) {
3574 		ret = btrfs_delete_one_dir_name(trans, log, path, di);
3575 		if (ret) {
3576 			err = ret;
3577 			goto fail;
3578 		}
3579 	}
3580 
3581 	/*
3582 	 * We do not need to update the size field of the directory's inode item
3583 	 * because on log replay we update the field to reflect all existing
3584 	 * entries in the directory (see overwrite_item()).
3585 	 */
3586 fail:
3587 	btrfs_free_path(path);
3588 out_unlock:
3589 	mutex_unlock(&dir->log_mutex);
3590 	if (err < 0)
3591 		btrfs_set_log_full_commit(trans);
3592 	btrfs_end_log_trans(root);
3593 }
3594 
3595 /* see comments for btrfs_del_dir_entries_in_log */
3596 void btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
3597 				struct btrfs_root *root,
3598 				const char *name, int name_len,
3599 				struct btrfs_inode *inode, u64 dirid)
3600 {
3601 	struct btrfs_root *log;
3602 	u64 index;
3603 	int ret;
3604 
3605 	if (!inode_logged(trans, inode))
3606 		return;
3607 
3608 	ret = join_running_log_trans(root);
3609 	if (ret)
3610 		return;
3611 	log = root->log_root;
3612 	mutex_lock(&inode->log_mutex);
3613 
3614 	ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
3615 				  dirid, &index);
3616 	mutex_unlock(&inode->log_mutex);
3617 	if (ret < 0 && ret != -ENOENT)
3618 		btrfs_set_log_full_commit(trans);
3619 	btrfs_end_log_trans(root);
3620 }
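
/*
 * Illustrative sketch of how the unlink path uses the two helpers above
 * (cf. btrfs_unlink_inode() in inode.c); the wrapper function and its
 * locals are hypothetical, shown only to make the calling convention
 * concrete:
 */
#if 0	/* example only, not compiled */
static void example_update_log_after_unlink(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_inode *dir,
					    struct btrfs_inode *inode,
					    const char *name, int name_len,
					    u64 index)
{
	/* Drop the dir item and dir index entry for this name from the log. */
	btrfs_del_dir_entries_in_log(trans, root, name, name_len, dir, index);
	/* Drop the inode's back reference (name -> parent dir) from the log. */
	btrfs_del_inode_ref_in_log(trans, root, name, name_len, inode,
				   btrfs_ino(dir));
}
#endif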
3621 
3622 /*
3623  * creates a range item in the log for 'dirid'.  first_offset and
3624  * last_offset tell us which parts of the key space the log should
3625  * be considered authoritative for.
3626  */
3627 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
3628 				       struct btrfs_root *log,
3629 				       struct btrfs_path *path,
3630 				       int key_type, u64 dirid,
3631 				       u64 first_offset, u64 last_offset)
3632 {
3633 	int ret;
3634 	struct btrfs_key key;
3635 	struct btrfs_dir_log_item *item;
3636 
3637 	key.objectid = dirid;
3638 	key.offset = first_offset;
3639 	if (key_type == BTRFS_DIR_ITEM_KEY)
3640 		key.type = BTRFS_DIR_LOG_ITEM_KEY;
3641 	else
3642 		key.type = BTRFS_DIR_LOG_INDEX_KEY;
3643 	ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
3644 	if (ret)
3645 		return ret;
3646 
3647 	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3648 			      struct btrfs_dir_log_item);
3649 	btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
3650 	btrfs_mark_buffer_dirty(path->nodes[0]);
3651 	btrfs_release_path(path);
3652 	return 0;
3653 }
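
/*
 * For example (hypothetical values): if log_dir_items() logged dir index
 * items with offsets 5 through 12, it finishes by calling
 *
 *	insert_dir_log_key(trans, log, path, BTRFS_DIR_INDEX_KEY, dirid, 5, 12);
 *
 * telling log replay that the log is authoritative for index offsets
 * [5, 12] of this directory: any entry in that range found in the
 * subvolume tree but missing from the log tree was deleted and must be
 * removed during replay.
 */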
3654 
3655 static int flush_dir_items_batch(struct btrfs_trans_handle *trans,
3656 				 struct btrfs_root *log,
3657 				 struct extent_buffer *src,
3658 				 struct btrfs_path *dst_path,
3659 				 int start_slot,
3660 				 int count)
3661 {
3662 	char *ins_data = NULL;
3663 	struct btrfs_item_batch batch;
3664 	struct extent_buffer *dst;
3665 	unsigned long src_offset;
3666 	unsigned long dst_offset;
3667 	struct btrfs_key key;
3668 	u32 item_size;
3669 	int ret;
3670 	int i;
3671 
3672 	ASSERT(count > 0);
3673 	batch.nr = count;
3674 
3675 	if (count == 1) {
3676 		btrfs_item_key_to_cpu(src, &key, start_slot);
3677 		item_size = btrfs_item_size_nr(src, start_slot);
3678 		batch.keys = &key;
3679 		batch.data_sizes = &item_size;
3680 		batch.total_data_size = item_size;
3681 	} else {
3682 		struct btrfs_key *ins_keys;
3683 		u32 *ins_sizes;
3684 
3685 		ins_data = kmalloc(count * sizeof(u32) +
3686 				   count * sizeof(struct btrfs_key), GFP_NOFS);
3687 		if (!ins_data)
3688 			return -ENOMEM;
3689 
3690 		ins_sizes = (u32 *)ins_data;
3691 		ins_keys = (struct btrfs_key *)(ins_data + count * sizeof(u32));
3692 		batch.keys = ins_keys;
3693 		batch.data_sizes = ins_sizes;
3694 		batch.total_data_size = 0;
3695 
3696 		for (i = 0; i < count; i++) {
3697 			const int slot = start_slot + i;
3698 
3699 			btrfs_item_key_to_cpu(src, &ins_keys[i], slot);
3700 			ins_sizes[i] = btrfs_item_size_nr(src, slot);
3701 			batch.total_data_size += ins_sizes[i];
3702 		}
3703 	}
3704 
3705 	ret = btrfs_insert_empty_items(trans, log, dst_path, &batch);
3706 	if (ret)
3707 		goto out;
3708 
3709 	dst = dst_path->nodes[0];
3710 	/*
3711 	 * Copy all the items in bulk, in a single copy operation. Item data is
3712 	 * organized such that it's placed at the end of a leaf and from right
3713 	 * to left. For example, the data for the second item ends at an offset
3714 	 * that matches the offset where the data for the first item starts, the
3715 	 * data for the third item ends at an offset that matches the offset
3716 	 * where the data of the second item starts, and so on.
3717 	 * Therefore our source and destination start offsets for copy match the
3718 	 * offsets of the last items (highest slots).
3719 	 */
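	/*
	 * Worked example (illustrative numbers, ignoring the leaf header):
	 * with three items of sizes 10, 20 and 30 bytes in a 4096 byte data
	 * area, item 1 data occupies [4086, 4096), item 2 [4066, 4086) and
	 * item 3 [4036, 4066). The copy below therefore starts at the data
	 * offset of the last item (4036) and spans total_data_size (60)
	 * bytes, covering all three items in one call.
	 */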
3720 	dst_offset = btrfs_item_ptr_offset(dst, dst_path->slots[0] + count - 1);
3721 	src_offset = btrfs_item_ptr_offset(src, start_slot + count - 1);
3722 	copy_extent_buffer(dst, src, dst_offset, src_offset, batch.total_data_size);
3723 	btrfs_release_path(dst_path);
3724 out:
3725 	kfree(ins_data);
3726 
3727 	return ret;
3728 }
3729 
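/*
 * Log the dir items of the leaf that 'path' points to, for the given
 * directory inode and key type. Runs of consecutive items that are not yet
 * in the log tree are inserted in bulk via flush_dir_items_batch(), while
 * items already present in the log are overwritten in place.
 *
 * Returns 1 once a key belonging to another inode or of another type is
 * found (no more dir items of this type), 0 if the whole leaf was
 * processed, or a negative errno on failure.
 */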
3730 static int process_dir_items_leaf(struct btrfs_trans_handle *trans,
3731 				  struct btrfs_inode *inode,
3732 				  struct btrfs_path *path,
3733 				  struct btrfs_path *dst_path,
3734 				  int key_type,
3735 				  struct btrfs_log_ctx *ctx)
3736 {
3737 	struct btrfs_root *log = inode->root->log_root;
3738 	struct extent_buffer *src = path->nodes[0];
3739 	const int nritems = btrfs_header_nritems(src);
3740 	const u64 ino = btrfs_ino(inode);
3741 	const bool inode_logged_before = inode_logged(trans, inode);
3742 	u64 last_logged_key_offset;
3743 	bool last_found = false;
3744 	int batch_start = 0;
3745 	int batch_size = 0;
3746 	int i;
3747 
3748 	if (key_type == BTRFS_DIR_ITEM_KEY)
3749 		last_logged_key_offset = inode->last_dir_item_offset;
3750 	else
3751 		last_logged_key_offset = inode->last_dir_index_offset;
3752 
3753 	for (i = path->slots[0]; i < nritems; i++) {
3754 		struct btrfs_key key;
3755 		int ret;
3756 
3757 		btrfs_item_key_to_cpu(src, &key, i);
3758 
3759 		if (key.objectid != ino || key.type != key_type) {
3760 			last_found = true;
3761 			break;
3762 		}
3763 
3764 		ctx->last_dir_item_offset = key.offset;
3765 		/*
3766 		 * We must make sure that when we log a directory entry, the
3767 		 * corresponding inode, after log replay, has a matching link
3768 		 * count. For example:
3769 		 *
3770 		 * touch foo
3771 		 * mkdir mydir
3772 		 * sync
3773 		 * ln foo mydir/bar
3774 		 * xfs_io -c "fsync" mydir
3775 		 * <crash>
3776 		 * <mount fs and log replay>
3777 		 *
3778 		 * This would result in an fsync log that, when replayed, leaves
3779 		 * our file inode with a link count of 1 but two directory
3780 		 * entries pointing to the same inode. After removing one of the
3781 		 * names, it would not be possible to remove the other name,
3782 		 * which always resulted in stale file handle errors. It would
3783 		 * also not be possible to rmdir the parent directory, since its
3784 		 * i_size could never be decremented to BTRFS_EMPTY_DIR_SIZE,
3785 		 * resulting in -ENOTEMPTY errors.
3786 		 */
3787 		if (!ctx->log_new_dentries) {
3788 			struct btrfs_dir_item *di;
3789 			struct btrfs_key di_key;
3790 
3791 			di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
3792 			btrfs_dir_item_key_to_cpu(src, di, &di_key);
3793 			if ((btrfs_dir_transid(src, di) == trans->transid ||
3794 			     btrfs_dir_type(src, di) == BTRFS_FT_DIR) &&
3795 			    di_key.type != BTRFS_ROOT_ITEM_KEY)
3796 				ctx->log_new_dentries = true;
3797 		}
3798 
3799 		if (!inode_logged_before)
3800 			goto add_to_batch;
3801 
3802 		/*
3803 		 * If the inode was logged before and has logged dir items, we can skip
3804 		 * checking if any item with a key offset larger than the last one
3805 		 * we logged is in the log tree, saving time and avoiding adding
3806 		 * contention on the log tree.
3807 		 */
3808 		if (key.offset > last_logged_key_offset)
3809 			goto add_to_batch;
3810 		/*
3811 		 * Check if the key was already logged before. If not we can add
3812 		 * it to a batch for bulk insertion.
3813 		 */
3814 		ret = btrfs_search_slot(NULL, log, &key, dst_path, 0, 0);
3815 		if (ret < 0) {
3816 			return ret;
3817 		} else if (ret > 0) {
3818 			btrfs_release_path(dst_path);
3819 			goto add_to_batch;
3820 		}
3821 
3822 		/*
3823 		 * Item exists in the log. Overwrite the item in the log if it
3824 		 * has different content or do nothing if it has exactly the same
3825 		 * content. And then flush the current batch if any - do it after
3826 		 * overwriting the current item, or we would deadlock otherwise,
3827 		 * since we are holding a path for the existing item.
3828 		 */
3829 		ret = do_overwrite_item(trans, log, dst_path, src, i, &key);
3830 		if (ret < 0)
3831 			return ret;
3832 
3833 		if (batch_size > 0) {
3834 			ret = flush_dir_items_batch(trans, log, src, dst_path,
3835 						    batch_start, batch_size);
3836 			if (ret < 0)
3837 				return ret;
3838 			batch_size = 0;
3839 		}
3840 		continue;
3841 add_to_batch:
3842 		if (batch_size == 0)
3843 			batch_start = i;
3844 		batch_size++;
3845 	}
3846 
3847 	if (batch_size > 0) {
3848 		int ret;
3849 
3850 		ret = flush_dir_items_batch(trans, log, src, dst_path,
3851 					    batch_start, batch_size);
3852 		if (ret < 0)
3853 			return ret;
3854 	}
3855 
3856 	return last_found ? 1 : 0;
3857 }
3858 
3859 /*
3860  * log all the items included in the current transaction for a given
3861  * directory.  This also creates the range items in the log tree required
3862  * to replay anything deleted before the fsync
3863  */
3864 static noinline int log_dir_items(struct btrfs_trans_handle *trans,
3865 			  struct btrfs_inode *inode,
3866 			  struct btrfs_path *path,
3867 			  struct btrfs_path *dst_path, int key_type,
3868 			  struct btrfs_log_ctx *ctx,
3869 			  u64 min_offset, u64 *last_offset_ret)
3870 {
3871 	struct btrfs_key min_key;
3872 	struct btrfs_root *root = inode->root;
3873 	struct btrfs_root *log = root->log_root;
3874 	int err = 0;
3875 	int ret;
3876 	u64 first_offset = min_offset;
3877 	u64 last_offset = (u64)-1;
3878 	u64 ino = btrfs_ino(inode);
3879 
3880 	min_key.objectid = ino;
3881 	min_key.type = key_type;
3882 	min_key.offset = min_offset;
3883 
3884 	ret = btrfs_search_forward(root, &min_key, path, trans->transid);
3885 
3886 	/*
3887 	 * we didn't find anything from this transaction, see if there
3888 	 * is anything at all
3889 	 */
3890 	if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
3891 		min_key.objectid = ino;
3892 		min_key.type = key_type;
3893 		min_key.offset = (u64)-1;
3894 		btrfs_release_path(path);
3895 		ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3896 		if (ret < 0) {
3897 			btrfs_release_path(path);
3898 			return ret;
3899 		}
3900 		ret = btrfs_previous_item(root, path, ino, key_type);
3901 
3902 		/* if ret == 0 there are items for this type,
3903 		 * create a range to tell us the last key of this type.
3904 		 * otherwise, there are no items in this directory after
3905 		 * *min_offset, and we create a range to indicate that.
3906 		 */
3907 		if (ret == 0) {
3908 			struct btrfs_key tmp;
3909 			btrfs_item_key_to_cpu(path->nodes[0], &tmp,
3910 					      path->slots[0]);
3911 			if (key_type == tmp.type)
3912 				first_offset = max(min_offset, tmp.offset) + 1;
3913 		}
3914 		goto done;
3915 	}
3916 
3917 	/* go backward to find any previous key */
3918 	ret = btrfs_previous_item(root, path, ino, key_type);
3919 	if (ret == 0) {
3920 		struct btrfs_key tmp;
3921 		btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3922 		if (key_type == tmp.type) {
3923 			first_offset = tmp.offset;
3924 			ret = overwrite_item(trans, log, dst_path,
3925 					     path->nodes[0], path->slots[0],
3926 					     &tmp);
3927 			if (ret) {
3928 				err = ret;
3929 				goto done;
3930 			}
3931 		}
3932 	}
3933 	btrfs_release_path(path);
3934 
3935 	/*
3936 	 * Find the first key from this transaction again.  See the note for
3937 	 * log_new_dir_dentries: if we're logging a directory recursively we
3938 	 * won't be holding its i_mutex, which means we can modify the directory
3939 	 * while we're logging it.  If we remove an entry between our first
3940 	 * search and this search we'll not find the key again and can just
3941 	 * bail.
3942 	 */
3943 search:
3944 	ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3945 	if (ret != 0)
3946 		goto done;
3947 
3948 	/*
3949 	 * we have a block from this transaction, log every item in it
3950 	 * from our directory
3951 	 */
3952 	while (1) {
3953 		ret = process_dir_items_leaf(trans, inode, path, dst_path,
3954 					     key_type, ctx);
3955 		if (ret != 0) {
3956 			if (ret < 0)
3957 				err = ret;
3958 			goto done;
3959 		}
3960 		path->slots[0] = btrfs_header_nritems(path->nodes[0]);
3961 
3962 		/*
3963 		 * look ahead to the next item and see if it is also
3964 		 * from this directory and from this transaction
3965 		 */
3966 		ret = btrfs_next_leaf(root, path);
3967 		if (ret) {
3968 			if (ret == 1)
3969 				last_offset = (u64)-1;
3970 			else
3971 				err = ret;
3972 			goto done;
3973 		}
3974 		btrfs_item_key_to_cpu(path->nodes[0], &min_key, path->slots[0]);
3975 		if (min_key.objectid != ino || min_key.type != key_type) {
3976 			last_offset = (u64)-1;
3977 			goto done;
3978 		}
3979 		if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
3980 			ret = overwrite_item(trans, log, dst_path,
3981 					     path->nodes[0], path->slots[0],
3982 					     &min_key);
3983 			if (ret)
3984 				err = ret;
3985 			else
3986 				last_offset = min_key.offset;
3987 			goto done;
3988 		}
3989 		if (need_resched()) {
3990 			btrfs_release_path(path);
3991 			cond_resched();
3992 			goto search;
3993 		}
3994 	}
3995 done:
3996 	btrfs_release_path(path);
3997 	btrfs_release_path(dst_path);
3998 
3999 	if (err == 0) {
4000 		*last_offset_ret = last_offset;
4001 		/*
4002 		 * insert the log range keys to indicate where the log
4003 		 * is valid
4004 		 */
4005 		ret = insert_dir_log_key(trans, log, path, key_type,
4006 					 ino, first_offset, last_offset);
4007 		if (ret)
4008 			err = ret;
4009 	}
4010 	return err;
4011 }
4012 
4013 /*
4014  * Logging directories is very similar to logging inodes. We find all the items
4015  * from the current transaction and write them to the log.
4016  *
4017  * The recovery code scans the directory in the subvolume, and if it finds a
4018  * key in the range logged that is not present in the log tree, then it means
4019  * that dir entry was unlinked during the transaction.
4020  *
4021  * In order for that scan to work, we must include one key smaller than
4022  * the smallest logged by this transaction and one key larger than the largest
4023  * key logged by this transaction.
4024  */
4025 static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
4026 			  struct btrfs_inode *inode,
4027 			  struct btrfs_path *path,
4028 			  struct btrfs_path *dst_path,
4029 			  struct btrfs_log_ctx *ctx)
4030 {
4031 	u64 min_key;
4032 	u64 max_key;
4033 	int ret;
4034 	int key_type = BTRFS_DIR_ITEM_KEY;
4035 
4036 	/*
4037 	 * If this is the first time we are being logged in the current
4038 	 * transaction, or we were logged before but the inode was evicted and
4039 	 * reloaded later, in which case its logged_trans is 0, reset the values
4040 	 * of the last logged key offsets. Note that we don't use the helper
4041 	 * function inode_logged() here - that is because the function returns
4042 	 * true after an inode eviction, assuming the worst case as it can not
4043 	 * true after an inode eviction, assuming the worst case as it cannot
4044 	 * know for sure if the inode was logged before. So we cannot skip key
4045 	 * been logged in this transaction and may have been logged in a past
4046 	 * transaction, so we need to reset the last dir item and index offsets
4047 	 * to (u64)-1.
4048 	 */
4049 	if (inode->logged_trans != trans->transid) {
4050 		inode->last_dir_item_offset = (u64)-1;
4051 		inode->last_dir_index_offset = (u64)-1;
4052 	}
4053 again:
4054 	min_key = 0;
4055 	max_key = 0;
4056 	if (key_type == BTRFS_DIR_ITEM_KEY)
4057 		ctx->last_dir_item_offset = inode->last_dir_item_offset;
4058 	else
4059 		ctx->last_dir_item_offset = inode->last_dir_index_offset;
4060 
4061 	while (1) {
4062 		ret = log_dir_items(trans, inode, path, dst_path, key_type,
4063 				ctx, min_key, &max_key);
4064 		if (ret)
4065 			return ret;
4066 		if (max_key == (u64)-1)
4067 			break;
4068 		min_key = max_key + 1;
4069 	}
4070 
4071 	if (key_type == BTRFS_DIR_ITEM_KEY) {
4072 		inode->last_dir_item_offset = ctx->last_dir_item_offset;
4073 		key_type = BTRFS_DIR_INDEX_KEY;
4074 		goto again;
4075 	} else {
4076 		inode->last_dir_index_offset = ctx->last_dir_item_offset;
4077 	}
4078 	return 0;
4079 }
4080 
4081 /*
4082  * a helper function to drop items from the log before we relog an
4083  * inode.  max_key_type indicates the highest item type to remove.
4084  * This cannot be run for file data extents because it does not
4085  * free the extents they point to.
4086  */
4087 static int drop_inode_items(struct btrfs_trans_handle *trans,
4088 				  struct btrfs_root *log,
4089 				  struct btrfs_path *path,
4090 				  struct btrfs_inode *inode,
4091 				  int max_key_type)
4092 {
4093 	int ret;
4094 	struct btrfs_key key;
4095 	struct btrfs_key found_key;
4096 	int start_slot;
4097 
4098 	if (!inode_logged(trans, inode))
4099 		return 0;
4100 
4101 	key.objectid = btrfs_ino(inode);
4102 	key.type = max_key_type;
4103 	key.offset = (u64)-1;
4104 
4105 	while (1) {
4106 		ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
4107 		BUG_ON(ret == 0); /* Logic error */
4108 		if (ret < 0)
4109 			break;
4110 
4111 		if (path->slots[0] == 0)
4112 			break;
4113 
4114 		path->slots[0]--;
4115 		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
4116 				      path->slots[0]);
4117 
4118 		if (found_key.objectid != key.objectid)
4119 			break;
4120 
4121 		found_key.offset = 0;
4122 		found_key.type = 0;
4123 		ret = btrfs_bin_search(path->nodes[0], &found_key, &start_slot);
4124 		if (ret < 0)
4125 			break;
4126 
4127 		ret = btrfs_del_items(trans, log, path, start_slot,
4128 				      path->slots[0] - start_slot + 1);
4129 		/*
4130 		 * If start slot isn't 0 then we don't need to re-search, we've
4131 		 * found the last guy with the objectid in this tree.
4132 		 */
4133 		if (ret || start_slot != 0)
4134 			break;
4135 		btrfs_release_path(path);
4136 	}
4137 	btrfs_release_path(path);
4138 	if (ret > 0)
4139 		ret = 0;
4140 	return ret;
4141 }
4142 
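/*
 * Drop the log tree copies of an inode's items down to 'new_size',
 * retrying while btrfs_truncate_inode_items() returns -EAGAIN to signal
 * that it must be called again to make further progress.
 */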
4143 static int truncate_inode_items(struct btrfs_trans_handle *trans,
4144 				struct btrfs_root *log_root,
4145 				struct btrfs_inode *inode,
4146 				u64 new_size, u32 min_type)
4147 {
4148 	int ret;
4149 
4150 	do {
4151 		ret = btrfs_truncate_inode_items(trans, log_root, inode,
4152 						 new_size, min_type, NULL);
4153 	} while (ret == -EAGAIN);
4154 
4155 	return ret;
4156 }
4157 
4158 static void fill_inode_item(struct btrfs_trans_handle *trans,
4159 			    struct extent_buffer *leaf,
4160 			    struct btrfs_inode_item *item,
4161 			    struct inode *inode, int log_inode_only,
4162 			    u64 logged_isize)
4163 {
4164 	struct btrfs_map_token token;
4165 	u64 flags;
4166 
4167 	btrfs_init_map_token(&token, leaf);
4168 
4169 	if (log_inode_only) {
4170 		/* set the generation to zero so the recovery code
4171 		 * can tell the difference between a log entry made
4172 		 * just to say 'this inode exists' and one made to
4173 		 * say 'update this inode with these values'
4174 		 */
4175 		btrfs_set_token_inode_generation(&token, item, 0);
4176 		btrfs_set_token_inode_size(&token, item, logged_isize);
4177 	} else {
4178 		btrfs_set_token_inode_generation(&token, item,
4179 						 BTRFS_I(inode)->generation);
4180 		btrfs_set_token_inode_size(&token, item, inode->i_size);
4181 	}
4182 
4183 	btrfs_set_token_inode_uid(&token, item, i_uid_read(inode));
4184 	btrfs_set_token_inode_gid(&token, item, i_gid_read(inode));
4185 	btrfs_set_token_inode_mode(&token, item, inode->i_mode);
4186 	btrfs_set_token_inode_nlink(&token, item, inode->i_nlink);
4187 
4188 	btrfs_set_token_timespec_sec(&token, &item->atime,
4189 				     inode->i_atime.tv_sec);
4190 	btrfs_set_token_timespec_nsec(&token, &item->atime,
4191 				      inode->i_atime.tv_nsec);
4192 
4193 	btrfs_set_token_timespec_sec(&token, &item->mtime,
4194 				     inode->i_mtime.tv_sec);
4195 	btrfs_set_token_timespec_nsec(&token, &item->mtime,
4196 				      inode->i_mtime.tv_nsec);
4197 
4198 	btrfs_set_token_timespec_sec(&token, &item->ctime,
4199 				     inode->i_ctime.tv_sec);
4200 	btrfs_set_token_timespec_nsec(&token, &item->ctime,
4201 				      inode->i_ctime.tv_nsec);
4202 
4203 	/*
4204 	 * We do not need to set the nbytes field, in fact during a fast fsync
4205 	 * its value may not even be correct, since a fast fsync does not wait
4206 	 * for ordered extent completion, which is where we update nbytes, it
4207 	 * only waits for writeback to complete. During log replay as we find
4208 	 * file extent items and replay them, we adjust the nbytes field of the
4209 	 * inode item in subvolume tree as needed (see overwrite_item()).
4210 	 */
4211 
4212 	btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode));
4213 	btrfs_set_token_inode_transid(&token, item, trans->transid);
4214 	btrfs_set_token_inode_rdev(&token, item, inode->i_rdev);
4215 	flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
4216 					  BTRFS_I(inode)->ro_flags);
4217 	btrfs_set_token_inode_flags(&token, item, flags);
4218 	btrfs_set_token_inode_block_group(&token, item, 0);
4219 }
4220 
4221 static int log_inode_item(struct btrfs_trans_handle *trans,
4222 			  struct btrfs_root *log, struct btrfs_path *path,
4223 			  struct btrfs_inode *inode, bool inode_item_dropped)
4224 {
4225 	struct btrfs_inode_item *inode_item;
4226 	int ret;
4227 
4228 	/*
4229 	 * If we are doing a fast fsync and the inode was logged before in the
4230 	 * current transaction, then we know the inode was previously logged and
4231 	 * it exists in the log tree. For performance reasons, in this case use
4232 	 * btrfs_search_slot() directly with ins_len set to 0 so that we never
4233 	 * attempt a write lock on the leaf's parent, which adds unnecessary lock
4234 	 * contention in case there are concurrent fsyncs for other inodes of the
4235 	 * same subvolume. Using btrfs_insert_empty_item() when the inode item
4236 	 * already exists can also result in unnecessarily splitting a leaf.
4237 	 */
4238 	if (!inode_item_dropped && inode->logged_trans == trans->transid) {
4239 		ret = btrfs_search_slot(trans, log, &inode->location, path, 0, 1);
4240 		ASSERT(ret <= 0);
4241 		if (ret > 0)
4242 			ret = -ENOENT;
4243 	} else {
4244 		/*
4245 		 * This means it is the first fsync in the current transaction,
4246 		 * so the inode item is not in the log and we need to insert it.
4247 		 * We can never get -EEXIST because we are only called for a fast
4248 		 * fsync and in case an inode eviction happens after the inode was
4249 		 * logged before in the current transaction, when we load again
4250 		 * logged before in the current transaction, when we load the
4251 		 * inode again, we set BTRFS_INODE_NEEDS_FULL_SYNC on its runtime
4252 		 */
4253 		ret = btrfs_insert_empty_item(trans, log, path, &inode->location,
4254 					      sizeof(*inode_item));
4255 		ASSERT(ret != -EEXIST);
4256 	}
4257 	if (ret)
4258 		return ret;
4259 	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4260 				    struct btrfs_inode_item);
4261 	fill_inode_item(trans, path->nodes[0], inode_item, &inode->vfs_inode,
4262 			0, 0);
4263 	btrfs_release_path(path);
4264 	return 0;
4265 }
4266 
4267 static int log_csums(struct btrfs_trans_handle *trans,
4268 		     struct btrfs_inode *inode,
4269 		     struct btrfs_root *log_root,
4270 		     struct btrfs_ordered_sum *sums)
4271 {
4272 	const u64 lock_end = sums->bytenr + sums->len - 1;
4273 	struct extent_state *cached_state = NULL;
4274 	int ret;
4275 
4276 	/*
4277 	 * If this inode was not used for reflink operations in the current
4278 	 * transaction with new extents, then do the fast path, no need to
4279 	 * worry about logging checksum items with overlapping ranges.
4280 	 */
4281 	if (inode->last_reflink_trans < trans->transid)
4282 		return btrfs_csum_file_blocks(trans, log_root, sums);
4283 
4284 	/*
4285 	 * Serialize logging for checksums. This is to avoid racing with the
4286 	 * same checksum being logged by another task that is logging another
4287 	 * file which happens to refer to the same extent as well. Such races
4288 	 * can leave checksum items in the log with overlapping ranges.
4289 	 */
4290 	ret = lock_extent_bits(&log_root->log_csum_range, sums->bytenr,
4291 			       lock_end, &cached_state);
4292 	if (ret)
4293 		return ret;
4294 	/*
4295 	 * Due to extent cloning, we might have logged a csum item that covers a
4296 	 * subrange of a cloned extent, and later we can end up logging a csum
4297 	 * item for a larger subrange of the same extent or the entire range.
4298 	 * This would leave csum items in the log tree that cover the same range
4299 	 * and break the searches for checksums in the log tree, resulting in
4300 	 * some checksums missing in the fs/subvolume tree. So just delete (or
4301 	 * trim and adjust) any existing csum items in the log for this range.
4302 	 */
4303 	ret = btrfs_del_csums(trans, log_root, sums->bytenr, sums->len);
4304 	if (!ret)
4305 		ret = btrfs_csum_file_blocks(trans, log_root, sums);
4306 
4307 	unlock_extent_cached(&log_root->log_csum_range, sums->bytenr, lock_end,
4308 			     &cached_state);
4309 
4310 	return ret;
4311 }
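
/*
 * Example of the overlap the slow path above guards against (hypothetical
 * numbers): extent E occupies disk bytes [X, X + 256K). File A, which after
 * cloning only refers to the first half of E, is fsynced and logs a csum
 * item for [X, X + 128K). File B, sharing E via reflink, is fsynced next
 * and logs [X, X + 256K). Without deleting/trimming the first item, both
 * items would coexist in the log and csum searches could return the shorter
 * one, leaving checksums missing in the fs/subvolume tree after replay.
 */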
4312 
4313 static noinline int copy_items(struct btrfs_trans_handle *trans,
4314 			       struct btrfs_inode *inode,
4315 			       struct btrfs_path *dst_path,
4316 			       struct btrfs_path *src_path,
4317 			       int start_slot, int nr, int inode_only,
4318 			       u64 logged_isize)
4319 {
4320 	struct btrfs_fs_info *fs_info = trans->fs_info;
4321 	unsigned long src_offset;
4322 	unsigned long dst_offset;
4323 	struct btrfs_root *log = inode->root->log_root;
4324 	struct btrfs_file_extent_item *extent;
4325 	struct btrfs_inode_item *inode_item;
4326 	struct extent_buffer *src = src_path->nodes[0];
4327 	int ret;
4328 	struct btrfs_key *ins_keys;
4329 	u32 *ins_sizes;
4330 	struct btrfs_item_batch batch;
4331 	char *ins_data;
4332 	int i;
4333 	struct list_head ordered_sums;
4334 	int skip_csum = inode->flags & BTRFS_INODE_NODATASUM;
4335 
4336 	INIT_LIST_HEAD(&ordered_sums);
4337 
4338 	ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
4339 			   nr * sizeof(u32), GFP_NOFS);
4340 	if (!ins_data)
4341 		return -ENOMEM;
4342 
4343 	ins_sizes = (u32 *)ins_data;
4344 	ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
4345 	batch.keys = ins_keys;
4346 	batch.data_sizes = ins_sizes;
4347 	batch.total_data_size = 0;
4348 	batch.nr = nr;
4349 
4350 	for (i = 0; i < nr; i++) {
4351 		ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
4352 		batch.total_data_size += ins_sizes[i];
4353 		btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
4354 	}
4355 	ret = btrfs_insert_empty_items(trans, log, dst_path, &batch);
4356 	if (ret) {
4357 		kfree(ins_data);
4358 		return ret;
4359 	}
4360 
4361 	for (i = 0; i < nr; i++, dst_path->slots[0]++) {
4362 		dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
4363 						   dst_path->slots[0]);
4364 
4365 		src_offset = btrfs_item_ptr_offset(src, start_slot + i);
4366 
4367 		if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
4368 			inode_item = btrfs_item_ptr(dst_path->nodes[0],
4369 						    dst_path->slots[0],
4370 						    struct btrfs_inode_item);
4371 			fill_inode_item(trans, dst_path->nodes[0], inode_item,
4372 					&inode->vfs_inode,
4373 					inode_only == LOG_INODE_EXISTS,
4374 					logged_isize);
4375 		} else {
4376 			copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
4377 					   src_offset, ins_sizes[i]);
4378 		}
4379 
4380 		/* take a reference on file data extents so that truncates
4381 		 * or deletes of this inode don't have to relog the inode
4382 		 * again
4383 		 */
4384 		if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY &&
4385 		    !skip_csum) {
4386 			int found_type;
4387 			extent = btrfs_item_ptr(src, start_slot + i,
4388 						struct btrfs_file_extent_item);
4389 
4390 			if (btrfs_file_extent_generation(src, extent) < trans->transid)
4391 				continue;
4392 
4393 			found_type = btrfs_file_extent_type(src, extent);
4394 			if (found_type == BTRFS_FILE_EXTENT_REG) {
4395 				u64 ds, dl, cs, cl;
4396 				ds = btrfs_file_extent_disk_bytenr(src,
4397 								extent);
4398 				/* ds == 0 is a hole */
4399 				if (ds == 0)
4400 					continue;
4401 
4402 				dl = btrfs_file_extent_disk_num_bytes(src,
4403 								extent);
4404 				cs = btrfs_file_extent_offset(src, extent);
4405 				cl = btrfs_file_extent_num_bytes(src,
4406 								extent);
4407 				if (btrfs_file_extent_compression(src,
4408 								  extent)) {
4409 					cs = 0;
4410 					cl = dl;
4411 				}
4412 
4413 				ret = btrfs_lookup_csums_range(
4414 						fs_info->csum_root,
4415 						ds + cs, ds + cs + cl - 1,
4416 						&ordered_sums, 0);
4417 				if (ret)
4418 					break;
4419 			}
4420 		}
4421 	}
4422 
4423 	btrfs_mark_buffer_dirty(dst_path->nodes[0]);
4424 	btrfs_release_path(dst_path);
4425 	kfree(ins_data);
4426 
4427 	/*
4428 	 * we have to do this after the loop above to avoid changing the
4429 	 * log tree while trying to change the log tree.
4430 	 */
4431 	while (!list_empty(&ordered_sums)) {
4432 		struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
4433 						   struct btrfs_ordered_sum,
4434 						   list);
4435 		if (!ret)
4436 			ret = log_csums(trans, inode, log, sums);
4437 		list_del(&sums->list);
4438 		kfree(sums);
4439 	}
4440 
4441 	return ret;
4442 }
4443 
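/* list_sort() comparator: order extent maps by their file offset (start). */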
4444 static int extent_cmp(void *priv, const struct list_head *a,
4445 		      const struct list_head *b)
4446 {
4447 	const struct extent_map *em1, *em2;
4448 
4449 	em1 = list_entry(a, struct extent_map, list);
4450 	em2 = list_entry(b, struct extent_map, list);
4451 
4452 	if (em1->start < em2->start)
4453 		return -1;
4454 	else if (em1->start > em2->start)
4455 		return 1;
4456 	return 0;
4457 }
4458 
4459 static int log_extent_csums(struct btrfs_trans_handle *trans,
4460 			    struct btrfs_inode *inode,
4461 			    struct btrfs_root *log_root,
4462 			    const struct extent_map *em,
4463 			    struct btrfs_log_ctx *ctx)
4464 {
4465 	struct btrfs_ordered_extent *ordered;
4466 	u64 csum_offset;
4467 	u64 csum_len;
4468 	u64 mod_start = em->mod_start;
4469 	u64 mod_len = em->mod_len;
4470 	LIST_HEAD(ordered_sums);
4471 	int ret = 0;
4472 
4473 	if (inode->flags & BTRFS_INODE_NODATASUM ||
4474 	    test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
4475 	    em->block_start == EXTENT_MAP_HOLE)
4476 		return 0;
4477 
4478 	list_for_each_entry(ordered, &ctx->ordered_extents, log_list) {
4479 		const u64 ordered_end = ordered->file_offset + ordered->num_bytes;
4480 		const u64 mod_end = mod_start + mod_len;
4481 		struct btrfs_ordered_sum *sums;
4482 
4483 		if (mod_len == 0)
4484 			break;
4485 
4486 		if (ordered_end <= mod_start)
4487 			continue;
4488 		if (mod_end <= ordered->file_offset)
4489 			break;
4490 
4491 		/*
4492 		 * We are going to copy all the csums on this ordered extent, so
4493 		 * go ahead and adjust mod_start and mod_len in case this ordered
4494 		 * extent has already been logged.
4495 		 */
4496 		if (ordered->file_offset > mod_start) {
4497 			if (ordered_end >= mod_end)
4498 				mod_len = ordered->file_offset - mod_start;
4499 			/*
4500 			 * If we have this case
4501 			 *
4502 			 * |--------- logged extent ---------|
4503 			 *       |----- ordered extent ----|
4504 			 *
4505 			 * Just don't mess with mod_start and mod_len, we'll
4506 			 * just end up logging more csums than we need and it
4507 			 * will be ok.
4508 			 */
4509 		} else {
4510 			if (ordered_end < mod_end) {
4511 				mod_len = mod_end - ordered_end;
4512 				mod_start = ordered_end;
4513 			} else {
4514 				mod_len = 0;
4515 			}
4516 		}
4517 
4518 		/*
4519 		 * To keep us from looping for the above case of an ordered
4520 		 * extent that falls inside of the logged extent.
4521 		 */
4522 		if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM, &ordered->flags))
4523 			continue;
4524 
4525 		list_for_each_entry(sums, &ordered->list, list) {
4526 			ret = log_csums(trans, inode, log_root, sums);
4527 			if (ret)
4528 				return ret;
4529 		}
4530 	}
4531 
4532 	/* We're done, found all csums in the ordered extents. */
4533 	if (mod_len == 0)
4534 		return 0;
4535 
4536 	/* If the extent is compressed we have to save the entire range of csums. */
4537 	if (em->compress_type) {
4538 		csum_offset = 0;
4539 		csum_len = max(em->block_len, em->orig_block_len);
4540 	} else {
4541 		csum_offset = mod_start - em->start;
4542 		csum_len = mod_len;
4543 	}
4544 
4545 	/* block start is already adjusted for the file extent offset. */
4546 	ret = btrfs_lookup_csums_range(trans->fs_info->csum_root,
4547 				       em->block_start + csum_offset,
4548 				       em->block_start + csum_offset +
4549 				       csum_len - 1, &ordered_sums, 0);
4550 	if (ret)
4551 		return ret;
4552 
4553 	while (!list_empty(&ordered_sums)) {
4554 		struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
4555 						   struct btrfs_ordered_sum,
4556 						   list);
4557 		if (!ret)
4558 			ret = log_csums(trans, inode, log_root, sums);
4559 		list_del(&sums->list);
4560 		kfree(sums);
4561 	}
4562 
4563 	return ret;
4564 }
4565 
4566 static int log_one_extent(struct btrfs_trans_handle *trans,
4567 			  struct btrfs_inode *inode,
4568 			  const struct extent_map *em,
4569 			  struct btrfs_path *path,
4570 			  struct btrfs_log_ctx *ctx)
4571 {
4572 	struct btrfs_drop_extents_args drop_args = { 0 };
4573 	struct btrfs_root *log = inode->root->log_root;
4574 	struct btrfs_file_extent_item *fi;
4575 	struct extent_buffer *leaf;
4576 	struct btrfs_map_token token;
4577 	struct btrfs_key key;
4578 	u64 extent_offset = em->start - em->orig_start;
4579 	u64 block_len;
4580 	int ret;
4581 
4582 	ret = log_extent_csums(trans, inode, log, em, ctx);
4583 	if (ret)
4584 		return ret;
4585 
4586 	/*
4587 	 * If this is the first time we are logging the inode in the current
4588 	 * transaction, we can avoid btrfs_drop_extents(), which is expensive
4589 	 * because it does a deletion search, which always acquires write locks
4590 	 * for extent buffers at levels 2, 1 and 0. This not only wastes time
4591 	 * but also adds significant contention in a log tree, since log trees
4592 	 * are small, with a root at level 2 or 3 at most, due to their short
4593 	 * life span.
4594 	 */
4595 	if (inode_logged(trans, inode)) {
4596 		drop_args.path = path;
4597 		drop_args.start = em->start;
4598 		drop_args.end = em->start + em->len;
4599 		drop_args.replace_extent = true;
4600 		drop_args.extent_item_size = sizeof(*fi);
4601 		ret = btrfs_drop_extents(trans, log, inode, &drop_args);
4602 		if (ret)
4603 			return ret;
4604 	}
4605 
4606 	if (!drop_args.extent_inserted) {
4607 		key.objectid = btrfs_ino(inode);
4608 		key.type = BTRFS_EXTENT_DATA_KEY;
4609 		key.offset = em->start;
4610 
4611 		ret = btrfs_insert_empty_item(trans, log, path, &key,
4612 					      sizeof(*fi));
4613 		if (ret)
4614 			return ret;
4615 	}
4616 	leaf = path->nodes[0];
4617 	btrfs_init_map_token(&token, leaf);
4618 	fi = btrfs_item_ptr(leaf, path->slots[0],
4619 			    struct btrfs_file_extent_item);
4620 
4621 	btrfs_set_token_file_extent_generation(&token, fi, trans->transid);
4622 	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4623 		btrfs_set_token_file_extent_type(&token, fi,
4624 						 BTRFS_FILE_EXTENT_PREALLOC);
4625 	else
4626 		btrfs_set_token_file_extent_type(&token, fi,
4627 						 BTRFS_FILE_EXTENT_REG);
4628 
4629 	block_len = max(em->block_len, em->orig_block_len);
4630 	if (em->compress_type != BTRFS_COMPRESS_NONE) {
4631 		btrfs_set_token_file_extent_disk_bytenr(&token, fi,
4632 							em->block_start);
4633 		btrfs_set_token_file_extent_disk_num_bytes(&token, fi, block_len);
4634 	} else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
4635 		btrfs_set_token_file_extent_disk_bytenr(&token, fi,
4636 							em->block_start -
4637 							extent_offset);
4638 		btrfs_set_token_file_extent_disk_num_bytes(&token, fi, block_len);
4639 	} else {
4640 		btrfs_set_token_file_extent_disk_bytenr(&token, fi, 0);
4641 		btrfs_set_token_file_extent_disk_num_bytes(&token, fi, 0);
4642 	}
4643 
4644 	btrfs_set_token_file_extent_offset(&token, fi, extent_offset);
4645 	btrfs_set_token_file_extent_num_bytes(&token, fi, em->len);
4646 	btrfs_set_token_file_extent_ram_bytes(&token, fi, em->ram_bytes);
4647 	btrfs_set_token_file_extent_compression(&token, fi, em->compress_type);
4648 	btrfs_set_token_file_extent_encryption(&token, fi, 0);
4649 	btrfs_set_token_file_extent_other_encoding(&token, fi, 0);
4650 	btrfs_mark_buffer_dirty(leaf);
4651 
4652 	btrfs_release_path(path);
4653 
4654 	return ret;
4655 }
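
/*
 * Note on the disk_bytenr math above (hypothetical numbers): for a regular
 * uncompressed extent, em->block_start already includes the offset into the
 * on-disk extent. With a disk extent at bytenr 1M whose file range started
 * at offset 0, an extent map covering only the second 64K has
 * em->start == 64K, em->orig_start == 0 and em->block_start == 1M + 64K, so
 * extent_offset == 64K and the logged disk_bytenr is
 * em->block_start - extent_offset == 1M, the real start of the extent.
 */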
4656 
4657 /*
4658  * Log all prealloc extents beyond the inode's i_size to make sure we do not
4659  * lose them after doing a fast fsync and replaying the log. We scan the
4660  * subvolume's root instead of iterating the inode's extent map tree because
4661  * otherwise we can log incorrect extent items based on extent map conversion.
4662  * That can happen due to the fact that extent maps are merged when they
4663  * are not in the extent map tree's list of modified extents.
4664  */
4665 static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
4666 				      struct btrfs_inode *inode,
4667 				      struct btrfs_path *path)
4668 {
4669 	struct btrfs_root *root = inode->root;
4670 	struct btrfs_key key;
4671 	const u64 i_size = i_size_read(&inode->vfs_inode);
4672 	const u64 ino = btrfs_ino(inode);
4673 	struct btrfs_path *dst_path = NULL;
4674 	bool dropped_extents = false;
4675 	u64 truncate_offset = i_size;
4676 	struct extent_buffer *leaf;
4677 	int slot;
4678 	int ins_nr = 0;
4679 	int start_slot;
4680 	int ret;
4681 
4682 	if (!(inode->flags & BTRFS_INODE_PREALLOC))
4683 		return 0;
4684 
4685 	key.objectid = ino;
4686 	key.type = BTRFS_EXTENT_DATA_KEY;
4687 	key.offset = i_size;
4688 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4689 	if (ret < 0)
4690 		goto out;
4691 
4692 	/*
4693 	 * We must check if there is a prealloc extent that starts before the
4694 	 * i_size and crosses the i_size boundary. This is to ensure later we
4695 	 * truncate down to the end of that extent and not to the i_size, as
4696 	 * otherwise we end up losing part of the prealloc extent after a log
4697 	 * replay and with an implicit hole if there is another prealloc extent
4698 	 * that starts at an offset beyond i_size.
4699 	 */
4700 	ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY);
4701 	if (ret < 0)
4702 		goto out;
4703 
4704 	if (ret == 0) {
4705 		struct btrfs_file_extent_item *ei;
4706 
4707 		leaf = path->nodes[0];
4708 		slot = path->slots[0];
4709 		ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
4710 
4711 		if (btrfs_file_extent_type(leaf, ei) ==
4712 		    BTRFS_FILE_EXTENT_PREALLOC) {
4713 			u64 extent_end;
4714 
4715 			btrfs_item_key_to_cpu(leaf, &key, slot);
4716 			extent_end = key.offset +
4717 				btrfs_file_extent_num_bytes(leaf, ei);
4718 
4719 			if (extent_end > i_size)
4720 				truncate_offset = extent_end;
4721 		}
4722 	} else {
4723 		ret = 0;
4724 	}
4725 
4726 	while (true) {
4727 		leaf = path->nodes[0];
4728 		slot = path->slots[0];
4729 
4730 		if (slot >= btrfs_header_nritems(leaf)) {
4731 			if (ins_nr > 0) {
4732 				ret = copy_items(trans, inode, dst_path, path,
4733 						 start_slot, ins_nr, 1, 0);
4734 				if (ret < 0)
4735 					goto out;
4736 				ins_nr = 0;
4737 			}
4738 			ret = btrfs_next_leaf(root, path);
4739 			if (ret < 0)
4740 				goto out;
4741 			if (ret > 0) {
4742 				ret = 0;
4743 				break;
4744 			}
4745 			continue;
4746 		}
4747 
4748 		btrfs_item_key_to_cpu(leaf, &key, slot);
4749 		if (key.objectid > ino)
4750 			break;
4751 		if (WARN_ON_ONCE(key.objectid < ino) ||
4752 		    key.type < BTRFS_EXTENT_DATA_KEY ||
4753 		    key.offset < i_size) {
4754 			path->slots[0]++;
4755 			continue;
4756 		}
4757 		if (!dropped_extents) {
4758 			/*
4759 			 * Avoid logging extent items logged in past fsync calls
4760 			 * and leading to duplicate keys in the log tree.
4761 			 */
4762 			ret = truncate_inode_items(trans, root->log_root, inode,
4763 						   truncate_offset,
4764 						   BTRFS_EXTENT_DATA_KEY);
4765 			if (ret)
4766 				goto out;
4767 			dropped_extents = true;
4768 		}
4769 		if (ins_nr == 0)
4770 			start_slot = slot;
4771 		ins_nr++;
4772 		path->slots[0]++;
4773 		if (!dst_path) {
4774 			dst_path = btrfs_alloc_path();
4775 			if (!dst_path) {
4776 				ret = -ENOMEM;
4777 				goto out;
4778 			}
4779 		}
4780 	}
4781 	if (ins_nr > 0)
4782 		ret = copy_items(trans, inode, dst_path, path,
4783 				 start_slot, ins_nr, 1, 0);
4784 out:
4785 	btrfs_release_path(path);
4786 	btrfs_free_path(dst_path);
4787 	return ret;
4788 }
4789 
4790 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
4791 				     struct btrfs_inode *inode,
4792 				     struct btrfs_path *path,
4793 				     struct btrfs_log_ctx *ctx)
4794 {
4795 	struct btrfs_ordered_extent *ordered;
4796 	struct btrfs_ordered_extent *tmp;
4797 	struct extent_map *em, *n;
4798 	struct list_head extents;
4799 	struct extent_map_tree *tree = &inode->extent_tree;
4800 	int ret = 0;
4801 	int num = 0;
4802 
4803 	INIT_LIST_HEAD(&extents);
4804 
4805 	write_lock(&tree->lock);
4806 
4807 	list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
4808 		list_del_init(&em->list);
4809 		/*
4810 		 * The cap is arbitrary: walking a very long list of modified
4811 		 * extents gets really CPU intensive, and once we have this many
4812 		 * extents to log we are better off just committing the
4813 		 * transaction, since that will be faster.
4814 		 */
4815 		if (++num > 32768) {
4816 			list_del_init(&tree->modified_extents);
4817 			ret = -EFBIG;
4818 			goto process;
4819 		}
4820 
4821 		if (em->generation < trans->transid)
4822 			continue;
4823 
4824 		/* We log prealloc extents beyond eof later. */
4825 		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) &&
4826 		    em->start >= i_size_read(&inode->vfs_inode))
4827 			continue;
4828 
4829 		/* Need a ref to keep it from getting evicted from cache */
4830 		refcount_inc(&em->refs);
4831 		set_bit(EXTENT_FLAG_LOGGING, &em->flags);
4832 		list_add_tail(&em->list, &extents);
4833 		num++;
4834 	}
4835 
4836 	list_sort(NULL, &extents, extent_cmp);
4837 process:
4838 	while (!list_empty(&extents)) {
4839 		em = list_entry(extents.next, struct extent_map, list);
4840 
4841 		list_del_init(&em->list);
4842 
4843 		/*
4844 		 * If we had an error we just need to delete everybody from our
4845 		 * private list.
4846 		 */
4847 		if (ret) {
4848 			clear_em_logging(tree, em);
4849 			free_extent_map(em);
4850 			continue;
4851 		}
4852 
4853 		write_unlock(&tree->lock);
4854 
4855 		ret = log_one_extent(trans, inode, em, path, ctx);
4856 		write_lock(&tree->lock);
4857 		clear_em_logging(tree, em);
4858 		free_extent_map(em);
4859 	}
4860 	WARN_ON(!list_empty(&extents));
4861 	write_unlock(&tree->lock);
4862 
4863 	btrfs_release_path(path);
4864 	if (!ret)
4865 		ret = btrfs_log_prealloc_extents(trans, inode, path);
4866 	if (ret)
4867 		return ret;
4868 
4869 	/*
4870 	 * We have logged all extents successfully, now make sure the commit of
4871 	 * the current transaction waits for the ordered extents to complete
4872 	 * before it commits and wipes out the log trees, otherwise we would
4873 	 * lose data if an ordered extents completes after the transaction
4874 	 * lose data if an ordered extent completes after the transaction
4875 	 */
4876 	list_for_each_entry_safe(ordered, tmp, &ctx->ordered_extents, log_list) {
4877 		list_del_init(&ordered->log_list);
4878 		set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags);
4879 
4880 		if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
4881 			spin_lock_irq(&inode->ordered_tree.lock);
4882 			if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
4883 				set_bit(BTRFS_ORDERED_PENDING, &ordered->flags);
4884 				atomic_inc(&trans->transaction->pending_ordered);
4885 			}
4886 			spin_unlock_irq(&inode->ordered_tree.lock);
4887 		}
4888 		btrfs_put_ordered_extent(ordered);
4889 	}
4890 
4891 	return 0;
4892 }
4893 
4894 static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode,
4895 			     struct btrfs_path *path, u64 *size_ret)
4896 {
4897 	struct btrfs_key key;
4898 	int ret;
4899 
4900 	key.objectid = btrfs_ino(inode);
4901 	key.type = BTRFS_INODE_ITEM_KEY;
4902 	key.offset = 0;
4903 
4904 	ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
4905 	if (ret < 0) {
4906 		return ret;
4907 	} else if (ret > 0) {
4908 		*size_ret = 0;
4909 	} else {
4910 		struct btrfs_inode_item *item;
4911 
4912 		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4913 				      struct btrfs_inode_item);
4914 		*size_ret = btrfs_inode_size(path->nodes[0], item);
4915 		/*
4916 		 * If the in-memory inode's i_size is smaller than the inode
4917 		 * size stored in the btree, return the inode's i_size, so
4918 		 * that we get a correct inode size after replaying the log
4919 		 * when, before a power failure, we had a shrinking truncate
4920 		 * followed by the addition of a new name (rename / new hard link).
4921 		 * Otherwise return the inode size from the btree, to avoid
4922 		 * data loss when replaying a log due to previously doing a
4923 		 * write that expands the inode's size and logging a new name
4924 		 * immediately after.
4925 		 */
4926 		if (*size_ret > inode->vfs_inode.i_size)
4927 			*size_ret = inode->vfs_inode.i_size;
4928 	}
4929 
4930 	btrfs_release_path(path);
4931 	return 0;
4932 }
4933 
4934 /*
4935  * At the moment we always log all xattrs. This is to figure out at log replay
4936  * time which xattrs must have their deletion replayed. If an xattr is missing
4937  * in the log tree and exists in the fs/subvol tree, we delete it. This is
4938  * because if an xattr is deleted, the inode is fsynced and a power failure
4939  * happens, causing the log to be replayed the next time the fs is mounted,
4940  * we want the xattr to not exist anymore (same behaviour as other filesystems
4941  * with a journal, ext3/4, xfs, f2fs, etc).
4942  */
4943 static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
4944 				struct btrfs_inode *inode,
4945 				struct btrfs_path *path,
4946 				struct btrfs_path *dst_path)
4947 {
4948 	struct btrfs_root *root = inode->root;
4949 	int ret;
4950 	struct btrfs_key key;
4951 	const u64 ino = btrfs_ino(inode);
4952 	int ins_nr = 0;
4953 	int start_slot = 0;
4954 	bool found_xattrs = false;
4955 
4956 	if (test_bit(BTRFS_INODE_NO_XATTRS, &inode->runtime_flags))
4957 		return 0;
4958 
4959 	key.objectid = ino;
4960 	key.type = BTRFS_XATTR_ITEM_KEY;
4961 	key.offset = 0;
4962 
4963 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4964 	if (ret < 0)
4965 		return ret;
4966 
4967 	while (true) {
4968 		int slot = path->slots[0];
4969 		struct extent_buffer *leaf = path->nodes[0];
4970 		int nritems = btrfs_header_nritems(leaf);
4971 
4972 		if (slot >= nritems) {
4973 			if (ins_nr > 0) {
4974 				ret = copy_items(trans, inode, dst_path, path,
4975 						 start_slot, ins_nr, 1, 0);
4976 				if (ret < 0)
4977 					return ret;
4978 				ins_nr = 0;
4979 			}
4980 			ret = btrfs_next_leaf(root, path);
4981 			if (ret < 0)
4982 				return ret;
4983 			else if (ret > 0)
4984 				break;
4985 			continue;
4986 		}
4987 
4988 		btrfs_item_key_to_cpu(leaf, &key, slot);
4989 		if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY)
4990 			break;
4991 
4992 		if (ins_nr == 0)
4993 			start_slot = slot;
4994 		ins_nr++;
4995 		path->slots[0]++;
4996 		found_xattrs = true;
4997 		cond_resched();
4998 	}
4999 	if (ins_nr > 0) {
5000 		ret = copy_items(trans, inode, dst_path, path,
5001 				 start_slot, ins_nr, 1, 0);
5002 		if (ret < 0)
5003 			return ret;
5004 	}
5005 
5006 	if (!found_xattrs)
5007 		set_bit(BTRFS_INODE_NO_XATTRS, &inode->runtime_flags);
5008 
5009 	return 0;
5010 }
5011 
5012 /*
5013  * When using the NO_HOLES feature, if we punch a hole that causes the
5014  * deletion of entire leaves or of all the extent items of the first leaf (the
5015  * one that contains the inode item and references), we may end up not
5016  * processing any extents, because there are no leaves with a generation
5017  * matching the current transaction that have extent items for our inode. So we
5018  * need to find out whether any holes exist and then log them. We also need to
5019  * log holes after any truncate operation that changes the inode's size.
5020  */
5021 static int btrfs_log_holes(struct btrfs_trans_handle *trans,
5022 			   struct btrfs_inode *inode,
5023 			   struct btrfs_path *path)
5024 {
5025 	struct btrfs_root *root = inode->root;
5026 	struct btrfs_fs_info *fs_info = root->fs_info;
5027 	struct btrfs_key key;
5028 	const u64 ino = btrfs_ino(inode);
5029 	const u64 i_size = i_size_read(&inode->vfs_inode);
5030 	u64 prev_extent_end = 0;
5031 	int ret;
5032 
5033 	if (!btrfs_fs_incompat(fs_info, NO_HOLES) || i_size == 0)
5034 		return 0;
5035 
5036 	key.objectid = ino;
5037 	key.type = BTRFS_EXTENT_DATA_KEY;
5038 	key.offset = 0;
5039 
5040 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5041 	if (ret < 0)
5042 		return ret;
5043 
5044 	while (true) {
5045 		struct extent_buffer *leaf = path->nodes[0];
5046 
5047 		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
5048 			ret = btrfs_next_leaf(root, path);
5049 			if (ret < 0)
5050 				return ret;
5051 			if (ret > 0) {
5052 				ret = 0;
5053 				break;
5054 			}
5055 			leaf = path->nodes[0];
5056 		}
5057 
5058 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
5059 		if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
5060 			break;
5061 
5062 		/* We have a hole, log it. */
5063 		if (prev_extent_end < key.offset) {
5064 			const u64 hole_len = key.offset - prev_extent_end;
5065 
5066 			/*
5067 			 * Release the path to avoid deadlocks with other code
5068 			 * paths that search the root while holding locks on
5069 			 * leafs from the log root.
5070 			 * leaves from the log root.
5071 			btrfs_release_path(path);
5072 			ret = btrfs_insert_file_extent(trans, root->log_root,
5073 						       ino, prev_extent_end, 0,
5074 						       0, hole_len, 0, hole_len,
5075 						       0, 0, 0);
5076 			if (ret < 0)
5077 				return ret;
5078 
5079 			/*
5080 			 * Search for the same key again in the root. Since it's
5081 			 * an extent item and we are holding the inode lock, the
5082 			 * key must still exist. If it doesn't, just emit a warning
5083 			 * and return an error to fall back to a transaction
5084 			 * commit.
5085 			 */
5086 			ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5087 			if (ret < 0)
5088 				return ret;
5089 			if (WARN_ON(ret > 0))
5090 				return -ENOENT;
5091 			leaf = path->nodes[0];
5092 		}
5093 
5094 		prev_extent_end = btrfs_file_extent_end(path);
5095 		path->slots[0]++;
5096 		cond_resched();
5097 	}
5098 
5099 	if (prev_extent_end < i_size) {
5100 		u64 hole_len;
5101 
5102 		btrfs_release_path(path);
5103 		hole_len = ALIGN(i_size - prev_extent_end, fs_info->sectorsize);
5104 		ret = btrfs_insert_file_extent(trans, root->log_root,
5105 					       ino, prev_extent_end, 0, 0,
5106 					       hole_len, 0, hole_len,
5107 					       0, 0, 0);
5108 		if (ret < 0)
5109 			return ret;
5110 	}
5111 
5112 	return 0;
5113 }
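
/*
 * Example (hypothetical layout): with NO_HOLES enabled, a 12K file with
 * extents at [0, 4K) and [8K, 12K) has an implicit hole at [4K, 8K). The
 * loop above logs a file extent item with disk_bytenr 0 (a hole) and length
 * 4K for that range, and the final check inserts a tail hole whenever the
 * last extent ends below i_size.
 */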
5114 
5115 /*
5116  * When we are logging a new inode X, check whether it has a reference that
5117  * matches a reference of some other inode Y created in a past transaction
5118  * and renamed in the current transaction. If we don't do this, then at
5119  * log replay time we can lose inode Y (and all its files if it's a directory):
5120  *
5121  * mkdir /mnt/x
5122  * echo "hello world" > /mnt/x/foobar
5123  * sync
5124  * mv /mnt/x /mnt/y
5125  * mkdir /mnt/x                 # or touch /mnt/x
5126  * xfs_io -c fsync /mnt/x
5127  * <power fail>
5128  * mount fs, trigger log replay
5129  *
5130  * After the log replay procedure, we would lose the first directory and all its
5131  * files (file foobar).
5132  * For the case where inode Y is not a directory we simply end up losing it:
5133  *
5134  * echo "123" > /mnt/foo
5135  * sync
5136  * mv /mnt/foo /mnt/bar
5137  * echo "abc" > /mnt/foo
5138  * xfs_io -c fsync /mnt/foo
5139  * <power fail>
5140  *
5141  * We also need this for cases where a snapshot entry is replaced by some other
5142  * entry (file or directory) otherwise we end up with an unreplayable log due to
5143  * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
5144  * if it were a regular entry:
5145  *
5146  * mkdir /mnt/x
5147  * btrfs subvolume snapshot /mnt /mnt/x/snap
5148  * btrfs subvolume delete /mnt/x/snap
5149  * rmdir /mnt/x
5150  * mkdir /mnt/x
5151  * fsync /mnt/x or fsync some new file inside it
5152  * <power fail>
5153  *
5154  * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
5155  * the same transaction.
5156  */
5157 static int btrfs_check_ref_name_override(struct extent_buffer *eb,
5158 					 const int slot,
5159 					 const struct btrfs_key *key,
5160 					 struct btrfs_inode *inode,
5161 					 u64 *other_ino, u64 *other_parent)
5162 {
5163 	int ret;
5164 	struct btrfs_path *search_path;
5165 	char *name = NULL;
5166 	u32 name_len = 0;
5167 	u32 item_size = btrfs_item_size_nr(eb, slot);
5168 	u32 cur_offset = 0;
5169 	unsigned long ptr = btrfs_item_ptr_offset(eb, slot);
5170 
5171 	search_path = btrfs_alloc_path();
5172 	if (!search_path)
5173 		return -ENOMEM;
5174 	search_path->search_commit_root = 1;
5175 	search_path->skip_locking = 1;
5176 
5177 	while (cur_offset < item_size) {
5178 		u64 parent;
5179 		u32 this_name_len;
5180 		u32 this_len;
5181 		unsigned long name_ptr;
5182 		struct btrfs_dir_item *di;
5183 
5184 		if (key->type == BTRFS_INODE_REF_KEY) {
5185 			struct btrfs_inode_ref *iref;
5186 
5187 			iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
5188 			parent = key->offset;
5189 			this_name_len = btrfs_inode_ref_name_len(eb, iref);
5190 			name_ptr = (unsigned long)(iref + 1);
5191 			this_len = sizeof(*iref) + this_name_len;
5192 		} else {
5193 			struct btrfs_inode_extref *extref;
5194 
5195 			extref = (struct btrfs_inode_extref *)(ptr +
5196 							       cur_offset);
5197 			parent = btrfs_inode_extref_parent(eb, extref);
5198 			this_name_len = btrfs_inode_extref_name_len(eb, extref);
5199 			name_ptr = (unsigned long)&extref->name;
5200 			this_len = sizeof(*extref) + this_name_len;
5201 		}
5202 
5203 		if (this_name_len > name_len) {
5204 			char *new_name;
5205 
5206 			new_name = krealloc(name, this_name_len, GFP_NOFS);
5207 			if (!new_name) {
5208 				ret = -ENOMEM;
5209 				goto out;
5210 			}
5211 			name_len = this_name_len;
5212 			name = new_name;
5213 		}
5214 
5215 		read_extent_buffer(eb, name, name_ptr, this_name_len);
5216 		di = btrfs_lookup_dir_item(NULL, inode->root, search_path,
5217 				parent, name, this_name_len, 0);
5218 		if (di && !IS_ERR(di)) {
5219 			struct btrfs_key di_key;
5220 
5221 			btrfs_dir_item_key_to_cpu(search_path->nodes[0],
5222 						  di, &di_key);
5223 			if (di_key.type == BTRFS_INODE_ITEM_KEY) {
5224 				if (di_key.objectid != key->objectid) {
5225 					ret = 1;
5226 					*other_ino = di_key.objectid;
5227 					*other_parent = parent;
5228 				} else {
5229 					ret = 0;
5230 				}
5231 			} else {
5232 				ret = -EAGAIN;
5233 			}
5234 			goto out;
5235 		} else if (IS_ERR(di)) {
5236 			ret = PTR_ERR(di);
5237 			goto out;
5238 		}
5239 		btrfs_release_path(search_path);
5240 
5241 		cur_offset += this_len;
5242 	}
5243 	ret = 0;
5244 out:
5245 	btrfs_free_path(search_path);
5246 	kfree(name);
5247 	return ret;
5248 }
5249 
5250 struct btrfs_ino_list {
5251 	u64 ino;
5252 	u64 parent;
5253 	struct list_head list;
5254 };
5255 
5256 static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
5257 				  struct btrfs_root *root,
5258 				  struct btrfs_path *path,
5259 				  struct btrfs_log_ctx *ctx,
5260 				  u64 ino, u64 parent)
5261 {
5262 	struct btrfs_ino_list *ino_elem;
5263 	LIST_HEAD(inode_list);
5264 	int ret = 0;
5265 
5266 	ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
5267 	if (!ino_elem)
5268 		return -ENOMEM;
5269 	ino_elem->ino = ino;
5270 	ino_elem->parent = parent;
5271 	list_add_tail(&ino_elem->list, &inode_list);
5272 
5273 	while (!list_empty(&inode_list)) {
5274 		struct btrfs_fs_info *fs_info = root->fs_info;
5275 		struct btrfs_key key;
5276 		struct inode *inode;
5277 
5278 		ino_elem = list_first_entry(&inode_list, struct btrfs_ino_list,
5279 					    list);
5280 		ino = ino_elem->ino;
5281 		parent = ino_elem->parent;
5282 		list_del(&ino_elem->list);
5283 		kfree(ino_elem);
5284 		if (ret)
5285 			continue;
5286 
5287 		btrfs_release_path(path);
5288 
5289 		inode = btrfs_iget(fs_info->sb, ino, root);
5290 		/*
5291 		 * If the other inode that had a conflicting dir entry was
5292 		 * deleted in the current transaction, we need to log its parent
5293 		 * directory.
5294 		 */
5295 		if (IS_ERR(inode)) {
5296 			ret = PTR_ERR(inode);
5297 			if (ret == -ENOENT) {
5298 				inode = btrfs_iget(fs_info->sb, parent, root);
5299 				if (IS_ERR(inode)) {
5300 					ret = PTR_ERR(inode);
5301 				} else {
5302 					ret = btrfs_log_inode(trans,
5303 						      BTRFS_I(inode),
5304 						      LOG_OTHER_INODE_ALL,
5305 						      ctx);
5306 					btrfs_add_delayed_iput(inode);
5307 				}
5308 			}
5309 			continue;
5310 		}
5311 		/*
5312 		 * If the inode was already logged skip it - otherwise we can
5313 		 * hit an infinite loop. Example:
5314 		 *
5315 		 * From the commit root (previous transaction) we have the
5316 		 * following inodes:
5317 		 *
5318 		 * inode 257 a directory
5319 		 * inode 258 with references "zz" and "zz_link" on inode 257
5320 		 * inode 259 with reference "a" on inode 257
5321 		 *
5322 		 * And in the current (uncommitted) transaction we have:
5323 		 *
5324 		 * inode 257 a directory, unchanged
5325 		 * inode 258 with references "a" and "a2" on inode 257
5326 		 * inode 259 with reference "zz_link" on inode 257
5327 		 * inode 261 with reference "zz" on inode 257
5328 		 *
5329 		 * When logging inode 261 the following infinite loop could
5330 		 * happen if we don't skip already logged inodes:
5331 		 *
5332 		 * - we detect inode 258 as a conflicting inode, with inode 261
5333 		 *   on reference "zz", and log it;
5334 		 *
5335 		 * - we detect inode 259 as a conflicting inode, with inode 258
5336 		 *   on reference "a", and log it;
5337 		 *
5338 		 * - we detect inode 258 as a conflicting inode, with inode 259
5339 		 *   on reference "zz_link", and log it - again! After this we
5340 		 *   repeat the above steps forever.
5341 		 */
5342 		spin_lock(&BTRFS_I(inode)->lock);
5343 		/*
5344 		 * Check the inode's logged_trans only instead of
5345 		 * btrfs_inode_in_log(). This is because the last_log_commit of
5346 		 * the inode is not updated when we only log that it exists (see
5347 		 * btrfs_log_inode()).
5348 		 */
5349 		if (BTRFS_I(inode)->logged_trans == trans->transid) {
5350 			spin_unlock(&BTRFS_I(inode)->lock);
5351 			btrfs_add_delayed_iput(inode);
5352 			continue;
5353 		}
5354 		spin_unlock(&BTRFS_I(inode)->lock);
5355 		/*
5356 		 * We are safe logging the other inode without acquiring its
5357 		 * lock as long as we log with the LOG_INODE_EXISTS mode. We
5358 		 * are safe against concurrent renames of the other inode as
5359 		 * well because during a rename we pin the log and update the
5360 		 * log with the new name before we unpin it.
5361 		 */
5362 		ret = btrfs_log_inode(trans, BTRFS_I(inode), LOG_OTHER_INODE, ctx);
5363 		if (ret) {
5364 			btrfs_add_delayed_iput(inode);
5365 			continue;
5366 		}
5367 
5368 		key.objectid = ino;
5369 		key.type = BTRFS_INODE_REF_KEY;
5370 		key.offset = 0;
5371 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5372 		if (ret < 0) {
5373 			btrfs_add_delayed_iput(inode);
5374 			continue;
5375 		}
5376 
5377 		while (true) {
5378 			struct extent_buffer *leaf = path->nodes[0];
5379 			int slot = path->slots[0];
5380 			u64 other_ino = 0;
5381 			u64 other_parent = 0;
5382 
5383 			if (slot >= btrfs_header_nritems(leaf)) {
5384 				ret = btrfs_next_leaf(root, path);
5385 				if (ret < 0) {
5386 					break;
5387 				} else if (ret > 0) {
5388 					ret = 0;
5389 					break;
5390 				}
5391 				continue;
5392 			}
5393 
5394 			btrfs_item_key_to_cpu(leaf, &key, slot);
5395 			if (key.objectid != ino ||
5396 			    (key.type != BTRFS_INODE_REF_KEY &&
5397 			     key.type != BTRFS_INODE_EXTREF_KEY)) {
5398 				ret = 0;
5399 				break;
5400 			}
5401 
5402 			ret = btrfs_check_ref_name_override(leaf, slot, &key,
5403 					BTRFS_I(inode), &other_ino,
5404 					&other_parent);
5405 			if (ret < 0)
5406 				break;
5407 			if (ret > 0) {
5408 				ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
5409 				if (!ino_elem) {
5410 					ret = -ENOMEM;
5411 					break;
5412 				}
5413 				ino_elem->ino = other_ino;
5414 				ino_elem->parent = other_parent;
5415 				list_add_tail(&ino_elem->list, &inode_list);
5416 				ret = 0;
5417 			}
5418 			path->slots[0]++;
5419 		}
5420 		btrfs_add_delayed_iput(inode);
5421 	}
5422 
5423 	return ret;
5424 }
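
/*
 * A condensed, informational-only summary of the worklist algorithm
 * implemented above:
 *
 *	queue = [ (ino, parent) ]
 *	while queue is not empty:
 *		(ino, parent) = dequeue()
 *		if the inode no longer exists in the subvolume:
 *			log the parent directory with LOG_OTHER_INODE_ALL
 *		else if not yet logged in this transaction:
 *			log the inode with LOG_OTHER_INODE (existence only)
 *			for each ref/extref name of the inode:
 *				if that name now resolves to another inode:
 *					enqueue (other_ino, other_parent)
 *
 * Note that an error does not break out of the loop right away: the
 * remaining queue entries are still dequeued and freed, with the first
 * error preserved in ret.
 */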
5425 
5426 static int copy_inode_items_to_log(struct btrfs_trans_handle *trans,
5427 				   struct btrfs_inode *inode,
5428 				   struct btrfs_key *min_key,
5429 				   const struct btrfs_key *max_key,
5430 				   struct btrfs_path *path,
5431 				   struct btrfs_path *dst_path,
5432 				   const u64 logged_isize,
5433 				   const bool recursive_logging,
5434 				   const int inode_only,
5435 				   struct btrfs_log_ctx *ctx,
5436 				   bool *need_log_inode_item)
5437 {
5438 	struct btrfs_root *root = inode->root;
5439 	int ins_start_slot = 0;
5440 	int ins_nr = 0;
5441 	int ret;
5442 
5443 	while (1) {
5444 		ret = btrfs_search_forward(root, min_key, path, trans->transid);
5445 		if (ret < 0)
5446 			return ret;
5447 		if (ret > 0) {
5448 			ret = 0;
5449 			break;
5450 		}
5451 again:
5452 		/* Note, ins_nr might be > 0 here, cleanup outside the loop */
5453 		if (min_key->objectid != max_key->objectid)
5454 			break;
5455 		if (min_key->type > max_key->type)
5456 			break;
5457 
5458 		if (min_key->type == BTRFS_INODE_ITEM_KEY)
5459 			*need_log_inode_item = false;
5460 
5461 		if ((min_key->type == BTRFS_INODE_REF_KEY ||
5462 		     min_key->type == BTRFS_INODE_EXTREF_KEY) &&
5463 		    inode->generation == trans->transid &&
5464 		    !recursive_logging) {
5465 			u64 other_ino = 0;
5466 			u64 other_parent = 0;
5467 
5468 			ret = btrfs_check_ref_name_override(path->nodes[0],
5469 					path->slots[0], min_key, inode,
5470 					&other_ino, &other_parent);
5471 			if (ret < 0) {
5472 				return ret;
5473 			} else if (ret > 0 &&
5474 				   other_ino != btrfs_ino(BTRFS_I(ctx->inode))) {
5475 				if (ins_nr > 0) {
5476 					ins_nr++;
5477 				} else {
5478 					ins_nr = 1;
5479 					ins_start_slot = path->slots[0];
5480 				}
5481 				ret = copy_items(trans, inode, dst_path, path,
5482 						 ins_start_slot, ins_nr,
5483 						 inode_only, logged_isize);
5484 				if (ret < 0)
5485 					return ret;
5486 				ins_nr = 0;
5487 
5488 				ret = log_conflicting_inodes(trans, root, path,
5489 						ctx, other_ino, other_parent);
5490 				if (ret)
5491 					return ret;
5492 				btrfs_release_path(path);
5493 				goto next_key;
5494 			}
5495 		}
5496 
5497 		/* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
5498 		if (min_key->type == BTRFS_XATTR_ITEM_KEY) {
5499 			if (ins_nr == 0)
5500 				goto next_slot;
5501 			ret = copy_items(trans, inode, dst_path, path,
5502 					 ins_start_slot,
5503 					 ins_nr, inode_only, logged_isize);
5504 			if (ret < 0)
5505 				return ret;
5506 			ins_nr = 0;
5507 			goto next_slot;
5508 		}
5509 
5510 		if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
5511 			ins_nr++;
5512 			goto next_slot;
5513 		} else if (!ins_nr) {
5514 			ins_start_slot = path->slots[0];
5515 			ins_nr = 1;
5516 			goto next_slot;
5517 		}
5518 
5519 		ret = copy_items(trans, inode, dst_path, path, ins_start_slot,
5520 				 ins_nr, inode_only, logged_isize);
5521 		if (ret < 0)
5522 			return ret;
5523 		ins_nr = 1;
5524 		ins_start_slot = path->slots[0];
5525 next_slot:
5526 		path->slots[0]++;
5527 		if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
5528 			btrfs_item_key_to_cpu(path->nodes[0], min_key,
5529 					      path->slots[0]);
5530 			goto again;
5531 		}
5532 		if (ins_nr) {
5533 			ret = copy_items(trans, inode, dst_path, path,
5534 					 ins_start_slot, ins_nr, inode_only,
5535 					 logged_isize);
5536 			if (ret < 0)
5537 				return ret;
5538 			ins_nr = 0;
5539 		}
5540 		btrfs_release_path(path);
5541 next_key:
5542 		if (min_key->offset < (u64)-1) {
5543 			min_key->offset++;
5544 		} else if (min_key->type < max_key->type) {
5545 			min_key->type++;
5546 			min_key->offset = 0;
5547 		} else {
5548 			break;
5549 		}
5550 	}
5551 	if (ins_nr)
5552 		ret = copy_items(trans, inode, dst_path, path, ins_start_slot,
5553 				 ins_nr, inode_only, logged_isize);
5554 
5555 	return ret;
5556 }
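
/*
 * An illustration (not code from this file) of the key window the loop
 * above scans, assuming a regular file with inode number 258 logged in
 * LOG_INODE_ALL mode. The window is set up by the caller,
 * btrfs_log_inode(), below:
 *
 *	min_key = (258, BTRFS_INODE_ITEM_KEY, 0)
 *	max_key = (258, (u8)-1, (u64)-1)
 *
 * btrfs_search_forward() skips subtrees not modified in the current
 * transaction, so only recently changed leaves are visited. After each
 * batch of contiguous items is copied, min_key is advanced offset first
 * and then type, until the window is exhausted.
 */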
5557 
5558 /* log a single inode in the tree log.
5559  * At least one parent directory for this inode must exist in the tree
5560  * or be logged already.
5561  *
5562  * Any items from this inode changed by the current transaction are copied
5563  * to the log tree.  An extra reference is taken on any extents in this
5564  * file, allowing us to avoid a whole pile of corner cases around logging
5565  * blocks that have been removed from the tree.
5566  *
5567  * See LOG_INODE_ALL and related defines for a description of what inode_only
5568  * does.
5569  *
5570  * This handles both files and directories.
5571  */
5572 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
5573 			   struct btrfs_inode *inode,
5574 			   int inode_only,
5575 			   struct btrfs_log_ctx *ctx)
5576 {
5577 	struct btrfs_path *path;
5578 	struct btrfs_path *dst_path;
5579 	struct btrfs_key min_key;
5580 	struct btrfs_key max_key;
5581 	struct btrfs_root *log = inode->root->log_root;
5582 	int err = 0;
5583 	int ret = 0;
5584 	bool fast_search = false;
5585 	u64 ino = btrfs_ino(inode);
5586 	struct extent_map_tree *em_tree = &inode->extent_tree;
5587 	u64 logged_isize = 0;
5588 	bool need_log_inode_item = true;
5589 	bool xattrs_logged = false;
5590 	bool recursive_logging = false;
5591 	bool inode_item_dropped = true;
5592 
5593 	path = btrfs_alloc_path();
5594 	if (!path)
5595 		return -ENOMEM;
5596 	dst_path = btrfs_alloc_path();
5597 	if (!dst_path) {
5598 		btrfs_free_path(path);
5599 		return -ENOMEM;
5600 	}
5601 
5602 	min_key.objectid = ino;
5603 	min_key.type = BTRFS_INODE_ITEM_KEY;
5604 	min_key.offset = 0;
5605 
5606 	max_key.objectid = ino;
5607 
5608 
5609 	/* today the code can only do partial logging of directories */
5610 	if (S_ISDIR(inode->vfs_inode.i_mode) ||
5611 	    (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5612 		       &inode->runtime_flags) &&
5613 	     inode_only >= LOG_INODE_EXISTS))
5614 		max_key.type = BTRFS_XATTR_ITEM_KEY;
5615 	else
5616 		max_key.type = (u8)-1;
5617 	max_key.offset = (u64)-1;
5618 
5619 	/*
5620 	 * Only run delayed items if we are a directory. We want to make sure
5621 	 * all directory indexes hit the fs/subvolume tree so we can find them
5622 	 * and figure out which index ranges have to be logged.
5623 	 */
5624 	if (S_ISDIR(inode->vfs_inode.i_mode)) {
5625 		err = btrfs_commit_inode_delayed_items(trans, inode);
5626 		if (err)
5627 			goto out;
5628 	}
5629 
5630 	if (inode_only == LOG_OTHER_INODE || inode_only == LOG_OTHER_INODE_ALL) {
5631 		recursive_logging = true;
5632 		if (inode_only == LOG_OTHER_INODE)
5633 			inode_only = LOG_INODE_EXISTS;
5634 		else
5635 			inode_only = LOG_INODE_ALL;
5636 		mutex_lock_nested(&inode->log_mutex, SINGLE_DEPTH_NESTING);
5637 	} else {
5638 		mutex_lock(&inode->log_mutex);
5639 	}
5640 
5641 	/*
5642 	 * This is for cases where logging a directory could result in losing a
5643 	 * file after replaying the log. For example, if we move a file from a
5644 	 * directory A to a directory B, then fsync directory A, we have no way
5645 	 * to know the file was moved from A to B, so logging just A would
5646 	 * result in losing the file after a log replay.
5647 	 */
5648 	if (S_ISDIR(inode->vfs_inode.i_mode) &&
5649 	    inode_only == LOG_INODE_ALL &&
5650 	    inode->last_unlink_trans >= trans->transid) {
5651 		btrfs_set_log_full_commit(trans);
5652 		err = 1;
5653 		goto out_unlock;
5654 	}
5655 
5656 	/*
5657 	 * a brute force approach to making sure we get the most uptodate
5658 	 * copies of everything.
5659 	 */
5660 	if (S_ISDIR(inode->vfs_inode.i_mode)) {
5661 		int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
5662 
5663 		clear_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags);
5664 		if (inode_only == LOG_INODE_EXISTS)
5665 			max_key_type = BTRFS_XATTR_ITEM_KEY;
5666 		ret = drop_inode_items(trans, log, path, inode, max_key_type);
5667 	} else {
5668 		if (inode_only == LOG_INODE_EXISTS && inode_logged(trans, inode)) {
5669 			/*
5670 			 * Make sure the new inode item we write to the log has
5671 			 * the same isize as the current one (if it exists).
5672 			 * This is necessary to prevent data loss after log
5673 			 * replay, and also to prevent doing a wrong expanding
5674 	 * truncate. For example: create a file, write 4K into offset
5675 			 * 0, fsync, write 4K into offset 4096, add hard link,
5676 			 * fsync some other file (to sync log), power fail - if
5677 			 * we use the inode's current i_size, after log replay
5678 	 * we get an 8K file, with the last 4K extent as a hole
5679 	 * (zeroes), as if an expanding truncate happened,
5680 	 * instead of getting a file of just 4K.
5681 			 */
5682 			err = logged_inode_size(log, inode, path, &logged_isize);
5683 			if (err)
5684 				goto out_unlock;
5685 		}
5686 		if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5687 			     &inode->runtime_flags)) {
5688 			if (inode_only == LOG_INODE_EXISTS) {
5689 				max_key.type = BTRFS_XATTR_ITEM_KEY;
5690 				ret = drop_inode_items(trans, log, path, inode,
5691 						       max_key.type);
5692 			} else {
5693 				clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5694 					  &inode->runtime_flags);
5695 				clear_bit(BTRFS_INODE_COPY_EVERYTHING,
5696 					  &inode->runtime_flags);
5697 				if (inode_logged(trans, inode))
5698 					ret = truncate_inode_items(trans, log,
5699 								   inode, 0, 0);
5700 			}
5701 		} else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
5702 					      &inode->runtime_flags) ||
5703 			   inode_only == LOG_INODE_EXISTS) {
5704 			if (inode_only == LOG_INODE_ALL)
5705 				fast_search = true;
5706 			max_key.type = BTRFS_XATTR_ITEM_KEY;
5707 			ret = drop_inode_items(trans, log, path, inode,
5708 					       max_key.type);
5709 		} else {
5710 			if (inode_only == LOG_INODE_ALL)
5711 				fast_search = true;
5712 			inode_item_dropped = false;
5713 			goto log_extents;
5714 		}
5715 
5716 	}
5717 	if (ret) {
5718 		err = ret;
5719 		goto out_unlock;
5720 	}
5721 
5722 	err = copy_inode_items_to_log(trans, inode, &min_key, &max_key,
5723 				      path, dst_path, logged_isize,
5724 				      recursive_logging, inode_only, ctx,
5725 				      &need_log_inode_item);
5726 	if (err)
5727 		goto out_unlock;
5728 
5729 	btrfs_release_path(path);
5730 	btrfs_release_path(dst_path);
5731 	err = btrfs_log_all_xattrs(trans, inode, path, dst_path);
5732 	if (err)
5733 		goto out_unlock;
5734 	xattrs_logged = true;
5735 	if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
5736 		btrfs_release_path(path);
5737 		btrfs_release_path(dst_path);
5738 		err = btrfs_log_holes(trans, inode, path);
5739 		if (err)
5740 			goto out_unlock;
5741 	}
5742 log_extents:
5743 	btrfs_release_path(path);
5744 	btrfs_release_path(dst_path);
5745 	if (need_log_inode_item) {
5746 		err = log_inode_item(trans, log, dst_path, inode, inode_item_dropped);
5747 		if (err)
5748 			goto out_unlock;
5749 		/*
5750 		 * If we are doing a fast fsync and the inode was logged before
5751 		 * in this transaction, we don't need to log the xattrs because
5752 		 * they were logged before. If xattrs were added, changed or
5753 		 * deleted since the last time we logged the inode, then we have
5754 		 * already logged them because the inode had the runtime flag
5755 		 * BTRFS_INODE_COPY_EVERYTHING set.
5756 		 */
5757 		if (!xattrs_logged && inode->logged_trans < trans->transid) {
5758 			err = btrfs_log_all_xattrs(trans, inode, path, dst_path);
5759 			if (err)
5760 				goto out_unlock;
5761 			btrfs_release_path(path);
5762 		}
5763 	}
5764 	if (fast_search) {
5765 		ret = btrfs_log_changed_extents(trans, inode, dst_path, ctx);
5766 		if (ret) {
5767 			err = ret;
5768 			goto out_unlock;
5769 		}
5770 	} else if (inode_only == LOG_INODE_ALL) {
5771 		struct extent_map *em, *n;
5772 
5773 		write_lock(&em_tree->lock);
5774 		list_for_each_entry_safe(em, n, &em_tree->modified_extents, list)
5775 			list_del_init(&em->list);
5776 		write_unlock(&em_tree->lock);
5777 	}
5778 
5779 	if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->vfs_inode.i_mode)) {
5780 		ret = log_directory_changes(trans, inode, path, dst_path, ctx);
5781 		if (ret) {
5782 			err = ret;
5783 			goto out_unlock;
5784 		}
5785 	}
5786 
5787 	spin_lock(&inode->lock);
5788 	inode->logged_trans = trans->transid;
5789 	/*
5790 	 * Don't update last_log_commit if we logged that an inode exists.
5791 	 * We do this for three reasons:
5792 	 *
5793 	 * 1) We might have had buffered writes to this inode that were
5794 	 *    flushed and had their ordered extents completed in this
5795 	 *    transaction, but we did not previously log the inode with
5796 	 *    LOG_INODE_ALL. Later the inode was evicted and after that
5797 	 *    it was loaded again and this LOG_INODE_EXISTS log operation
5798 	 *    happened. We must make sure that if an explicit fsync against
5799 	 *    the inode is performed later, it logs the new extents, an
5800 	 *    updated inode item, etc, and syncs the log. The same logic
5801 	 *    applies to direct IO writes instead of buffered writes.
5802 	 *
5803 	 * 2) When we log the inode with LOG_INODE_EXISTS, its inode item
5804 	 *    is logged with an i_size of 0 or whatever value was logged
5805 	 *    before. If later the i_size of the inode is increased by a
5806 	 *    truncate operation, the log is synced through an fsync of
5807 	 *    some other inode and then finally an explicit fsync against
5808 	 *    this inode is made, we must make sure this fsync logs the
5809 	 *    inode with the new i_size, the hole between old i_size and
5810 	 *    the new i_size, and syncs the log.
5811 	 *
5812 	 * 3) If we are logging that an ancestor inode exists as part of
5813 	 *    logging a new name from a link or rename operation, don't update
5814 	 *    its last_log_commit - otherwise if an explicit fsync is made
5815 	 *    against an ancestor, the fsync considers the inode in the log
5816 	 *    and doesn't sync the log, resulting in the ancestor missing after
5817 	 *    a power failure unless the log was synced as part of an fsync
5818 	 *    against any other unrelated inode.
5819 	 */
5820 	if (inode_only != LOG_INODE_EXISTS)
5821 		inode->last_log_commit = inode->last_sub_trans;
5822 	spin_unlock(&inode->lock);
5823 out_unlock:
5824 	mutex_unlock(&inode->log_mutex);
5825 out:
5826 	btrfs_free_path(path);
5827 	btrfs_free_path(dst_path);
5828 	return err;
5829 }
5830 
5831 /*
5832  * Check if we need to log an inode. This is used in contexts where while
5833  * logging an inode we need to log another inode (either that it exists or in
5834  * full mode). This is used instead of btrfs_inode_in_log() because the latter
5835  * requires the inode to be in the log and have the log transaction committed,
5836  * while here we do not care if the log transaction was already committed - our
5837  * caller will commit the log later - and we want to avoid logging an inode
5838  * multiple times when multiple tasks have joined the same log transaction.
5839  */
5840 static bool need_log_inode(struct btrfs_trans_handle *trans,
5841 			   struct btrfs_inode *inode)
5842 {
5843 	/*
5844 	 * If a directory was not modified, no dentries added or removed, we can
5845 	 * and should avoid logging it.
5846 	 */
5847 	if (S_ISDIR(inode->vfs_inode.i_mode) && inode->last_trans < trans->transid)
5848 		return false;
5849 
5850 	/*
5851 	 * If this inode does not have new/updated/deleted xattrs since the last
5852 	 * time it was logged and is flagged as logged in the current transaction,
5853 	 * we can skip logging it. As for new/deleted names, those are updated in
5854 	 * the log by link/unlink/rename operations.
5855 	 * In case the inode was logged and then evicted and reloaded, its
5856 	 * logged_trans will be 0, in which case we have to fully log it since
5857 	 * logged_trans is a transient field, not persisted.
5858 	 */
5859 	if (inode->logged_trans == trans->transid &&
5860 	    !test_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags))
5861 		return false;
5862 
5863 	return true;
5864 }
5865 
5866 struct btrfs_dir_list {
5867 	u64 ino;
5868 	struct list_head list;
5869 };
5870 
5871 /*
5872  * Log the inodes of the new dentries of a directory. See log_dir_items() for
5873  * details about why it is needed.
5874  * This is a recursive operation - if an existing dentry corresponds to a
5875  * directory, that directory's new entries are logged too (same behaviour as
5876  * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that when logging the inodes
5877  * the dentries point to, we do not lock their i_mutex, otherwise lockdep
5878  * complains about the following circular lock dependency / possible deadlock:
5879  *
5880  *        CPU0                                        CPU1
5881  *        ----                                        ----
5882  * lock(&type->i_mutex_dir_key#3/2);
5883  *                                            lock(sb_internal#2);
5884  *                                            lock(&type->i_mutex_dir_key#3/2);
5885  * lock(&sb->s_type->i_mutex_key#14);
5886  *
5887  * Where sb_internal is the lock (a counter that works as a lock) acquired by
5888  * sb_start_intwrite() in btrfs_start_transaction().
5889  * Not locking i_mutex of the inodes is still safe because:
5890  *
5891  * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible
5892  *    that while logging the inode new references (names) are added or removed
5893  *    from the inode, leaving the logged inode item with a link count that does
5894  *    not match the number of logged inode reference items. This is fine because
5895  *    at log replay time we compute the real number of links and correct the
5896  *    link count in the inode item (see replay_one_buffer() and
5897  *    link_to_fixup_dir());
5898  *
5899  * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that
5900  *    while logging the inode's items new items with keys BTRFS_DIR_ITEM_KEY and
5901  *    BTRFS_DIR_INDEX_KEY are added to fs/subvol tree and the logged inode item
5902  *    has a size that doesn't match the sum of the lengths of all the logged
5903  *    names. This does not result in a problem because if a dir_item key is
5904  *    logged but its matching dir_index key is not logged, at log replay time we
5905  *    don't use it to replay the respective name (see replay_one_name()). On the
5906  *    other hand if only the dir_index key ends up being logged, the respective
5907  *    name is added to the fs/subvol tree with both the dir_item and dir_index
5908  *    keys created (see replay_one_name()).
5909  *    The directory's inode item with a wrong i_size is not a problem as well,
5910  *    since we don't use it at log replay time to set the i_size in the inode
5911  *    item of the fs/subvol tree (see overwrite_item()).
5912  */
5913 static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
5914 				struct btrfs_root *root,
5915 				struct btrfs_inode *start_inode,
5916 				struct btrfs_log_ctx *ctx)
5917 {
5918 	struct btrfs_fs_info *fs_info = root->fs_info;
5919 	struct btrfs_root *log = root->log_root;
5920 	struct btrfs_path *path;
5921 	LIST_HEAD(dir_list);
5922 	struct btrfs_dir_list *dir_elem;
5923 	int ret = 0;
5924 
5925 	/*
5926 	 * If we are logging a new name, as part of a link or rename operation,
5927 	 * don't bother logging new dentries, as we just want to log the names
5928 	 * of an inode and that any new parents exist.
5929 	 */
5930 	if (ctx->logging_new_name)
5931 		return 0;
5932 
5933 	path = btrfs_alloc_path();
5934 	if (!path)
5935 		return -ENOMEM;
5936 
5937 	dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS);
5938 	if (!dir_elem) {
5939 		btrfs_free_path(path);
5940 		return -ENOMEM;
5941 	}
5942 	dir_elem->ino = btrfs_ino(start_inode);
5943 	list_add_tail(&dir_elem->list, &dir_list);
5944 
5945 	while (!list_empty(&dir_list)) {
5946 		struct extent_buffer *leaf;
5947 		struct btrfs_key min_key;
5948 		int nritems;
5949 		int i;
5950 
5951 		dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list,
5952 					    list);
5953 		if (ret)
5954 			goto next_dir_inode;
5955 
5956 		min_key.objectid = dir_elem->ino;
5957 		min_key.type = BTRFS_DIR_ITEM_KEY;
5958 		min_key.offset = 0;
5959 again:
5960 		btrfs_release_path(path);
5961 		ret = btrfs_search_forward(log, &min_key, path, trans->transid);
5962 		if (ret < 0) {
5963 			goto next_dir_inode;
5964 		} else if (ret > 0) {
5965 			ret = 0;
5966 			goto next_dir_inode;
5967 		}
5968 
5969 process_leaf:
5970 		leaf = path->nodes[0];
5971 		nritems = btrfs_header_nritems(leaf);
5972 		for (i = path->slots[0]; i < nritems; i++) {
5973 			struct btrfs_dir_item *di;
5974 			struct btrfs_key di_key;
5975 			struct inode *di_inode;
5976 			struct btrfs_dir_list *new_dir_elem;
5977 			int log_mode = LOG_INODE_EXISTS;
5978 			int type;
5979 
5980 			btrfs_item_key_to_cpu(leaf, &min_key, i);
5981 			if (min_key.objectid != dir_elem->ino ||
5982 			    min_key.type != BTRFS_DIR_ITEM_KEY)
5983 				goto next_dir_inode;
5984 
5985 			di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item);
5986 			type = btrfs_dir_type(leaf, di);
5987 			if (btrfs_dir_transid(leaf, di) < trans->transid &&
5988 			    type != BTRFS_FT_DIR)
5989 				continue;
5990 			btrfs_dir_item_key_to_cpu(leaf, di, &di_key);
5991 			if (di_key.type == BTRFS_ROOT_ITEM_KEY)
5992 				continue;
5993 
5994 			btrfs_release_path(path);
5995 			di_inode = btrfs_iget(fs_info->sb, di_key.objectid, root);
5996 			if (IS_ERR(di_inode)) {
5997 				ret = PTR_ERR(di_inode);
5998 				goto next_dir_inode;
5999 			}
6000 
6001 			if (!need_log_inode(trans, BTRFS_I(di_inode))) {
6002 				btrfs_add_delayed_iput(di_inode);
6003 				break;
6004 			}
6005 
6006 			ctx->log_new_dentries = false;
6007 			if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK)
6008 				log_mode = LOG_INODE_ALL;
6009 			ret = btrfs_log_inode(trans, BTRFS_I(di_inode),
6010 					      log_mode, ctx);
6011 			btrfs_add_delayed_iput(di_inode);
6012 			if (ret)
6013 				goto next_dir_inode;
6014 			if (ctx->log_new_dentries) {
6015 				new_dir_elem = kmalloc(sizeof(*new_dir_elem),
6016 						       GFP_NOFS);
6017 				if (!new_dir_elem) {
6018 					ret = -ENOMEM;
6019 					goto next_dir_inode;
6020 				}
6021 				new_dir_elem->ino = di_key.objectid;
6022 				list_add_tail(&new_dir_elem->list, &dir_list);
6023 			}
6024 			break;
6025 		}
6026 		if (i == nritems) {
6027 			ret = btrfs_next_leaf(log, path);
6028 			if (ret < 0) {
6029 				goto next_dir_inode;
6030 			} else if (ret > 0) {
6031 				ret = 0;
6032 				goto next_dir_inode;
6033 			}
6034 			goto process_leaf;
6035 		}
6036 		if (min_key.offset < (u64)-1) {
6037 			min_key.offset++;
6038 			goto again;
6039 		}
6040 next_dir_inode:
6041 		list_del(&dir_elem->list);
6042 		kfree(dir_elem);
6043 	}
6044 
6045 	btrfs_free_path(path);
6046 	return ret;
6047 }
6048 
6049 static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
6050 				 struct btrfs_inode *inode,
6051 				 struct btrfs_log_ctx *ctx)
6052 {
6053 	struct btrfs_fs_info *fs_info = trans->fs_info;
6054 	int ret;
6055 	struct btrfs_path *path;
6056 	struct btrfs_key key;
6057 	struct btrfs_root *root = inode->root;
6058 	const u64 ino = btrfs_ino(inode);
6059 
6060 	path = btrfs_alloc_path();
6061 	if (!path)
6062 		return -ENOMEM;
6063 	path->skip_locking = 1;
6064 	path->search_commit_root = 1;
6065 
6066 	key.objectid = ino;
6067 	key.type = BTRFS_INODE_REF_KEY;
6068 	key.offset = 0;
6069 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6070 	if (ret < 0)
6071 		goto out;
6072 
6073 	while (true) {
6074 		struct extent_buffer *leaf = path->nodes[0];
6075 		int slot = path->slots[0];
6076 		u32 cur_offset = 0;
6077 		u32 item_size;
6078 		unsigned long ptr;
6079 
6080 		if (slot >= btrfs_header_nritems(leaf)) {
6081 			ret = btrfs_next_leaf(root, path);
6082 			if (ret < 0)
6083 				goto out;
6084 			else if (ret > 0)
6085 				break;
6086 			continue;
6087 		}
6088 
6089 		btrfs_item_key_to_cpu(leaf, &key, slot);
6090 		/* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */
6091 		if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY)
6092 			break;
6093 
6094 		item_size = btrfs_item_size_nr(leaf, slot);
6095 		ptr = btrfs_item_ptr_offset(leaf, slot);
6096 		while (cur_offset < item_size) {
6097 			struct btrfs_key inode_key;
6098 			struct inode *dir_inode;
6099 
6100 			inode_key.type = BTRFS_INODE_ITEM_KEY;
6101 			inode_key.offset = 0;
6102 
6103 			if (key.type == BTRFS_INODE_EXTREF_KEY) {
6104 				struct btrfs_inode_extref *extref;
6105 
6106 				extref = (struct btrfs_inode_extref *)
6107 					(ptr + cur_offset);
6108 				inode_key.objectid = btrfs_inode_extref_parent(
6109 					leaf, extref);
6110 				cur_offset += sizeof(*extref);
6111 				cur_offset += btrfs_inode_extref_name_len(leaf,
6112 					extref);
6113 			} else {
6114 				inode_key.objectid = key.offset;
6115 				cur_offset = item_size;
6116 			}
6117 
6118 			dir_inode = btrfs_iget(fs_info->sb, inode_key.objectid,
6119 					       root);
6120 			/*
6121 			 * If the parent inode was deleted, return an error to
6122 			 * fall back to a transaction commit. This is to prevent
6123 			 * an inode that was moved from a parent A to a parent B,
6124 			 * and whose former parent A was then deleted before the
6125 			 * inode was fsync'ed, from existing at both parents
6126 			 * after a log replay (with the old parent still existing).
6127 			 * Example:
6128 			 *
6129 			 * mkdir /mnt/A
6130 			 * mkdir /mnt/B
6131 			 * touch /mnt/B/bar
6132 			 * sync
6133 			 * mv /mnt/B/bar /mnt/A/bar
6134 			 * mv -T /mnt/A /mnt/B
6135 			 * fsync /mnt/B/bar
6136 			 * <power fail>
6137 			 *
6138 			 * If we ignore the old parent B which got deleted,
6139 			 * after a log replay we would have file bar linked
6140 			 * at both parents and the old parent B would still
6141 			 * exist.
6142 			 */
6143 			if (IS_ERR(dir_inode)) {
6144 				ret = PTR_ERR(dir_inode);
6145 				goto out;
6146 			}
6147 
6148 			if (!need_log_inode(trans, BTRFS_I(dir_inode))) {
6149 				btrfs_add_delayed_iput(dir_inode);
6150 				continue;
6151 			}
6152 
6153 			ctx->log_new_dentries = false;
6154 			ret = btrfs_log_inode(trans, BTRFS_I(dir_inode),
6155 					      LOG_INODE_ALL, ctx);
6156 			if (!ret && ctx->log_new_dentries)
6157 				ret = log_new_dir_dentries(trans, root,
6158 						   BTRFS_I(dir_inode), ctx);
6159 			btrfs_add_delayed_iput(dir_inode);
6160 			if (ret)
6161 				goto out;
6162 		}
6163 		path->slots[0]++;
6164 	}
6165 	ret = 0;
6166 out:
6167 	btrfs_free_path(path);
6168 	return ret;
6169 }
6170 
6171 static int log_new_ancestors(struct btrfs_trans_handle *trans,
6172 			     struct btrfs_root *root,
6173 			     struct btrfs_path *path,
6174 			     struct btrfs_log_ctx *ctx)
6175 {
6176 	struct btrfs_key found_key;
6177 
6178 	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
6179 
6180 	while (true) {
6181 		struct btrfs_fs_info *fs_info = root->fs_info;
6182 		struct extent_buffer *leaf = path->nodes[0];
6183 		int slot = path->slots[0];
6184 		struct btrfs_key search_key;
6185 		struct inode *inode;
6186 		u64 ino;
6187 		int ret = 0;
6188 
6189 		btrfs_release_path(path);
6190 
6191 		ino = found_key.offset;
6192 
6193 		search_key.objectid = found_key.offset;
6194 		search_key.type = BTRFS_INODE_ITEM_KEY;
6195 		search_key.offset = 0;
6196 		inode = btrfs_iget(fs_info->sb, ino, root);
6197 		if (IS_ERR(inode))
6198 			return PTR_ERR(inode);
6199 
6200 		if (BTRFS_I(inode)->generation >= trans->transid &&
6201 		    need_log_inode(trans, BTRFS_I(inode)))
6202 			ret = btrfs_log_inode(trans, BTRFS_I(inode),
6203 					      LOG_INODE_EXISTS, ctx);
6204 		btrfs_add_delayed_iput(inode);
6205 		if (ret)
6206 			return ret;
6207 
6208 		if (search_key.objectid == BTRFS_FIRST_FREE_OBJECTID)
6209 			break;
6210 
6211 		search_key.type = BTRFS_INODE_REF_KEY;
6212 		ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
6213 		if (ret < 0)
6214 			return ret;
6215 
6216 		leaf = path->nodes[0];
6217 		slot = path->slots[0];
6218 		if (slot >= btrfs_header_nritems(leaf)) {
6219 			ret = btrfs_next_leaf(root, path);
6220 			if (ret < 0)
6221 				return ret;
6222 			else if (ret > 0)
6223 				return -ENOENT;
6224 			leaf = path->nodes[0];
6225 			slot = path->slots[0];
6226 		}
6227 
6228 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
6229 		if (found_key.objectid != search_key.objectid ||
6230 		    found_key.type != BTRFS_INODE_REF_KEY)
6231 			return -ENOENT;
6232 	}
6233 	return 0;
6234 }
6235 
6236 static int log_new_ancestors_fast(struct btrfs_trans_handle *trans,
6237 				  struct btrfs_inode *inode,
6238 				  struct dentry *parent,
6239 				  struct btrfs_log_ctx *ctx)
6240 {
6241 	struct btrfs_root *root = inode->root;
6242 	struct dentry *old_parent = NULL;
6243 	struct super_block *sb = inode->vfs_inode.i_sb;
6244 	int ret = 0;
6245 
6246 	while (true) {
6247 		if (!parent || d_really_is_negative(parent) ||
6248 		    sb != parent->d_sb)
6249 			break;
6250 
6251 		inode = BTRFS_I(d_inode(parent));
6252 		if (root != inode->root)
6253 			break;
6254 
6255 		if (inode->generation >= trans->transid &&
6256 		    need_log_inode(trans, inode)) {
6257 			ret = btrfs_log_inode(trans, inode,
6258 					      LOG_INODE_EXISTS, ctx);
6259 			if (ret)
6260 				break;
6261 		}
6262 		if (IS_ROOT(parent))
6263 			break;
6264 
6265 		parent = dget_parent(parent);
6266 		dput(old_parent);
6267 		old_parent = parent;
6268 	}
6269 	dput(old_parent);
6270 
6271 	return ret;
6272 }
6273 
6274 static int log_all_new_ancestors(struct btrfs_trans_handle *trans,
6275 				 struct btrfs_inode *inode,
6276 				 struct dentry *parent,
6277 				 struct btrfs_log_ctx *ctx)
6278 {
6279 	struct btrfs_root *root = inode->root;
6280 	const u64 ino = btrfs_ino(inode);
6281 	struct btrfs_path *path;
6282 	struct btrfs_key search_key;
6283 	int ret;
6284 
6285 	/*
6286 	 * For a single hard link case, go through a fast path that does not
6287 	 * need to iterate the fs/subvolume tree.
6288 	 */
6289 	if (inode->vfs_inode.i_nlink < 2)
6290 		return log_new_ancestors_fast(trans, inode, parent, ctx);
6291 
6292 	path = btrfs_alloc_path();
6293 	if (!path)
6294 		return -ENOMEM;
6295 
6296 	search_key.objectid = ino;
6297 	search_key.type = BTRFS_INODE_REF_KEY;
6298 	search_key.offset = 0;
6299 again:
6300 	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
6301 	if (ret < 0)
6302 		goto out;
6303 	if (ret == 0)
6304 		path->slots[0]++;
6305 
6306 	while (true) {
6307 		struct extent_buffer *leaf = path->nodes[0];
6308 		int slot = path->slots[0];
6309 		struct btrfs_key found_key;
6310 
6311 		if (slot >= btrfs_header_nritems(leaf)) {
6312 			ret = btrfs_next_leaf(root, path);
6313 			if (ret < 0)
6314 				goto out;
6315 			else if (ret > 0)
6316 				break;
6317 			continue;
6318 		}
6319 
6320 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
6321 		if (found_key.objectid != ino ||
6322 		    found_key.type > BTRFS_INODE_EXTREF_KEY)
6323 			break;
6324 
6325 		/*
6326 		 * Don't deal with extended references because they are rare
6327 		 * cases and too complex to deal with (we would need to keep
6328 		 * track of which subitem we are processing for each item in
6329 		 * this loop, etc). So just return some error to fall back to
6330 		 * a transaction commit.
6331 		 */
6332 		if (found_key.type == BTRFS_INODE_EXTREF_KEY) {
6333 			ret = -EMLINK;
6334 			goto out;
6335 		}
6336 
6337 		/*
6338 		 * Logging ancestors needs to do more searches on the fs/subvol
6339 		 * tree, so it releases the path as needed to avoid deadlocks.
6340 		 * Keep track of the last inode ref key and resume from that key
6341 		 * after logging all new ancestors for the current hard link.
6342 		 */
6343 		memcpy(&search_key, &found_key, sizeof(search_key));
6344 
6345 		ret = log_new_ancestors(trans, root, path, ctx);
6346 		if (ret)
6347 			goto out;
6348 		btrfs_release_path(path);
6349 		goto again;
6350 	}
6351 	ret = 0;
6352 out:
6353 	btrfs_free_path(path);
6354 	return ret;
6355 }
6356 
6357 /*
6358  * helper function around btrfs_log_inode to make sure newly created
6359  * parent directories also end up in the log.  Minimal, inode-and-backref-
6360  * only logging is done for any parent directories that are older than
6361  * the last committed transaction.
6362  */
6363 static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
6364 				  struct btrfs_inode *inode,
6365 				  struct dentry *parent,
6366 				  int inode_only,
6367 				  struct btrfs_log_ctx *ctx)
6368 {
6369 	struct btrfs_root *root = inode->root;
6370 	struct btrfs_fs_info *fs_info = root->fs_info;
6371 	int ret = 0;
6372 	bool log_dentries = false;
6373 
6374 	if (btrfs_test_opt(fs_info, NOTREELOG)) {
6375 		ret = 1;
6376 		goto end_no_trans;
6377 	}
6378 
6379 	if (btrfs_root_refs(&root->root_item) == 0) {
6380 		ret = 1;
6381 		goto end_no_trans;
6382 	}
6383 
6384 	/*
6385 	 * Skip already logged inodes or inodes corresponding to tmpfiles
6386 	 * (since logging them is pointless, a link count of 0 means they
6387 	 * will never be accessible).
6388 	 */
6389 	if ((btrfs_inode_in_log(inode, trans->transid) &&
6390 	     list_empty(&ctx->ordered_extents)) ||
6391 	    inode->vfs_inode.i_nlink == 0) {
6392 		ret = BTRFS_NO_LOG_SYNC;
6393 		goto end_no_trans;
6394 	}
6395 
6396 	ret = start_log_trans(trans, root, ctx);
6397 	if (ret)
6398 		goto end_no_trans;
6399 
6400 	ret = btrfs_log_inode(trans, inode, inode_only, ctx);
6401 	if (ret)
6402 		goto end_trans;
6403 
6404 	/*
6405 	 * for regular files, if the inode is already on disk, we don't
6406 	 * have to worry about the parents at all.  This is because
6407 	 * we can use the last_unlink_trans field to record renames
6408 	 * and other fun in this file.
6409 	 */
6410 	if (S_ISREG(inode->vfs_inode.i_mode) &&
6411 	    inode->generation < trans->transid &&
6412 	    inode->last_unlink_trans < trans->transid) {
6413 		ret = 0;
6414 		goto end_trans;
6415 	}
6416 
6417 	if (S_ISDIR(inode->vfs_inode.i_mode) && ctx->log_new_dentries)
6418 		log_dentries = true;
6419 
6420 	/*
6421 	 * On unlink we must make sure all our current and old parent directory
6422 	 * inodes are fully logged. This is to prevent leaving dangling
6423 	 * directory index entries in directories that were our parents but are
6424 	 * not anymore. Not doing this results in the old parent directory being
6425 	 * impossible to delete after log replay (rmdir will always fail with
6426 	 * error -ENOTEMPTY).
6427 	 *
6428 	 * Example 1:
6429 	 *
6430 	 * mkdir testdir
6431 	 * touch testdir/foo
6432 	 * ln testdir/foo testdir/bar
6433 	 * sync
6434 	 * unlink testdir/bar
6435 	 * xfs_io -c fsync testdir/foo
6436 	 * <power failure>
6437 	 * mount fs, triggers log replay
6438 	 *
6439 	 * If we don't log the parent directory (testdir), after log replay the
6440 	 * directory still has an entry pointing to the file inode using the bar
6441 	 * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and
6442 	 * the file inode has a link count of 1.
6443 	 *
6444 	 * Example 2:
6445 	 *
6446 	 * mkdir testdir
6447 	 * touch foo
6448 	 * ln foo testdir/foo2
6449 	 * ln foo testdir/foo3
6450 	 * sync
6451 	 * unlink testdir/foo3
6452 	 * xfs_io -c fsync foo
6453 	 * <power failure>
6454 	 * mount fs, triggers log replay
6455 	 *
6456 	 * Similar to the first example, after log replay the parent directory
6457 	 * testdir still has an entry pointing to the file inode with name foo3
6458 	 * but the file inode does not have a matching BTRFS_INODE_REF_KEY item
6459 	 * and has a link count of 2.
6460 	 */
6461 	if (inode->last_unlink_trans >= trans->transid) {
6462 		ret = btrfs_log_all_parents(trans, inode, ctx);
6463 		if (ret)
6464 			goto end_trans;
6465 	}
6466 
6467 	ret = log_all_new_ancestors(trans, inode, parent, ctx);
6468 	if (ret)
6469 		goto end_trans;
6470 
6471 	if (log_dentries)
6472 		ret = log_new_dir_dentries(trans, root, inode, ctx);
6473 	else
6474 		ret = 0;
6475 end_trans:
6476 	if (ret < 0) {
6477 		btrfs_set_log_full_commit(trans);
6478 		ret = 1;
6479 	}
6480 
6481 	if (ret)
6482 		btrfs_remove_log_ctx(root, ctx);
6483 	btrfs_end_log_trans(root);
6484 end_no_trans:
6485 	return ret;
6486 }
6487 
6488 /*
6489  * it is not safe to log a dentry if the chunk root has added new
6490  * chunks.  This returns 0 if the dentry was logged, and 1 otherwise.
6491  * If this returns 1, you must commit the transaction to safely get your
6492  * data on disk.
6493  */
6494 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
6495 			  struct dentry *dentry,
6496 			  struct btrfs_log_ctx *ctx)
6497 {
6498 	struct dentry *parent = dget_parent(dentry);
6499 	int ret;
6500 
6501 	ret = btrfs_log_inode_parent(trans, BTRFS_I(d_inode(dentry)), parent,
6502 				     LOG_INODE_ALL, ctx);
6503 	dput(parent);
6504 
6505 	return ret;
6506 }
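
/*
 * A minimal sketch of the expected caller pattern, loosely modeled on
 * btrfs_sync_file(); locking, ordered extent handling and error details
 * are omitted, so treat it as illustrative rather than authoritative:
 *
 *	ret = btrfs_log_dentry_safe(trans, dentry, &ctx);
 *	if (ret == 0) {
 *		ret = btrfs_sync_log(trans, root, &ctx);
 *		if (ret == 0)
 *			return btrfs_end_transaction(trans);
 *	}
 *	if (ret == BTRFS_NO_LOG_SYNC)
 *		return btrfs_end_transaction(trans);
 *	return btrfs_commit_transaction(trans);	(full commit fallback)
 */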
6507 
6508 /*
6509  * should be called during mount to recover and replay any log trees
6510  * from the FS
6511  */
6512 int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
6513 {
6514 	int ret;
6515 	struct btrfs_path *path;
6516 	struct btrfs_trans_handle *trans;
6517 	struct btrfs_key key;
6518 	struct btrfs_key found_key;
6519 	struct btrfs_root *log;
6520 	struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
6521 	struct walk_control wc = {
6522 		.process_func = process_one_buffer,
6523 		.stage = LOG_WALK_PIN_ONLY,
6524 	};
6525 
6526 	path = btrfs_alloc_path();
6527 	if (!path)
6528 		return -ENOMEM;
6529 
6530 	set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
6531 
6532 	trans = btrfs_start_transaction(fs_info->tree_root, 0);
6533 	if (IS_ERR(trans)) {
6534 		ret = PTR_ERR(trans);
6535 		goto error;
6536 	}
6537 
6538 	wc.trans = trans;
6539 	wc.pin = 1;
6540 
6541 	ret = walk_log_tree(trans, log_root_tree, &wc);
6542 	if (ret) {
6543 		btrfs_abort_transaction(trans, ret);
6544 		goto error;
6545 	}
6546 
6547 again:
6548 	key.objectid = BTRFS_TREE_LOG_OBJECTID;
6549 	key.offset = (u64)-1;
6550 	key.type = BTRFS_ROOT_ITEM_KEY;
6551 
6552 	while (1) {
6553 		ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
6554 
6555 		if (ret < 0) {
6556 			btrfs_abort_transaction(trans, ret);
6557 			goto error;
6558 		}
6559 		if (ret > 0) {
6560 			if (path->slots[0] == 0)
6561 				break;
6562 			path->slots[0]--;
6563 		}
6564 		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
6565 				      path->slots[0]);
6566 		btrfs_release_path(path);
6567 		if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
6568 			break;
6569 
6570 		log = btrfs_read_tree_root(log_root_tree, &found_key);
6571 		if (IS_ERR(log)) {
6572 			ret = PTR_ERR(log);
6573 			btrfs_abort_transaction(trans, ret);
6574 			goto error;
6575 		}
6576 
6577 		wc.replay_dest = btrfs_get_fs_root(fs_info, found_key.offset,
6578 						   true);
6579 		if (IS_ERR(wc.replay_dest)) {
6580 			ret = PTR_ERR(wc.replay_dest);
6581 
6582 			/*
6583 			 * We didn't find the subvol, likely because it was
6584 			 * deleted.  This is ok, simply skip this log and go to
6585 			 * the next one.
6586 			 *
6587 			 * We need to exclude the root because we can't have
6588 			 * other log replays overwriting this log as we'll read
6589 			 * it back in a few more times.  This will keep our
6590 			 * block from being modified, and we'll just bail for
6591 			 * each subsequent pass.
6592 			 */
6593 			if (ret == -ENOENT)
6594 				ret = btrfs_pin_extent_for_log_replay(trans,
6595 							log->node->start,
6596 							log->node->len);
6597 			btrfs_put_root(log);
6598 
6599 			if (!ret)
6600 				goto next;
6601 			btrfs_abort_transaction(trans, ret);
6602 			goto error;
6603 		}
6604 
6605 		wc.replay_dest->log_root = log;
6606 		ret = btrfs_record_root_in_trans(trans, wc.replay_dest);
6607 		if (ret)
6608 			/* The loop needs to continue due to the root refs */
6609 			btrfs_abort_transaction(trans, ret);
6610 		else
6611 			ret = walk_log_tree(trans, log, &wc);
6612 
6613 		if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
6614 			ret = fixup_inode_link_counts(trans, wc.replay_dest,
6615 						      path);
6616 			if (ret)
6617 				btrfs_abort_transaction(trans, ret);
6618 		}
6619 
6620 		if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
6621 			struct btrfs_root *root = wc.replay_dest;
6622 
6623 			btrfs_release_path(path);
6624 
6625 			/*
6626 			 * We have just replayed everything, and the highest
6627 			 * objectid of fs roots probably has changed in case
6628 			 * some inode items got replayed.
6629 			 *
6630 			 * root->objectid_mutex is not acquired as log replay
6631 			 * could only happen during mount.
6632 			 */
6633 			ret = btrfs_init_root_free_objectid(root);
6634 			if (ret)
6635 				btrfs_abort_transaction(trans, ret);
6636 		}
6637 
6638 		wc.replay_dest->log_root = NULL;
6639 		btrfs_put_root(wc.replay_dest);
6640 		btrfs_put_root(log);
6641 
6642 		if (ret)
6643 			goto error;
6644 next:
6645 		if (found_key.offset == 0)
6646 			break;
6647 		key.offset = found_key.offset - 1;
6648 	}
6649 	btrfs_release_path(path);
6650 
6651 	/* step one is to pin it all, step two is to replay just inodes */
6652 	if (wc.pin) {
6653 		wc.pin = 0;
6654 		wc.process_func = replay_one_buffer;
6655 		wc.stage = LOG_WALK_REPLAY_INODES;
6656 		goto again;
6657 	}
6658 	/* step three is to replay everything */
6659 	if (wc.stage < LOG_WALK_REPLAY_ALL) {
6660 		wc.stage++;
6661 		goto again;
6662 	}
6663 
6664 	btrfs_free_path(path);
6665 
6666 	/* step 4: commit the transaction, which also unpins the blocks */
6667 	/* step four: commit the transaction, which also unpins the blocks */
6668 	if (ret)
6669 		return ret;
6670 
6671 	log_root_tree->log_root = NULL;
6672 	clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
6673 	btrfs_put_root(log_root_tree);
6674 
6675 	return 0;
6676 error:
6677 	if (wc.trans)
6678 		btrfs_end_transaction(wc.trans);
6679 	clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
6680 	btrfs_free_path(path);
6681 	return ret;
6682 }
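
/*
 * A condensed view of the passes the function above makes over every log
 * tree found in the log root tree:
 *
 *	pass 1: wc.pin = 1, wc.stage = LOG_WALK_PIN_ONLY	(pin all blocks)
 *	pass 2: wc.pin = 0, wc.stage = LOG_WALK_REPLAY_INODES
 *	pass 3:             wc.stage = LOG_WALK_REPLAY_DIR_INDEX
 *	pass 4:             wc.stage = LOG_WALK_REPLAY_ALL
 *
 * followed by a transaction commit, which also unpins the pinned blocks.
 */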
6683 
6684 /*
6685  * there are some corner cases where we want to force a full
6686  * commit instead of allowing a directory to be logged.
6687  *
6688  * They revolve around files that were unlinked from the directory, and
6689  * this function updates the parent directory so that a full commit is
6690  * properly done if it is fsync'd later after the unlinks are done.
6691  *
6692  * Must be called before the unlink operations (updates to the subvolume tree,
6693  * inodes, etc) are done.
6694  */
6695 void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
6696 			     struct btrfs_inode *dir, struct btrfs_inode *inode,
6697 			     int for_rename)
6698 {
6699 	/*
6700 	 * when we're logging a file, if it hasn't been renamed
6701 	 * or unlinked, and its inode is fully committed on disk,
6702 	 * we don't have to worry about walking up the directory chain
6703 	 * to log its parents.
6704 	 *
6705 	 * So, we use the last_unlink_trans field to put this transid
6706 	 * into the file.  When the file is logged we check it and
6707 	 * don't log the parents if the file is fully on disk.
6708 	 */
6709 	mutex_lock(&inode->log_mutex);
6710 	inode->last_unlink_trans = trans->transid;
6711 	mutex_unlock(&inode->log_mutex);
6712 
6713 	/*
6714 	 * if this directory was already logged, any new
6715 	 * names for this file/dir will get recorded
6716 	 */
6717 	if (dir->logged_trans == trans->transid)
6718 		return;
6719 
6720 	/*
6721 	 * if the inode we're about to unlink was logged,
6722 	 * the log will be properly updated for any new names
6723 	 */
6724 	if (inode->logged_trans == trans->transid)
6725 		return;
6726 
6727 	/*
6728 	 * when renaming files across directories, if the directory
6729 	 * we're unlinking from gets fsync'd later on, there's
6730 	 * no way to find the destination directory later and fsync it
6731 	 * properly.  So, we have to be conservative and force commits
6732 	 * so the new name gets discovered.
6733 	 */
6734 	if (for_rename)
6735 		goto record;
6736 
6737 	/* we can safely do the unlink without any special recording */
6738 	return;
6739 
6740 record:
6741 	mutex_lock(&dir->log_mutex);
6742 	dir->last_unlink_trans = trans->transid;
6743 	mutex_unlock(&dir->log_mutex);
6744 }
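
/*
 * A self-contained userspace illustration of the cross-directory rename
 * case described above. The mount point and file names are hypothetical,
 * error handling is omitted, and the power failure obviously cannot be
 * part of the program. The sequence: create the file and commit normally
 * (sync), rename it from /mnt/a to /mnt/b, then fsync the source
 * directory /mnt/a:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/stat.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd;
 *
 *		mkdir("/mnt/a", 0755);
 *		mkdir("/mnt/b", 0755);
 *		fd = open("/mnt/a/foo", O_CREAT | O_WRONLY, 0644);
 *		close(fd);
 *		sync();
 *		rename("/mnt/a/foo", "/mnt/b/foo");
 *		fd = open("/mnt/a", O_RDONLY | O_DIRECTORY);
 *		fsync(fd);
 *		close(fd);
 *		return 0;
 *	}
 *
 * Without last_unlink_trans forcing a full commit on that last fsync(), a
 * power failure right after it could leave the file gone from /mnt/a
 * without the new name in /mnt/b ever having been persisted.
 */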
6745 
6746 /*
6747  * Make sure that if someone attempts to fsync the parent directory of a deleted
6748  * snapshot, it ends up triggering a transaction commit. This is to guarantee
6749  * that after replaying the log tree of the parent directory's root we will not
6750  * see the snapshot anymore and at log replay time we will not see any log tree
6751  * corresponding to the deleted snapshot's root, which could lead to replaying
6752  * it after replaying the log tree of the parent directory (which would replay
6753  * the snapshot delete operation).
6754  *
6755  * Must be called before the actual snapshot destroy operation (updates to the
6756  * parent root and tree of tree roots trees, etc) are done.
6757  */
6758 void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
6759 				   struct btrfs_inode *dir)
6760 {
6761 	mutex_lock(&dir->log_mutex);
6762 	dir->last_unlink_trans = trans->transid;
6763 	mutex_unlock(&dir->log_mutex);
6764 }
6765 
6766 /*
6767  * Call this after adding a new name for a file and it will properly
6768  * update the log to reflect the new name.
6769  */
6770 void btrfs_log_new_name(struct btrfs_trans_handle *trans,
6771 			struct btrfs_inode *inode, struct btrfs_inode *old_dir,
6772 			struct dentry *parent)
6773 {
6774 	struct btrfs_log_ctx ctx;
6775 
6776 	/*
6777 	 * this will force the logging code to walk the dentry chain
6778 	 * up for the file
6779 	 */
6780 	if (!S_ISDIR(inode->vfs_inode.i_mode))
6781 		inode->last_unlink_trans = trans->transid;
6782 
6783 	/*
6784 	 * if this inode hasn't been logged and the directory we're renaming it
6785 	 * from hasn't been logged, we don't need to log it
6786 	 */
6787 	if (!inode_logged(trans, inode) &&
6788 	    (!old_dir || !inode_logged(trans, old_dir)))
6789 		return;
6790 
6791 	/*
6792 	 * If we are doing a rename (old_dir is not NULL) from a directory that
6793 	 * was previously logged, make sure the next log attempt on the directory
6794 	 * is not skipped and logs the inode again. This is because the log may
6795 	 * not currently be authoritative for a range including the old
6796 	 * BTRFS_DIR_ITEM_KEY and BTRFS_DIR_INDEX_KEY keys, so we want to make
6797 	 * sure after a log replay we do not end up with both the new and old
6798 	 * dentries around (in case the inode is a directory we would have a
6799 	 * directory with two hard links and 2 inode references for different
6800 	 * parents). The next log attempt of old_dir will happen at
6801 	 * btrfs_log_all_parents(), called through btrfs_log_inode_parent()
6802 	 * below, because we have previously set inode->last_unlink_trans to the
6803 	 * current transaction ID, either here or at btrfs_record_unlink_dir() in
6804 	 * case inode is a directory.
6805 	 */
6806 	if (old_dir)
6807 		old_dir->logged_trans = 0;
6808 
6809 	btrfs_init_log_ctx(&ctx, &inode->vfs_inode);
6810 	ctx.logging_new_name = true;
6811 	/*
6812 	 * We don't care about the return value. If we fail to log the new name
6813 	 * then we know the next attempt to sync the log will fall back to a full
6814 	 * transaction commit (due to a call to btrfs_set_log_full_commit()), so
6815 	 * we don't need to worry about getting a log committed that has an
6816 	 * inconsistent state after a rename operation.
6817 	 */
6818 	btrfs_log_inode_parent(trans, inode, parent, LOG_INODE_EXISTS, &ctx);
6819 }
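
/*
 * Typical call sites, listed from memory and therefore informational
 * only: btrfs_link() and btrfs_rename() in fs/btrfs/inode.c, after the
 * new name has been inserted in the destination directory. For a link
 * there is no old directory:
 *
 *	btrfs_log_new_name(trans, BTRFS_I(inode), NULL, dentry->d_parent);
 *
 * while a rename passes the old directory so that its logged_trans gets
 * reset as described above:
 *
 *	btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir),
 *			   new_dentry->d_parent);
 */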
6820 
6821