// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007,2008 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/mm.h>
#include <linux/error-injection.h>
#include "messages.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"
#include "volumes.h"
#include "qgroup.h"
#include "tree-mod-log.h"
#include "tree-checker.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "relocation.h"
#include "file-item.h"

static struct kmem_cache *btrfs_path_cachep;

static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *ins_key, struct btrfs_path *path,
		      int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);

static const struct btrfs_csums {
	u16		size;
	const char	name[10];
	const char	driver[12];
} btrfs_csums[] = {
	[BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" },
	[BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" },
	[BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" },
	[BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b",
				     .driver = "blake2b-256" },
};
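
/*
 * The table above is indexed by the on-disk checksum type. When .driver is
 * not set, .name doubles as the crypto API driver name (e.g. "crc32c"), see
 * btrfs_super_csum_driver() below.
 */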

/*
 * The leaf data grows from end-to-front in the node. This returns the address
 * of the start of the last item, which is the stop of the leaf data stack.
 */
static unsigned int leaf_data_end(const struct extent_buffer *leaf)
{
	u32 nr = btrfs_header_nritems(leaf);

	if (nr == 0)
		return BTRFS_LEAF_DATA_SIZE(leaf->fs_info);
	return btrfs_item_offset(leaf, nr - 1);
}
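
/*
 * Illustration of the leaf layout assumed above (a sketch, not to scale):
 *
 *	[ header | item 0 | item 1 | ... | free space ... | data 1 | data 0 ]
 *
 * The item array grows from the front towards the end of the block, while
 * the item data grows from the end towards the front, so the offset of the
 * last item's data marks where the free space ends.
 */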

/*
 * Move data in a @leaf (using memmove, safe for overlapping ranges).
 *
 * @leaf:	leaf that we're doing a memmove on
 * @dst_offset:	item data offset we're moving to
 * @src_offset:	item data offset we're moving from
 * @len:	length of the data we're moving
 *
 * Wrapper around memmove_extent_buffer() that takes into account the header on
 * the leaf. The btrfs_item offsets start directly after the header, so we
 * have to adjust any offsets to account for the header in the leaf. This
 * handles that math to simplify the callers.
 */
static inline void memmove_leaf_data(const struct extent_buffer *leaf,
				     unsigned long dst_offset,
				     unsigned long src_offset,
				     unsigned long len)
{
	memmove_extent_buffer(leaf, btrfs_item_nr_offset(leaf, 0) + dst_offset,
			      btrfs_item_nr_offset(leaf, 0) + src_offset, len);
}

/*
 * Copy item data from @src into @dst at the given @offset.
 *
 * @dst:	destination leaf that we're copying into
 * @src:	source leaf that we're copying from
 * @dst_offset:	item data offset we're copying to
 * @src_offset:	item data offset we're copying from
 * @len:	length of the data we're copying
 *
 * Wrapper around copy_extent_buffer() that takes into account the header on
 * the leaf. The btrfs_item offsets start directly after the header, so we
 * have to adjust any offsets to account for the header in the leaf. This
 * handles that math to simplify the callers.
 */
static inline void copy_leaf_data(const struct extent_buffer *dst,
				  const struct extent_buffer *src,
				  unsigned long dst_offset,
				  unsigned long src_offset, unsigned long len)
{
	copy_extent_buffer(dst, src, btrfs_item_nr_offset(dst, 0) + dst_offset,
			   btrfs_item_nr_offset(src, 0) + src_offset, len);
}

/*
 * Move items in a @leaf (using memmove).
 *
 * @leaf:	leaf that we're moving items within
 * @dst_item:	the item nr we're copying into
 * @src_item:	the item nr we're copying from
 * @nr_items:	the number of items to copy
 *
 * Wrapper around memmove_extent_buffer() that does the math to get the
 * appropriate offsets into the leaf from the item numbers.
 */
static inline void memmove_leaf_items(const struct extent_buffer *leaf,
				      int dst_item, int src_item, int nr_items)
{
	memmove_extent_buffer(leaf, btrfs_item_nr_offset(leaf, dst_item),
			      btrfs_item_nr_offset(leaf, src_item),
			      nr_items * sizeof(struct btrfs_item));
}

/*
 * Copy items from @src into @dst at the given @offset.
 *
 * @dst:	destination leaf for the items
 * @src:	source leaf for the items
 * @dst_item:	the item nr we're copying into
 * @src_item:	the item nr we're copying from
 * @nr_items:	the number of items to copy
 *
 * Wrapper around copy_extent_buffer() that does the math to get the
 * appropriate offsets into the leaf from the item numbers.
 */
static inline void copy_leaf_items(const struct extent_buffer *dst,
				   const struct extent_buffer *src,
				   int dst_item, int src_item, int nr_items)
{
	copy_extent_buffer(dst, src, btrfs_item_nr_offset(dst, dst_item),
			   btrfs_item_nr_offset(src, src_item),
			   nr_items * sizeof(struct btrfs_item));
}

/* This exists for btrfs-progs usages. */
u16 btrfs_csum_type_size(u16 type)
{
	return btrfs_csums[type].size;
}

int btrfs_super_csum_size(const struct btrfs_super_block *s)
{
	u16 t = btrfs_super_csum_type(s);
	/*
	 * csum type is validated at mount time
	 */
	return btrfs_csum_type_size(t);
}

const char *btrfs_super_csum_name(u16 csum_type)
{
	/* csum type is validated at mount time */
	return btrfs_csums[csum_type].name;
}

/*
 * Return driver name if defined, otherwise the name that's also a valid driver
 * name
 */
const char *btrfs_super_csum_driver(u16 csum_type)
{
	/* csum type is validated at mount time */
	return btrfs_csums[csum_type].driver[0] ?
		btrfs_csums[csum_type].driver :
		btrfs_csums[csum_type].name;
}

size_t __attribute_const__ btrfs_get_num_csums(void)
{
	return ARRAY_SIZE(btrfs_csums);
}

struct btrfs_path *btrfs_alloc_path(void)
{
	might_sleep();

	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
}

/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}
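
/*
 * Typical calling pattern for the two helpers above (an illustrative sketch
 * only, not tied to any particular caller):
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	...
 *	btrfs_free_path(path);
 */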

/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that have no locks or extent buffers held.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}

/*
 * We want the transaction abort to print stack trace only for errors where the
 * cause could be a bug, e.g. due to ENOSPC, and not for common errors that are
 * caused by external factors.
 */
bool __cold abort_should_print_stack(int errno)
{
	switch (errno) {
	case -EIO:
	case -EROFS:
	case -ENOMEM:
		return false;
	}
	return true;
}

/*
 * safely gets a reference on the root node of a tree. A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree. See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear. It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was COWed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}

/*
 * Cow-only roots (not-shareable trees, everything not a subvolume or reloc
 * root) just get put onto a simple dirty list. The transaction walks this
 * list to make sure they get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
		return;

	spin_lock(&fs_info->trans_lock);
	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
		/* Want the extent tree to be the last on the list */
		if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID)
			list_move_tail(&root->dirty_list,
				       &fs_info->dirty_cowonly_roots);
		else
			list_move(&root->dirty_list,
				  &fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&fs_info->trans_lock);
}

/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid. The buffer with the new root node is returned in
 * cow_ret, and this function returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root,
		    struct extent_buffer *buf,
		    struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
				     &disk_key, level, buf->start, 0,
				     BTRFS_NESTING_NEW_ROOT);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0);
	if (ret) {
		btrfs_tree_unlock(cow);
		free_extent_buffer(cow);
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	btrfs_mark_buffer_dirty(trans, cow);
	*cow_ret = cow;
	return 0;
}

/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in shareable trees and tree roots are never shared.
	 * If a block was allocated after the last snapshot and the block was
	 * not allocated by tree relocation, we know the block is not shared.
	 */
	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
	    buf != root->node &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC))) {
		if (buf != root->commit_root)
			return 1;
		/*
		 * An extent buffer that used to be the commit root may still be
		 * shared because the tree height may have increased and it
		 * became a child of a higher level root. This can happen when
		 * snapshotting a subvolume created in the current transaction.
		 */
		if (btrfs_header_generation(buf) == trans->transid)
			return 1;
	}

	return 0;
}

static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree blocks
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is that some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(trans, root, buf)) {
		ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
					       btrfs_header_level(buf), 1,
					       &refs, &flags);
		if (ret)
			return ret;
		if (unlikely(refs == 0)) {
			btrfs_crit(fs_info,
		"found 0 references for tree block at bytenr %llu level %d root %llu",
				   buf->start, btrfs_header_level(buf),
				   btrfs_root_id(root));
			ret = -EUCLEAN;
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	if (unlikely(owner == BTRFS_TREE_RELOC_OBJECTID &&
		     !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))) {
		btrfs_crit(fs_info,
"found tree block at bytenr %llu level %d root %llu refs %llu flags %llx without full backref flag set",
			   buf->start, btrfs_header_level(buf),
			   btrfs_root_id(root), refs, flags);
		ret = -EUCLEAN;
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1);
			if (ret)
				return ret;

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0);
				if (ret)
					return ret;
				ret = btrfs_inc_ref(trans, root, cow, 1);
				if (ret)
					return ret;
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
		}
		if (new_flags != 0) {
			ret = btrfs_set_disk_extent_flags(trans, buf, new_flags);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
			ret = btrfs_dec_ref(trans, root, buf, 1);
			if (ret)
				return ret;
		}
		btrfs_clear_buffer_dirty(trans, buf);
		*last_ref = 1;
	}
	return 0;
}

/*
 * does the dirty work in cow of a single block. The parent block (if
 * supplied) is updated to point to the new cow copy. The new buffer is marked
 * dirty and returned locked. If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow. This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct extent_buffer *buf,
				      struct extent_buffer *parent, int parent_slot,
				      struct extent_buffer **cow_ret,
				      u64 search_start, u64 empty_size,
				      enum btrfs_lock_nesting nest)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start = 0;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_write_locked(buf);

	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
		parent_start = parent->start;

	cow = btrfs_alloc_tree_block(trans, root, parent_start,
				     root->root_key.objectid, &disk_key, level,
				     search_start, empty_size, nest);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_tree_unlock(cow);
		free_extent_buffer(cow);
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
		if (ret) {
			btrfs_tree_unlock(cow);
			free_extent_buffer(cow);
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
	}

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;

		ret = btrfs_tree_mod_log_insert_root(root->node, cow, true);
		if (ret < 0) {
			btrfs_tree_unlock(cow);
			free_extent_buffer(cow);
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
		atomic_inc(&cow->refs);
		rcu_assign_pointer(root->node, cow);

		ret = btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
					    parent_start, last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
		if (ret < 0) {
			btrfs_tree_unlock(cow);
			free_extent_buffer(cow);
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
	} else {
		WARN_ON(trans->transid != btrfs_header_generation(parent));
		ret = btrfs_tree_mod_log_insert_key(parent, parent_slot,
						    BTRFS_MOD_LOG_KEY_REPLACE);
		if (ret) {
			btrfs_tree_unlock(cow);
			free_extent_buffer(cow);
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(trans, parent);
		if (last_ref) {
			ret = btrfs_tree_mod_log_free_eb(buf);
			if (ret) {
				btrfs_tree_unlock(cow);
				free_extent_buffer(cow);
				btrfs_abort_transaction(trans, ret);
				return ret;
			}
		}
		ret = btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
					    parent_start, last_ref);
		if (ret < 0) {
			btrfs_tree_unlock(cow);
			free_extent_buffer(cow);
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(trans, cow);
	*cow_ret = cow;
	return 0;
}

static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	if (btrfs_is_testing(root->fs_info))
		return 0;

	/* Ensure we can see the FORCE_COW bit */
	smp_mb__before_atomic();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create snapshot during committing the transaction,
	 *    after we've finished copying src root, we must COW the shared
	 *    block to ensure the metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
		return 0;
	return 1;
}

/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't COWed more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     enum btrfs_lock_nesting nest)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 search_start;
	int ret;

	if (unlikely(test_bit(BTRFS_ROOT_DELETING, &root->state))) {
		btrfs_abort_transaction(trans, -EUCLEAN);
		btrfs_crit(fs_info,
		   "attempt to COW block %llu on root %llu that is being deleted",
			   buf->start, btrfs_root_id(root));
		return -EUCLEAN;
	}

	/*
	 * COWing must happen through a running transaction, which always
	 * matches the current fs generation (it's a transaction with a state
	 * less than TRANS_STATE_UNBLOCKED). If it doesn't, then turn the fs
	 * into error state to prevent the commit of any transaction.
	 */
	if (unlikely(trans->transaction != fs_info->running_transaction ||
		     trans->transid != fs_info->generation)) {
		btrfs_abort_transaction(trans, -EUCLEAN);
		btrfs_crit(fs_info,
"unexpected transaction when attempting to COW block %llu on root %llu, transaction %llu running transaction %llu fs generation %llu",
			   buf->start, btrfs_root_id(root), trans->transid,
			   fs_info->running_transaction->transid,
			   fs_info->generation);
		return -EUCLEAN;
	}

	if (!should_cow_block(trans, root, buf)) {
		*cow_ret = buf;
		return 0;
	}

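	/*
	 * Round the allocation hint down to a 1GiB boundary, so the COW copy
	 * tends to be placed in the same region as the original block. This
	 * note is descriptive only: the allocator treats it as a hint, not a
	 * guarantee.
	 */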
	search_start = buf->start & ~((u64)SZ_1G - 1);

	/*
	 * Before CoWing this block for later modification, check if it's
	 * the subtree root and do the delayed subtree trace if needed.
	 *
	 * Also we don't care about the error, as it's handled internally.
	 */
	btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
	ret = __btrfs_cow_block(trans, root, buf, parent,
				parent_slot, cow_ret, search_start, 0, nest);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}
ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO);

/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}
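
/*
 * Example (assuming a 16KiB nodesize): blocks at bytenrs 0 and 40960 count
 * as close, since the gap of 24576 bytes is below the 32KiB threshold, while
 * blocks at 0 and 65536 do not (gap of 49152 bytes).
 */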

#ifdef __LITTLE_ENDIAN

/*
 * Compare two keys, on little-endian the disk order is same as CPU order and
 * we can avoid the conversion.
 */
static int comp_keys(const struct btrfs_disk_key *disk_key,
		     const struct btrfs_key *k2)
{
	const struct btrfs_key *k1 = (const struct btrfs_key *)disk_key;

	return btrfs_comp_cpu_keys(k1, k2);
}

#else

/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(const struct btrfs_disk_key *disk,
		     const struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}
#endif

/*
 * same as comp_keys only with two btrfs_keys
 */
int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
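
/*
 * Keys therefore sort lexicographically by (objectid, type, offset). As an
 * illustration, for inode 256 the key (256, BTRFS_INODE_ITEM_KEY, 0) sorts
 * before (256, BTRFS_INODE_REF_KEY, 256), because the type field is compared
 * before the offset field ever matters.
 */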

/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cur;
	u64 blocknr;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	/*
	 * COWing must happen through a running transaction, which always
	 * matches the current fs generation (it's a transaction with a state
	 * less than TRANS_STATE_UNBLOCKED). If it doesn't, then turn the fs
	 * into error state to prevent the commit of any transaction.
	 */
	if (unlikely(trans->transaction != fs_info->running_transaction ||
		     trans->transid != fs_info->generation)) {
		btrfs_abort_transaction(trans, -EUCLEAN);
		btrfs_crit(fs_info,
"unexpected transaction when attempting to reallocate parent %llu for root %llu, transaction %llu running transaction %llu fs generation %llu",
			   parent->start, btrfs_root_id(root), trans->transid,
			   fs_info->running_transaction->transid,
			   fs_info->generation);
		return -EUCLEAN;
	}

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = fs_info->nodesize;
	end_slot = parent_nritems - 1;

	if (parent_nritems <= 1)
		return 0;

	for (i = start_slot; i <= end_slot; i++) {
		int close = 1;

		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = btrfs_read_node_slot(parent, i);
		if (IS_ERR(cur))
			return PTR_ERR(cur);
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize),
					BTRFS_NESTING_COW);
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}

/*
 * Search for a key in the given extent_buffer.
 *
 * The lower boundary for the search is specified by the slot number @first_slot.
 * Use a value of 0 to search over the whole extent buffer. Works for both
 * leaves and nodes.
 *
 * The slot in the extent buffer is returned via @slot. If the key exists in the
 * extent buffer, then @slot will point to the slot where the key is, otherwise
 * it points to the slot where you would insert the key.
 *
 * Slot may point to the total number of items (i.e. one position beyond the last
 * key) if the key is bigger than the last key in the extent buffer.
 */
int btrfs_bin_search(struct extent_buffer *eb, int first_slot,
		     const struct btrfs_key *key, int *slot)
{
	unsigned long p;
	int item_size;
	/*
	 * Use unsigned types for the low and high slots, so that we get a more
	 * efficient division in the search loop below.
	 */
	u32 low = first_slot;
	u32 high = btrfs_header_nritems(eb);
	int ret;
	const int key_size = sizeof(struct btrfs_disk_key);

	if (unlikely(low > high)) {
		btrfs_err(eb->fs_info,
		 "%s: low (%u) > high (%u) eb %llu owner %llu level %d",
			  __func__, low, high, eb->start,
			  btrfs_header_owner(eb), btrfs_header_level(eb));
		return -EINVAL;
	}

	if (btrfs_header_level(eb) == 0) {
		p = offsetof(struct btrfs_leaf, items);
		item_size = sizeof(struct btrfs_item);
	} else {
		p = offsetof(struct btrfs_node, ptrs);
		item_size = sizeof(struct btrfs_key_ptr);
	}

	while (low < high) {
		unsigned long oip;
		unsigned long offset;
		struct btrfs_disk_key *tmp;
		struct btrfs_disk_key unaligned;
		int mid;

		mid = (low + high) / 2;
		offset = p + mid * item_size;
		oip = offset_in_page(offset);

		if (oip + key_size <= PAGE_SIZE) {
			const unsigned long idx = get_eb_page_index(offset);
			char *kaddr = page_address(eb->pages[idx]);

			oip = get_eb_offset_in_page(eb, offset);
			tmp = (struct btrfs_disk_key *)(kaddr + oip);
		} else {
			read_extent_buffer(eb, &unaligned, offset, key_size);
			tmp = &unaligned;
		}

		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}
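
/*
 * Illustration of the return contract above: for a block holding the keys
 * { (1, t, 0), (5, t, 0), (9, t, 0) }, searching for (5, t, 0) returns 0 with
 * *slot == 1, searching for (6, t, 0) returns 1 with *slot == 2 (the insert
 * position), and searching for (10, t, 0) returns 1 with *slot == 3, one
 * past the last key.
 */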

static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}

/* given a node and slot number, this reads the block it points to. The
 * extent buffer is returned with a reference taken (but unlocked).
 */
struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
					   int slot)
{
	int level = btrfs_header_level(parent);
	struct btrfs_tree_parent_check check = { 0 };
	struct extent_buffer *eb;

	if (slot < 0 || slot >= btrfs_header_nritems(parent))
		return ERR_PTR(-ENOENT);

	ASSERT(level);

	check.level = level - 1;
	check.transid = btrfs_node_ptr_generation(parent, slot);
	check.owner_root = btrfs_header_owner(parent);
	check.has_first_key = true;
	btrfs_node_key_to_cpu(parent, &check.first_key, slot);

	eb = read_tree_block(parent->fs_info, btrfs_node_blockptr(parent, slot),
			     &check);
	if (IS_ERR(eb))
		return eb;
	if (!extent_buffer_uptodate(eb)) {
		free_extent_buffer(eb);
		return ERR_PTR(-EIO);
	}

	return eb;
}

/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion. We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path, int level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	u64 orig_ptr;

	ASSERT(level > 0);

	mid = path->nodes[level];

	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK);
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	/*
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
	 */
	if (!parent) {
		struct extent_buffer *child;

		if (btrfs_header_nritems(mid) != 1)
			return 0;

		/* promote the child to a root */
		child = btrfs_read_node_slot(mid, 0);
		if (IS_ERR(child)) {
			ret = PTR_ERR(child);
			goto out;
		}

		btrfs_tree_lock(child);
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child,
				      BTRFS_NESTING_COW);
		if (ret) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			goto out;
		}

		ret = btrfs_tree_mod_log_insert_root(root->node, child, true);
		if (ret < 0) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
		rcu_assign_pointer(root->node, child);

		add_root_to_dirty_list(root);
		btrfs_tree_unlock(child);

		path->locks[level] = 0;
		path->nodes[level] = NULL;
		btrfs_clear_buffer_dirty(trans, mid);
		btrfs_tree_unlock(mid);
		/* once for the path */
		free_extent_buffer(mid);

		root_sub_used(root, mid->len);
		ret = btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
		/* once for the root ptr */
		free_extent_buffer_stale(mid);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
		return 0;
	}
	if (btrfs_header_nritems(mid) >
	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
		return 0;

	if (pslot) {
		left = btrfs_read_node_slot(parent, pslot - 1);
		if (IS_ERR(left)) {
			ret = PTR_ERR(left);
			left = NULL;
			goto out;
		}

		__btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
		wret = btrfs_cow_block(trans, root, left,
				       parent, pslot - 1, &left,
				       BTRFS_NESTING_LEFT_COW);
		if (wret) {
			ret = wret;
			goto out;
		}
	}

	if (pslot + 1 < btrfs_header_nritems(parent)) {
		right = btrfs_read_node_slot(parent, pslot + 1);
		if (IS_ERR(right)) {
			ret = PTR_ERR(right);
			right = NULL;
			goto out;
		}

		__btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
		wret = btrfs_cow_block(trans, root, right,
				       parent, pslot + 1, &right,
				       BTRFS_NESTING_RIGHT_COW);
		if (wret) {
			ret = wret;
			goto out;
		}
	}

	/* first, try to make some room in the middle buffer */
	if (left) {
		orig_slot += btrfs_header_nritems(left);
		wret = push_node_left(trans, left, mid, 1);
		if (wret < 0)
			ret = wret;
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		wret = push_node_left(trans, mid, right, 1);
		if (wret < 0 && wret != -ENOSPC)
			ret = wret;
		if (btrfs_header_nritems(right) == 0) {
			btrfs_clear_buffer_dirty(trans, right);
			btrfs_tree_unlock(right);
			ret = btrfs_del_ptr(trans, root, path, level + 1, pslot + 1);
			if (ret < 0) {
				free_extent_buffer_stale(right);
				right = NULL;
				goto out;
			}
			root_sub_used(root, right->len);
			ret = btrfs_free_tree_block(trans, btrfs_root_id(root), right,
						    0, 1);
			free_extent_buffer_stale(right);
			right = NULL;
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
		} else {
			struct btrfs_disk_key right_key;

			btrfs_node_key(right, &right_key, 0);
			ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
					BTRFS_MOD_LOG_KEY_REPLACE);
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
			btrfs_set_node_key(parent, &right_key, pslot + 1);
			btrfs_mark_buffer_dirty(trans, parent);
		}
	}
	if (btrfs_header_nritems(mid) == 1) {
		/*
		 * we're not allowed to leave a node with one item in the
		 * tree during a delete. A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right
		 */
		if (unlikely(!left)) {
			btrfs_crit(fs_info,
"missing left child when middle child only has 1 item, parent bytenr %llu level %d mid bytenr %llu root %llu",
				   parent->start, btrfs_header_level(parent),
				   mid->start, btrfs_root_id(root));
			ret = -EUCLEAN;
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
		wret = balance_node_right(trans, mid, left);
		if (wret < 0) {
			ret = wret;
			goto out;
		}
		if (wret == 1) {
			wret = push_node_left(trans, left, mid, 1);
			if (wret < 0)
				ret = wret;
		}
		BUG_ON(wret == 1);
	}
	if (btrfs_header_nritems(mid) == 0) {
		btrfs_clear_buffer_dirty(trans, mid);
		btrfs_tree_unlock(mid);
		ret = btrfs_del_ptr(trans, root, path, level + 1, pslot);
		if (ret < 0) {
			free_extent_buffer_stale(mid);
			mid = NULL;
			goto out;
		}
		root_sub_used(root, mid->len);
		ret = btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
		free_extent_buffer_stale(mid);
		mid = NULL;
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	} else {
		/* update the parent key to reflect our changes */
		struct btrfs_disk_key mid_key;

		btrfs_node_key(mid, &mid_key, 0);
		ret = btrfs_tree_mod_log_insert_key(parent, pslot,
						    BTRFS_MOD_LOG_KEY_REPLACE);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
		btrfs_set_node_key(parent, &mid_key, pslot);
		btrfs_mark_buffer_dirty(trans, parent);
	}

	/* update the path */
	if (left) {
		if (btrfs_header_nritems(left) > orig_slot) {
			atomic_inc(&left->refs);
			/* left was locked after cow */
			path->nodes[level] = left;
			path->slots[level + 1] -= 1;
			path->slots[level] = orig_slot;
			if (mid) {
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			}
		} else {
			orig_slot -= btrfs_header_nritems(left);
			path->slots[level] = orig_slot;
		}
	}
	/* double check we haven't messed things up */
	if (orig_ptr !=
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
		BUG();
out:
	if (right) {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	if (left) {
		if (path->nodes[level] != left)
			btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	return ret;
}

/* Node balancing for insertion. Here we only split or push nodes around
 * when they are completely full. This is also done top down, so we
 * have to be pessimistic.
 */
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, int level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];

	if (level == 0)
		return 1;

	mid = path->nodes[level];
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	if (!parent)
		return 1;

	/* first, try to make some room in the middle buffer */
	if (pslot) {
		u32 left_nr;

		left = btrfs_read_node_slot(parent, pslot - 1);
		if (IS_ERR(left))
			return PTR_ERR(left);

		__btrfs_tree_lock(left, BTRFS_NESTING_LEFT);

		left_nr = btrfs_header_nritems(left);
		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, left, parent,
					      pslot - 1, &left,
					      BTRFS_NESTING_LEFT_COW);
			if (ret)
				wret = 1;
			else {
				wret = push_node_left(trans, left, mid, 0);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;

			orig_slot += left_nr;
			btrfs_node_key(mid, &disk_key, 0);
			ret = btrfs_tree_mod_log_insert_key(parent, pslot,
					BTRFS_MOD_LOG_KEY_REPLACE);
			if (ret < 0) {
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
				btrfs_abort_transaction(trans, ret);
				return ret;
			}
			btrfs_set_node_key(parent, &disk_key, pslot);
			btrfs_mark_buffer_dirty(trans, parent);
			if (btrfs_header_nritems(left) > orig_slot) {
				path->nodes[level] = left;
				path->slots[level + 1] -= 1;
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				orig_slot -=
					btrfs_header_nritems(left);
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
			}
			return 0;
		}
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (pslot + 1 < btrfs_header_nritems(parent)) {
		u32 right_nr;

		right = btrfs_read_node_slot(parent, pslot + 1);
		if (IS_ERR(right))
			return PTR_ERR(right);

		__btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);

		right_nr = btrfs_header_nritems(right);
		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, right,
					      parent, pslot + 1,
					      &right, BTRFS_NESTING_RIGHT_COW);
			if (ret)
				wret = 1;
			else {
				wret = balance_node_right(trans, right, mid);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_node_key(right, &disk_key, 0);
			ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
					BTRFS_MOD_LOG_KEY_REPLACE);
			if (ret < 0) {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
				btrfs_abort_transaction(trans, ret);
				return ret;
			}
			btrfs_set_node_key(parent, &disk_key, pslot + 1);
			btrfs_mark_buffer_dirty(trans, parent);

			if (btrfs_header_nritems(mid) <= orig_slot) {
				path->nodes[level] = right;
				path->slots[level + 1] += 1;
				path->slots[level] = orig_slot -
					btrfs_header_nritems(mid);
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
			}
			return 0;
		}
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 1;
}

/*
 * readahead one full node of leaves, finding things that are close
 * to the block in 'slot', and triggering ra on them.
 */
static void reada_for_search(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path,
			     int level, int slot, u64 objectid)
{
	struct extent_buffer *node;
	struct btrfs_disk_key disk_key;
	u32 nritems;
	u64 search;
	u64 target;
	u64 nread = 0;
	u64 nread_max;
	u32 nr;
	u32 blocksize;
	u32 nscan = 0;

	if (level != 1 && path->reada != READA_FORWARD_ALWAYS)
		return;

	if (!path->nodes[level])
		return;

	node = path->nodes[level];

	/*
	 * Since the time between visiting leaves is much shorter than the time
	 * between visiting nodes, limit read ahead of nodes to 1, to avoid too
	 * much IO at once (possibly random).
	 */
	if (path->reada == READA_FORWARD_ALWAYS) {
		if (level > 1)
			nread_max = node->fs_info->nodesize;
		else
			nread_max = SZ_128K;
	} else {
		nread_max = SZ_64K;
	}

	search = btrfs_node_blockptr(node, slot);
	blocksize = fs_info->nodesize;
	if (path->reada != READA_FORWARD_ALWAYS) {
		struct extent_buffer *eb;

		eb = find_extent_buffer(fs_info, search);
		if (eb) {
			free_extent_buffer(eb);
			return;
		}
	}

	target = search;

	nritems = btrfs_header_nritems(node);
	nr = slot;

	while (1) {
		if (path->reada == READA_BACK) {
			if (nr == 0)
				break;
			nr--;
		} else if (path->reada == READA_FORWARD ||
			   path->reada == READA_FORWARD_ALWAYS) {
			nr++;
			if (nr >= nritems)
				break;
		}
		if (path->reada == READA_BACK && objectid) {
			btrfs_node_key(node, &disk_key, nr);
			if (btrfs_disk_key_objectid(&disk_key) != objectid)
				break;
		}
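		/*
		 * Only read ahead blocks that sit within a 64KiB window
		 * around the target block, which keeps the extra IO mostly
		 * sequential.
		 */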
		search = btrfs_node_blockptr(node, nr);
		if (path->reada == READA_FORWARD_ALWAYS ||
		    (search <= target && target - search <= 65536) ||
		    (search > target && search - target <= 65536)) {
			btrfs_readahead_node_child(node, nr);
			nread += blocksize;
		}
		nscan++;
		if (nread > nread_max || nscan > 32)
			break;
	}
}

static noinline void reada_for_balance(struct btrfs_path *path, int level)
{
	struct extent_buffer *parent;
	int slot;
	int nritems;

	parent = path->nodes[level + 1];
	if (!parent)
		return;

	nritems = btrfs_header_nritems(parent);
	slot = path->slots[level + 1];

	if (slot > 0)
		btrfs_readahead_node_child(parent, slot - 1);
	if (slot + 1 < nritems)
		btrfs_readahead_node_child(parent, slot + 1);
}


/*
 * when we walk down the tree, it is usually safe to unlock the higher layers
 * in the tree. The exceptions are when our path goes through slot 0, because
 * operations on the tree might require changing key pointers higher up in the
 * tree.
 *
 * callers might also have set path->keep_locks, which tells this code to keep
 * the lock if the path points to the last slot in the block. This is part of
 * walking through the tree, and selecting the next slot in the higher block.
 *
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. So
 * if lowest_unlock is 1, level 0 won't be unlocked
 */
static noinline void unlock_up(struct btrfs_path *path, int level,
			       int lowest_unlock, int min_write_lock_level,
			       int *write_lock_level)
{
	int i;
	int skip_level = level;
	bool check_skip = true;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			break;
		if (!path->locks[i])
			break;

		if (check_skip) {
			if (path->slots[i] == 0) {
				skip_level = i + 1;
				continue;
			}

			if (path->keep_locks) {
				u32 nritems;

				nritems = btrfs_header_nritems(path->nodes[i]);
				if (nritems < 1 || path->slots[i] >= nritems - 1) {
					skip_level = i + 1;
					continue;
				}
			}
		}

		if (i >= lowest_unlock && i > skip_level) {
			check_skip = false;
			btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
			path->locks[i] = 0;
			if (write_lock_level &&
			    i > min_write_lock_level &&
			    i <= *write_lock_level) {
				*write_lock_level = i - 1;
			}
		}
	}
}

/*
 * Helper function for btrfs_search_slot() and other functions that do a search
 * on a btree. The goal is to find a tree block in the cache (the radix tree at
 * fs_info->buffer_radix), but if we can't find it, or it's not up to date, read
 * its pages from disk.
 *
 * Returns -EAGAIN, with the path unlocked, if the caller needs to repeat the
 * whole btree search, starting again from the current root node.
 */
static int
read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
		      struct extent_buffer **eb_ret, int level, int slot,
		      const struct btrfs_key *key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_tree_parent_check check = { 0 };
	u64 blocknr;
	u64 gen;
	struct extent_buffer *tmp;
	int ret;
	int parent_level;
	bool unlock_up;

	unlock_up = ((level + 1 < BTRFS_MAX_LEVEL) && p->locks[level + 1]);
	blocknr = btrfs_node_blockptr(*eb_ret, slot);
	gen = btrfs_node_ptr_generation(*eb_ret, slot);
	parent_level = btrfs_header_level(*eb_ret);
	btrfs_node_key_to_cpu(*eb_ret, &check.first_key, slot);
	check.has_first_key = true;
	check.level = parent_level - 1;
	check.transid = gen;
	check.owner_root = root->root_key.objectid;

	/*
	 * If we need to read an extent buffer from disk and we are holding locks
	 * on upper level nodes, we unlock all the upper nodes before reading the
	 * extent buffer, and then return -EAGAIN to the caller as it needs to
	 * restart the search. We don't release the lock on the current level
	 * because we need to walk this node to figure out which blocks to read.
	 */
	tmp = find_extent_buffer(fs_info, blocknr);
	if (tmp) {
		if (p->reada == READA_FORWARD_ALWAYS)
			reada_for_search(fs_info, p, level, slot, key->objectid);

		/* first we do an atomic uptodate check */
		if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
			/*
			 * Do extra check for first_key, eb can be stale due to
			 * being cached, read from scrub, or have multiple
			 * parents (shared tree blocks).
			 */
			if (btrfs_verify_level_key(tmp,
					parent_level - 1, &check.first_key, gen)) {
				free_extent_buffer(tmp);
				return -EUCLEAN;
			}
			*eb_ret = tmp;
			return 0;
		}

		if (p->nowait) {
			free_extent_buffer(tmp);
			return -EAGAIN;
		}

		if (unlock_up)
			btrfs_unlock_up_safe(p, level + 1);

		/* now we're allowed to do a blocking uptodate check */
		ret = btrfs_read_extent_buffer(tmp, &check);
		if (ret) {
			free_extent_buffer(tmp);
			btrfs_release_path(p);
			return -EIO;
		}
		if (btrfs_check_eb_owner(tmp, root->root_key.objectid)) {
			free_extent_buffer(tmp);
			btrfs_release_path(p);
			return -EUCLEAN;
		}

		if (unlock_up)
			ret = -EAGAIN;

		goto out;
	} else if (p->nowait) {
		return -EAGAIN;
	}

	if (unlock_up) {
		btrfs_unlock_up_safe(p, level + 1);
		ret = -EAGAIN;
	} else {
		ret = 0;
	}

	if (p->reada != READA_NONE)
		reada_for_search(fs_info, p, level, slot, key->objectid);

	tmp = read_tree_block(fs_info, blocknr, &check);
	if (IS_ERR(tmp)) {
		btrfs_release_path(p);
		return PTR_ERR(tmp);
	}
	/*
	 * If the read above didn't mark this buffer up to date,
	 * it will never end up being up to date. Set ret to EIO now
	 * and give up so that our caller doesn't loop forever
	 * on our EAGAINs.
	 */
	if (!extent_buffer_uptodate(tmp))
		ret = -EIO;

out:
	if (ret == 0) {
		*eb_ret = tmp;
	} else {
		free_extent_buffer(tmp);
		btrfs_release_path(p);
	}

	return ret;
}

/*
 * helper function for btrfs_search_slot. This does all of the checks
 * for node-level blocks and does any balancing required based on
 * the ins_len.
 *
 * If no extra work was required, zero is returned. If we had to
 * drop the path, -EAGAIN is returned and btrfs_search_slot must
 * start over
 */
static int
setup_nodes_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer *b, int level, int ins_len,
		       int *write_lock_level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			return -EAGAIN;
		}

		reada_for_balance(p, level);
		ret = split_node(trans, root, p, level);

		b = p->nodes[level];
	} else if (ins_len < 0 && btrfs_header_nritems(b) <
		   BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			return -EAGAIN;
		}

		reada_for_balance(p, level);
		ret = balance_level(trans, root, p, level);
		if (ret)
			return ret;

		b = p->nodes[level];
		if (!b) {
			btrfs_release_path(p);
			return -EAGAIN;
		}
		BUG_ON(btrfs_header_nritems(b) == 1);
	}
	return ret;
}
1821
1822 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
1823 u64 iobjectid, u64 ioff, u8 key_type,
1824 struct btrfs_key *found_key)
1825 {
1826 int ret;
1827 struct btrfs_key key;
1828 struct extent_buffer *eb;
1829
1830 ASSERT(path);
1831 ASSERT(found_key);
1832
1833 key.type = key_type;
1834 key.objectid = iobjectid;
1835 key.offset = ioff;
1836
1837 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
1838 if (ret < 0)
1839 return ret;
1840
1841 eb = path->nodes[0];
1842 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
1843 ret = btrfs_next_leaf(fs_root, path);
1844 if (ret)
1845 return ret;
1846 eb = path->nodes[0];
1847 }
1848
1849 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
1850 if (found_key->type != key.type ||
1851 found_key->objectid != key.objectid)
1852 return 1;
1853
1854 return 0;
1855 }
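
/*
 * Example use of btrfs_find_item() (an illustrative sketch; the caller
 * context and the ino value are assumptions, and the not-found/error returns
 * still need handling):
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct btrfs_key found_key;
 *	int ret;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_find_item(fs_root, path, ino, 0,
 *			      BTRFS_INODE_REF_KEY, &found_key);
 *	if (ret == 0)
 *		... the item is at path->nodes[0], slot path->slots[0] ...
 *	btrfs_free_path(path);
 */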
1856
1857 static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
1858 struct btrfs_path *p,
1859 int write_lock_level)
1860 {
1861 struct extent_buffer *b;
1862 int root_lock = 0;
1863 int level = 0;
1864
1865 if (p->search_commit_root) {
1866 b = root->commit_root;
1867 atomic_inc(&b->refs);
1868 level = btrfs_header_level(b);
1869 /*
1870 * Ensure that all callers have set skip_locking when
1871 * p->search_commit_root = 1.
1872 */
1873 ASSERT(p->skip_locking == 1);
1874
1875 goto out;
1876 }
1877
1878 if (p->skip_locking) {
1879 b = btrfs_root_node(root);
1880 level = btrfs_header_level(b);
1881 goto out;
1882 }
1883
1884 /* We try very hard to do read locks on the root */
1885 root_lock = BTRFS_READ_LOCK;
1886
1887 /*
1888 * If the level is set to maximum, we can skip trying to get the read
1889 * lock.
1890 */
1891 if (write_lock_level < BTRFS_MAX_LEVEL) {
1892 /*
1893 * We don't know the level of the root node until we actually
1894 * have it read locked
1895 */
1896 if (p->nowait) {
1897 b = btrfs_try_read_lock_root_node(root);
1898 if (IS_ERR(b))
1899 return b;
1900 } else {
1901 b = btrfs_read_lock_root_node(root);
1902 }
1903 level = btrfs_header_level(b);
1904 if (level > write_lock_level)
1905 goto out;
1906
1907 /* Whoops, must trade for write lock */
1908 btrfs_tree_read_unlock(b);
1909 free_extent_buffer(b);
1910 }
1911
1912 b = btrfs_lock_root_node(root);
1913 root_lock = BTRFS_WRITE_LOCK;
1914
1915 /* The level might have changed, check again */
1916 level = btrfs_header_level(b);
1917
1918 out:
1919 /*
1920 * The root may have failed to write out at some point, and thus is no
1921 * longer valid, return an error in this case.
1922 */
1923 if (!extent_buffer_uptodate(b)) {
1924 if (root_lock)
1925 btrfs_tree_unlock_rw(b, root_lock);
1926 free_extent_buffer(b);
1927 return ERR_PTR(-EIO);
1928 }
1929
1930 p->nodes[level] = b;
1931 if (!p->skip_locking)
1932 p->locks[level] = root_lock;
1933 /*
1934 * Callers are responsible for dropping b's references.
1935 */
1936 return b;
1937 }
1938
1939 /*
1940 * Replace the extent buffer at the lowest level of the path with a cloned
1941 * version. The purpose is to be able to use it safely, after releasing the
1942 * commit root semaphore, even if relocation is happening in parallel, the
1943 * transaction used for relocation is committed and the extent buffer is
1944 * reallocated in the next transaction.
1945 *
1946 * This is used in a context where the caller does not prevent transaction
1947 * commits from happening, either by holding a transaction handle or holding
1948 * some lock, while it's doing searches through a commit root.
1949 * At the moment it's only used for send operations.
1950 */
1951 static int finish_need_commit_sem_search(struct btrfs_path *path)
1952 {
1953 const int i = path->lowest_level;
1954 const int slot = path->slots[i];
1955 struct extent_buffer *lowest = path->nodes[i];
1956 struct extent_buffer *clone;
1957
1958 ASSERT(path->need_commit_sem);
1959
1960 if (!lowest)
1961 return 0;
1962
1963 lockdep_assert_held_read(&lowest->fs_info->commit_root_sem);
1964
1965 clone = btrfs_clone_extent_buffer(lowest);
1966 if (!clone)
1967 return -ENOMEM;
1968
1969 btrfs_release_path(path);
1970 path->nodes[i] = clone;
1971 path->slots[i] = slot;
1972
1973 return 0;
1974 }
1975
1976 static inline int search_for_key_slot(struct extent_buffer *eb,
1977 int search_low_slot,
1978 const struct btrfs_key *key,
1979 int prev_cmp,
1980 int *slot)
1981 {
1982 /*
1983 * If a previous call to btrfs_bin_search() on a parent node returned an
1984 * exact match (prev_cmp == 0), we can safely assume the target key will
1985 * always be at slot 0 on lower levels, since each key pointer
1986 * (struct btrfs_key_ptr) refers to the lowest key accessible from the
1987 * subtree it points to. Thus we can skip searching lower levels.
1988 */
1989 if (prev_cmp == 0) {
1990 *slot = 0;
1991 return 0;
1992 }
1993
1994 return btrfs_bin_search(eb, search_low_slot, key, slot);
1995 }
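
/*
 * Worked example for the prev_cmp == 0 shortcut above (values made up for
 * illustration): if a node has the key pointers
 *
 *	slot 0: (257 INODE_ITEM 0) -> block A
 *	slot 1: (300 INODE_ITEM 0) -> block B
 *
 * and the binary search for (257 INODE_ITEM 0) matched slot 0 exactly, then
 * block A's first key is also (257 INODE_ITEM 0), since a key pointer always
 * holds the lowest key of the subtree it points to. The same holds
 * recursively, so every lower level can return slot 0 without another binary
 * search.
 */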
1996
1997 static int search_leaf(struct btrfs_trans_handle *trans,
1998 struct btrfs_root *root,
1999 const struct btrfs_key *key,
2000 struct btrfs_path *path,
2001 int ins_len,
2002 int prev_cmp)
2003 {
2004 struct extent_buffer *leaf = path->nodes[0];
2005 int leaf_free_space = -1;
2006 int search_low_slot = 0;
2007 int ret;
2008 bool do_bin_search = true;
2009
2010 /*
2011 * If we are doing an insertion, the leaf has enough free space and the
2012 * destination slot for the key is not slot 0, then we can unlock our
2013 * write lock on the parent, and any other upper nodes, before doing the
2014 * binary search on the leaf (with search_for_key_slot()), allowing other
2015 * tasks to lock the parent and any other upper nodes.
2016 */
2017 if (ins_len > 0) {
2018 /*
2019 * Cache the leaf free space, since we will need it later and it
2020 * will not change until then.
2021 */
2022 leaf_free_space = btrfs_leaf_free_space(leaf);
2023
2024 /*
2025 * !path->locks[1] means we have a single node tree, the leaf is
2026 * the root of the tree.
2027 */
2028 if (path->locks[1] && leaf_free_space >= ins_len) {
2029 struct btrfs_disk_key first_key;
2030
2031 ASSERT(btrfs_header_nritems(leaf) > 0);
2032 btrfs_item_key(leaf, &first_key, 0);
2033
2034 /*
2035 * Doing the extra comparison with the first key is cheap,
2036 * taking into account that the first key is very likely
2037 * already in a cache line because it immediately follows
2038 * the extent buffer's header and we have recently accessed
2039 * the header's level field.
2040 */
2041 ret = comp_keys(&first_key, key);
2042 if (ret < 0) {
2043 /*
2044 * The first key is smaller than the key we want
2045 * to insert, so we are safe to unlock all upper
2046 * nodes and we have to do the binary search.
2047 *
2048 * We do use btrfs_unlock_up_safe() and not
2049				 * unlock_up() because the latter does not unlock
2050 * nodes with a slot of 0 - we can safely unlock
2051 * any node even if its slot is 0 since in this
2052 * case the key does not end up at slot 0 of the
2053 * leaf and there's no need to split the leaf.
2054 */
2055 btrfs_unlock_up_safe(path, 1);
2056 search_low_slot = 1;
2057 } else {
2058 /*
2059				 * The first key is >= the key we want to
2060 * insert, so we can skip the binary search as
2061 * the target key will be at slot 0.
2062 *
2063				 * We cannot unlock upper nodes when the key is
2064 * less than the first key, because we will need
2065 * to update the key at slot 0 of the parent node
2066 * and possibly of other upper nodes too.
2067 * If the key matches the first key, then we can
2068 * unlock all the upper nodes, using
2069 * btrfs_unlock_up_safe() instead of unlock_up()
2070 * as stated above.
2071 */
2072 if (ret == 0)
2073 btrfs_unlock_up_safe(path, 1);
2074 /*
2075 * ret is already 0 or 1, matching the result of
2076 * a btrfs_bin_search() call, so there is no need
2077 * to adjust it.
2078 */
2079 do_bin_search = false;
2080 path->slots[0] = 0;
2081 }
2082 }
2083 }
2084
2085 if (do_bin_search) {
2086 ret = search_for_key_slot(leaf, search_low_slot, key,
2087 prev_cmp, &path->slots[0]);
2088 if (ret < 0)
2089 return ret;
2090 }
2091
2092 if (ins_len > 0) {
2093 /*
2094 * Item key already exists. In this case, if we are allowed to
2095 * insert the item (for example, in dir_item case, item key
2096 * collision is allowed), it will be merged with the original
2097 * item. Only the item size grows, no new btrfs item will be
2098		 * added. If search_for_extension is not set, ins_len already
2099		 * accounts for the size of struct btrfs_item, so deduct it here
2100		 * to keep the leaf space check correct.
2101 */
2102 if (ret == 0 && !path->search_for_extension) {
2103 ASSERT(ins_len >= sizeof(struct btrfs_item));
2104 ins_len -= sizeof(struct btrfs_item);
2105 }
2106
2107 ASSERT(leaf_free_space >= 0);
2108
2109 if (leaf_free_space < ins_len) {
2110 int err;
2111
2112 err = split_leaf(trans, root, key, path, ins_len,
2113 (ret == 0));
2114 ASSERT(err <= 0);
2115 if (WARN_ON(err > 0))
2116 err = -EUCLEAN;
2117 if (err)
2118 ret = err;
2119 }
2120 }
2121
2122 return ret;
2123 }
2124
2125 /*
2126 * btrfs_search_slot - look for a key in a tree and perform necessary
2127 * modifications to preserve tree invariants.
2128 *
2129 * @trans: Handle of transaction, used when modifying the tree
2130 * @p: Holds all btree nodes along the search path
2131 * @root: The root node of the tree
2132 * @key: The key we are looking for
2133 * @ins_len: Indicates purpose of search:
2134 *		>0 for inserts, it is the size of the item inserted (*)
2135 * <0 for deletions
2136 * 0 for plain searches, not modifying the tree
2137 *
2138 * (*) If size of item inserted doesn't include
2139 * sizeof(struct btrfs_item), then p->search_for_extension must
2140 * be set.
2141 * @cow: boolean should CoW operations be performed. Must always be 1
2142 * when modifying the tree.
2143 *
2144 * If @ins_len > 0, nodes and leaves will be split as we walk down the tree.
2145 * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible)
2146 *
2147 * If @key is found, 0 is returned and you can find the item in the leaf level
2148 * of the path (level 0)
2149 *
2150 * If @key isn't found, 1 is returned and the leaf level of the path (level 0)
2151 * points to the slot where it should be inserted
2152 *
2153 * If an error is encountered while searching the tree a negative error number
2154 * is returned
2155 */
2156 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2157 const struct btrfs_key *key, struct btrfs_path *p,
2158 int ins_len, int cow)
2159 {
2160 struct btrfs_fs_info *fs_info;
2161 struct extent_buffer *b;
2162 int slot;
2163 int ret;
2164 int err;
2165 int level;
2166 int lowest_unlock = 1;
2167 /* everything at write_lock_level or lower must be write locked */
2168 int write_lock_level = 0;
2169 u8 lowest_level = 0;
2170 int min_write_lock_level;
2171 int prev_cmp;
2172
2173 if (!root)
2174 return -EINVAL;
2175
2176 fs_info = root->fs_info;
2177 might_sleep();
2178
2179 lowest_level = p->lowest_level;
2180 WARN_ON(lowest_level && ins_len > 0);
2181 WARN_ON(p->nodes[0] != NULL);
2182 BUG_ON(!cow && ins_len);
2183
2184 /*
2185 * For now only allow nowait for read only operations. There's no
2186 * strict reason why we can't, we just only need it for reads so it's
2187 * only implemented for reads.
2188 */
2189 ASSERT(!p->nowait || !cow);
2190
2191 if (ins_len < 0) {
2192 lowest_unlock = 2;
2193
2194		/* when we are removing items, we might have to go up to level
2195		 * two as we update tree pointers. Make sure we keep a write
2196		 * lock on those levels as well
2197 */
2198 write_lock_level = 2;
2199 } else if (ins_len > 0) {
2200 /*
2201 * for inserting items, make sure we have a write lock on
2202 * level 1 so we can update keys
2203 */
2204 write_lock_level = 1;
2205 }
2206
2207 if (!cow)
2208 write_lock_level = -1;
2209
2210 if (cow && (p->keep_locks || p->lowest_level))
2211 write_lock_level = BTRFS_MAX_LEVEL;
2212
2213 min_write_lock_level = write_lock_level;
2214
2215 if (p->need_commit_sem) {
2216 ASSERT(p->search_commit_root);
2217 if (p->nowait) {
2218 if (!down_read_trylock(&fs_info->commit_root_sem))
2219 return -EAGAIN;
2220 } else {
2221 down_read(&fs_info->commit_root_sem);
2222 }
2223 }
2224
2225 again:
2226 prev_cmp = -1;
2227 b = btrfs_search_slot_get_root(root, p, write_lock_level);
2228 if (IS_ERR(b)) {
2229 ret = PTR_ERR(b);
2230 goto done;
2231 }
2232
2233 while (b) {
2234 int dec = 0;
2235
2236 level = btrfs_header_level(b);
2237
2238 if (cow) {
2239 bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
2240
2241 /*
2242 * if we don't really need to cow this block
2243 * then we don't want to set the path blocking,
2244 * so we test it here
2245 */
2246 if (!should_cow_block(trans, root, b))
2247 goto cow_done;
2248
2249 /*
2250 * must have write locks on this node and the
2251 * parent
2252 */
2253 if (level > write_lock_level ||
2254 (level + 1 > write_lock_level &&
2255 level + 1 < BTRFS_MAX_LEVEL &&
2256 p->nodes[level + 1])) {
2257 write_lock_level = level + 1;
2258 btrfs_release_path(p);
2259 goto again;
2260 }
2261
2262 if (last_level)
2263 err = btrfs_cow_block(trans, root, b, NULL, 0,
2264 &b,
2265 BTRFS_NESTING_COW);
2266 else
2267 err = btrfs_cow_block(trans, root, b,
2268 p->nodes[level + 1],
2269 p->slots[level + 1], &b,
2270 BTRFS_NESTING_COW);
2271 if (err) {
2272 ret = err;
2273 goto done;
2274 }
2275 }
2276 cow_done:
2277 p->nodes[level] = b;
2278
2279 /*
2280 * we have a lock on b and as long as we aren't changing
2281		 * the tree, there is no way for the items in b to change.
2282 * It is safe to drop the lock on our parent before we
2283 * go through the expensive btree search on b.
2284 *
2285 * If we're inserting or deleting (ins_len != 0), then we might
2286 * be changing slot zero, which may require changing the parent.
2287 * So, we can't drop the lock until after we know which slot
2288 * we're operating on.
2289 */
2290 if (!ins_len && !p->keep_locks) {
2291 int u = level + 1;
2292
2293 if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
2294 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
2295 p->locks[u] = 0;
2296 }
2297 }
2298
2299 if (level == 0) {
2300 if (ins_len > 0)
2301 ASSERT(write_lock_level >= 1);
2302
2303 ret = search_leaf(trans, root, key, p, ins_len, prev_cmp);
2304 if (!p->search_for_split)
2305 unlock_up(p, level, lowest_unlock,
2306 min_write_lock_level, NULL);
2307 goto done;
2308 }
2309
2310 ret = search_for_key_slot(b, 0, key, prev_cmp, &slot);
2311 if (ret < 0)
2312 goto done;
2313 prev_cmp = ret;
2314
2315 if (ret && slot > 0) {
2316 dec = 1;
2317 slot--;
2318 }
2319 p->slots[level] = slot;
2320 err = setup_nodes_for_search(trans, root, p, b, level, ins_len,
2321 &write_lock_level);
2322 if (err == -EAGAIN)
2323 goto again;
2324 if (err) {
2325 ret = err;
2326 goto done;
2327 }
2328 b = p->nodes[level];
2329 slot = p->slots[level];
2330
2331 /*
2332 * Slot 0 is special, if we change the key we have to update
2333 * the parent pointer which means we must have a write lock on
2334 * the parent
2335 */
2336 if (slot == 0 && ins_len && write_lock_level < level + 1) {
2337 write_lock_level = level + 1;
2338 btrfs_release_path(p);
2339 goto again;
2340 }
2341
2342 unlock_up(p, level, lowest_unlock, min_write_lock_level,
2343 &write_lock_level);
2344
2345 if (level == lowest_level) {
2346 if (dec)
2347 p->slots[level]++;
2348 goto done;
2349 }
2350
2351 err = read_block_for_search(root, p, &b, level, slot, key);
2352 if (err == -EAGAIN)
2353 goto again;
2354 if (err) {
2355 ret = err;
2356 goto done;
2357 }
2358
2359 if (!p->skip_locking) {
2360 level = btrfs_header_level(b);
2361
2362 btrfs_maybe_reset_lockdep_class(root, b);
2363
2364 if (level <= write_lock_level) {
2365 btrfs_tree_lock(b);
2366 p->locks[level] = BTRFS_WRITE_LOCK;
2367 } else {
2368 if (p->nowait) {
2369 if (!btrfs_try_tree_read_lock(b)) {
2370 free_extent_buffer(b);
2371 ret = -EAGAIN;
2372 goto done;
2373 }
2374 } else {
2375 btrfs_tree_read_lock(b);
2376 }
2377 p->locks[level] = BTRFS_READ_LOCK;
2378 }
2379 p->nodes[level] = b;
2380 }
2381 }
2382 ret = 1;
2383 done:
2384 if (ret < 0 && !p->skip_release_on_error)
2385 btrfs_release_path(p);
2386
2387 if (p->need_commit_sem) {
2388 int ret2;
2389
2390 ret2 = finish_need_commit_sem_search(p);
2391 up_read(&fs_info->commit_root_sem);
2392 if (ret2)
2393 ret = ret2;
2394 }
2395
2396 return ret;
2397 }
2398 ALLOW_ERROR_INJECTION(btrfs_search_slot, ERRNO);
2399
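/*
 * Example read-only use of btrfs_search_slot() (an illustrative sketch;
 * error handling is trimmed and the root/ino values are assumptions):
 *
 *	struct btrfs_key key = { .objectid = ino,
 *				 .type = BTRFS_INODE_ITEM_KEY,
 *				 .offset = 0 };
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	int ret;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0) {
 *		struct btrfs_inode_item *ii;
 *
 *		ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
 *				    struct btrfs_inode_item);
 *		... read fields through the btrfs_inode_*() accessors ...
 *	}
 *	btrfs_free_path(path);
 *
 * For insertions pass a transaction handle, ins_len > 0 and cow == 1; for
 * deletions pass ins_len < 0.
 */
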
2400 /*
2401 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2402 * current state of the tree together with the operations recorded in the tree
2403 * modification log to search for the key in a previous version of this tree, as
2404 * denoted by the time_seq parameter.
2405 *
2406 * Naturally, there is no support for insert, delete or cow operations.
2407 *
2408 * The resulting path and return value will be set up as if we called
2409 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2410 */
2411 int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
2412 struct btrfs_path *p, u64 time_seq)
2413 {
2414 struct btrfs_fs_info *fs_info = root->fs_info;
2415 struct extent_buffer *b;
2416 int slot;
2417 int ret;
2418 int err;
2419 int level;
2420 int lowest_unlock = 1;
2421 u8 lowest_level = 0;
2422
2423 lowest_level = p->lowest_level;
2424 WARN_ON(p->nodes[0] != NULL);
2425 ASSERT(!p->nowait);
2426
2427 if (p->search_commit_root) {
2428 BUG_ON(time_seq);
2429 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2430 }
2431
2432 again:
2433 b = btrfs_get_old_root(root, time_seq);
2434 if (!b) {
2435 ret = -EIO;
2436 goto done;
2437 }
2438 level = btrfs_header_level(b);
2439 p->locks[level] = BTRFS_READ_LOCK;
2440
2441 while (b) {
2442 int dec = 0;
2443
2444 level = btrfs_header_level(b);
2445 p->nodes[level] = b;
2446
2447 /*
2448 * we have a lock on b and as long as we aren't changing
2449		 * the tree, there is no way for the items in b to change.
2450 * It is safe to drop the lock on our parent before we
2451 * go through the expensive btree search on b.
2452 */
2453 btrfs_unlock_up_safe(p, level + 1);
2454
2455 ret = btrfs_bin_search(b, 0, key, &slot);
2456 if (ret < 0)
2457 goto done;
2458
2459 if (level == 0) {
2460 p->slots[level] = slot;
2461 unlock_up(p, level, lowest_unlock, 0, NULL);
2462 goto done;
2463 }
2464
2465 if (ret && slot > 0) {
2466 dec = 1;
2467 slot--;
2468 }
2469 p->slots[level] = slot;
2470 unlock_up(p, level, lowest_unlock, 0, NULL);
2471
2472 if (level == lowest_level) {
2473 if (dec)
2474 p->slots[level]++;
2475 goto done;
2476 }
2477
2478 err = read_block_for_search(root, p, &b, level, slot, key);
2479 if (err == -EAGAIN)
2480 goto again;
2481 if (err) {
2482 ret = err;
2483 goto done;
2484 }
2485
2486 level = btrfs_header_level(b);
2487 btrfs_tree_read_lock(b);
2488 b = btrfs_tree_mod_log_rewind(fs_info, p, b, time_seq);
2489 if (!b) {
2490 ret = -ENOMEM;
2491 goto done;
2492 }
2493 p->locks[level] = BTRFS_READ_LOCK;
2494 p->nodes[level] = b;
2495 }
2496 ret = 1;
2497 done:
2498 if (ret < 0)
2499 btrfs_release_path(p);
2500
2501 return ret;
2502 }
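
/*
 * Sketch of how a time_seq is typically obtained for
 * btrfs_search_old_slot() (assuming the tree mod log API as used by the
 * backref walking code; illustrative only):
 *
 *	struct btrfs_seq_list elem = BTRFS_SEQ_LIST_INIT(elem);
 *	u64 time_seq;
 *
 *	time_seq = btrfs_get_tree_mod_seq(fs_info, &elem);
 *	ret = btrfs_search_old_slot(root, &key, path, time_seq);
 *	...
 *	btrfs_put_tree_mod_seq(fs_info, &elem);
 *
 * Holding the seq list element keeps the relevant tree mod log entries from
 * being freed while the old version of the tree is being examined.
 */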
2503
2504 /*
2505 * Search the tree again to find a leaf with smaller keys.
2506 * Returns 0 if it found something.
2507 * Returns 1 if there are no smaller keys.
2508 * Returns < 0 on error.
2509 *
2510 * This may release the path, and so you may lose any locks held at the
2511 * time you call it.
2512 */
2513 static int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
2514 {
2515 struct btrfs_key key;
2516 struct btrfs_key orig_key;
2517 struct btrfs_disk_key found_key;
2518 int ret;
2519
2520 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
2521 orig_key = key;
2522
2523 if (key.offset > 0) {
2524 key.offset--;
2525 } else if (key.type > 0) {
2526 key.type--;
2527 key.offset = (u64)-1;
2528 } else if (key.objectid > 0) {
2529 key.objectid--;
2530 key.type = (u8)-1;
2531 key.offset = (u64)-1;
2532 } else {
2533 return 1;
2534 }
2535
2536 btrfs_release_path(path);
2537 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2538 if (ret <= 0)
2539 return ret;
2540
2541 /*
2542 * Previous key not found. Even if we were at slot 0 of the leaf we had
2543 * before releasing the path and calling btrfs_search_slot(), we now may
2544 * be in a slot pointing to the same original key - this can happen if
2545	 * after we released the path, one or more items were moved from a
2546 * sibling leaf into the front of the leaf we had due to an insertion
2547 * (see push_leaf_right()).
2548	 * If we hit this case and our slot is > 0, just decrement the slot
2549 * so that the caller does not process the same key again, which may or
2550 * may not break the caller, depending on its logic.
2551 */
2552 if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
2553 btrfs_item_key(path->nodes[0], &found_key, path->slots[0]);
2554 ret = comp_keys(&found_key, &orig_key);
2555 if (ret == 0) {
2556 if (path->slots[0] > 0) {
2557 path->slots[0]--;
2558 return 0;
2559 }
2560 /*
2561 * At slot 0, same key as before, it means orig_key is
2562 * the lowest, leftmost, key in the tree. We're done.
2563 */
2564 return 1;
2565 }
2566 }
2567
2568 btrfs_item_key(path->nodes[0], &found_key, 0);
2569 ret = comp_keys(&found_key, &key);
2570 /*
2571 * We might have had an item with the previous key in the tree right
2572 * before we released our path. And after we released our path, that
2573 * item might have been pushed to the first slot (0) of the leaf we
2574 * were holding due to a tree balance. Alternatively, an item with the
2575 * previous key can exist as the only element of a leaf (big fat item).
2576 * Therefore account for these 2 cases, so that our callers (like
2577 * btrfs_previous_item) don't miss an existing item with a key matching
2578 * the previous key we computed above.
2579 */
2580 if (ret <= 0)
2581 return 0;
2582 return 1;
2583 }
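
/*
 * Worked example of the predecessor key computed at the top of
 * btrfs_prev_leaf() (values made up for illustration): starting from key
 * (42 108 0), the offset is already 0, so the type is decremented and the
 * offset saturated, giving the search key (42 107 (u64)-1). That is the
 * largest possible key sorting strictly before the original one, so
 * searching for it lands on the closest preceding item, if any exists.
 */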
2584
2585 /*
2586 * helper to use instead of search slot if no exact match is needed but
2587 * instead the next or previous item should be returned.
2588 * When find_higher is true, the next higher item is returned, the next lower
2589 * otherwise.
2590 * When return_any and find_higher are both true, and no higher item is found,
2591 * return the next lower instead.
2592 * When return_any is true and find_higher is false, and no lower item is found,
2593 * return the next higher instead.
2594 * It returns 0 if any item is found, 1 if none is found (tree empty), and
2595 * < 0 on error
2596 */
2597 int btrfs_search_slot_for_read(struct btrfs_root *root,
2598 const struct btrfs_key *key,
2599 struct btrfs_path *p, int find_higher,
2600 int return_any)
2601 {
2602 int ret;
2603 struct extent_buffer *leaf;
2604
2605 again:
2606 ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
2607 if (ret <= 0)
2608 return ret;
2609 /*
2610 * a return value of 1 means the path is at the position where the
2611 * item should be inserted. Normally this is the next bigger item,
2612 * but in case the previous item is the last in a leaf, path points
2613 * to the first free slot in the previous leaf, i.e. at an invalid
2614 * item.
2615 */
2616 leaf = p->nodes[0];
2617
2618 if (find_higher) {
2619 if (p->slots[0] >= btrfs_header_nritems(leaf)) {
2620 ret = btrfs_next_leaf(root, p);
2621 if (ret <= 0)
2622 return ret;
2623 if (!return_any)
2624 return 1;
2625 /*
2626 * no higher item found, return the next
2627 * lower instead
2628 */
2629 return_any = 0;
2630 find_higher = 0;
2631 btrfs_release_path(p);
2632 goto again;
2633 }
2634 } else {
2635 if (p->slots[0] == 0) {
2636 ret = btrfs_prev_leaf(root, p);
2637 if (ret < 0)
2638 return ret;
2639 if (!ret) {
2640 leaf = p->nodes[0];
2641 if (p->slots[0] == btrfs_header_nritems(leaf))
2642 p->slots[0]--;
2643 return 0;
2644 }
2645 if (!return_any)
2646 return 1;
2647 /*
2648 * no lower item found, return the next
2649 * higher instead
2650 */
2651 return_any = 0;
2652 find_higher = 1;
2653 btrfs_release_path(p);
2654 goto again;
2655 } else {
2656 --p->slots[0];
2657 }
2658 }
2659 return 0;
2660 }
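
/*
 * Example use of btrfs_search_slot_for_read() (an illustrative sketch; the
 * key values are assumptions): to position the path at the first item with a
 * key >= (ino 0 0), i.e. the lowest item of a given objectid:
 *
 *	struct btrfs_key key = { .objectid = ino, .type = 0, .offset = 0 };
 *
 *	ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
 *	if (ret == 0)
 *		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
 */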
2661
2662 /*
2663 * Execute search and call btrfs_previous_item to traverse backwards if the item
2664 * was not found.
2665 *
2666 * Return 0 if found, 1 if not found and < 0 if error.
2667 */
2668 int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key,
2669 struct btrfs_path *path)
2670 {
2671 int ret;
2672
2673 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
2674 if (ret > 0)
2675 ret = btrfs_previous_item(root, path, key->objectid, key->type);
2676
2677 if (ret == 0)
2678 btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]);
2679
2680 return ret;
2681 }
2682
2683 /*
2684 * Search for a valid slot for the given path.
2685 *
2686 * @root: The root node of the tree.
2687 * @key: Will contain a valid item if found.
2688 * @path: The starting point to validate the slot.
2689 *
2690 * Return: 0 if the item is valid
2691 * 1 if not found
2692 * <0 if error.
2693 */
2694 int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key,
2695 struct btrfs_path *path)
2696 {
2697 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2698 int ret;
2699
2700 ret = btrfs_next_leaf(root, path);
2701 if (ret)
2702 return ret;
2703 }
2704
2705 btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]);
2706 return 0;
2707 }
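
/*
 * btrfs_get_next_valid_item() is the building block for simple forward
 * iteration over a tree. A sketch of such a loop, assuming the
 * btrfs_for_each_slot() helper from ctree.h is available in this tree:
 *
 *	btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
 *		... process the item described by found_key ...
 *	}
 *	if (iter_ret < 0)
 *		... handle error ...
 */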
2708
2709 /*
2710 * adjust the pointers going up the tree, starting at level
2711 * making sure the right key of each node points to 'key'.
2712 * This is used after shifting pointers to the left, so it stops
2713 * fixing up pointers when a given leaf/node is not in slot 0 of the
2714 * higher levels
2715 *
2716 */
2717 static void fixup_low_keys(struct btrfs_trans_handle *trans,
2718 struct btrfs_path *path,
2719 struct btrfs_disk_key *key, int level)
2720 {
2721 int i;
2722 struct extent_buffer *t;
2723 int ret;
2724
2725 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2726 int tslot = path->slots[i];
2727
2728 if (!path->nodes[i])
2729 break;
2730 t = path->nodes[i];
2731 ret = btrfs_tree_mod_log_insert_key(t, tslot,
2732 BTRFS_MOD_LOG_KEY_REPLACE);
2733 BUG_ON(ret < 0);
2734 btrfs_set_node_key(t, key, tslot);
2735 btrfs_mark_buffer_dirty(trans, path->nodes[i]);
2736 if (tslot != 0)
2737 break;
2738 }
2739 }
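
/*
 * Worked example for fixup_low_keys() (the shape is made up): suppose the
 * first item of a leaf changed to (500 1 0). The parent's key pointer for
 * that leaf is rewritten to (500 1 0); if the leaf sits at slot 0 of its
 * parent, the new key is also the parent's lowest key, so the grandparent's
 * pointer must be rewritten too, and so on. The walk stops right after
 * fixing the first ancestor whose path slot is non-zero, because above that
 * point the subtree's lowest key is unchanged.
 */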
2740
2741 /*
2742 * update item key.
2743 *
2744 * This function isn't completely safe. It's the caller's responsibility
2745 * that the new key won't break the order
2746 */
2747 void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
2748 struct btrfs_path *path,
2749 const struct btrfs_key *new_key)
2750 {
2751 struct btrfs_fs_info *fs_info = trans->fs_info;
2752 struct btrfs_disk_key disk_key;
2753 struct extent_buffer *eb;
2754 int slot;
2755
2756 eb = path->nodes[0];
2757 slot = path->slots[0];
2758 if (slot > 0) {
2759 btrfs_item_key(eb, &disk_key, slot - 1);
2760 if (unlikely(comp_keys(&disk_key, new_key) >= 0)) {
2761 btrfs_print_leaf(eb);
2762 btrfs_crit(fs_info,
2763 "slot %u key (%llu %u %llu) new key (%llu %u %llu)",
2764 slot, btrfs_disk_key_objectid(&disk_key),
2765 btrfs_disk_key_type(&disk_key),
2766 btrfs_disk_key_offset(&disk_key),
2767 new_key->objectid, new_key->type,
2768 new_key->offset);
2769 BUG();
2770 }
2771 }
2772 if (slot < btrfs_header_nritems(eb) - 1) {
2773 btrfs_item_key(eb, &disk_key, slot + 1);
2774 if (unlikely(comp_keys(&disk_key, new_key) <= 0)) {
2775 btrfs_print_leaf(eb);
2776 btrfs_crit(fs_info,
2777 "slot %u key (%llu %u %llu) new key (%llu %u %llu)",
2778 slot, btrfs_disk_key_objectid(&disk_key),
2779 btrfs_disk_key_type(&disk_key),
2780 btrfs_disk_key_offset(&disk_key),
2781 new_key->objectid, new_key->type,
2782 new_key->offset);
2783 BUG();
2784 }
2785 }
2786
2787 btrfs_cpu_key_to_disk(&disk_key, new_key);
2788 btrfs_set_item_key(eb, &disk_key, slot);
2789 btrfs_mark_buffer_dirty(trans, eb);
2790 if (slot == 0)
2791 fixup_low_keys(trans, path, &disk_key, 1);
2792 }
2793
2794 /*
2795 * Check key order of two sibling extent buffers.
2796 *
2797 * Return true if something is wrong.
2798 * Return false if everything is fine.
2799 *
2800 * Tree-checker only works inside one tree block, thus the following
2801 * corruption can not be detected by tree-checker:
2802 *
2803 * Leaf @left | Leaf @right
2804 * --------------------------------------------------------------
2805 * | 1 | 2 | 3 | 4 | 5 | f6 | | 7 | 8 |
2806 *
2807 * Key f6 in leaf @left itself is valid, but not valid when the next
2808 * key in leaf @right is 7.
2809 * This can only be checked at tree block merge time.
2810 * And since tree checker has ensured all key order in each tree block
2811 * is correct, we only need to bother the last key of @left and the first
2812 * key of @right.
2813 */
2814 static bool check_sibling_keys(struct extent_buffer *left,
2815 struct extent_buffer *right)
2816 {
2817 struct btrfs_key left_last;
2818 struct btrfs_key right_first;
2819 int level = btrfs_header_level(left);
2820 int nr_left = btrfs_header_nritems(left);
2821 int nr_right = btrfs_header_nritems(right);
2822
2823 /* No key to check in one of the tree blocks */
2824 if (!nr_left || !nr_right)
2825 return false;
2826
2827 if (level) {
2828 btrfs_node_key_to_cpu(left, &left_last, nr_left - 1);
2829 btrfs_node_key_to_cpu(right, &right_first, 0);
2830 } else {
2831 btrfs_item_key_to_cpu(left, &left_last, nr_left - 1);
2832 btrfs_item_key_to_cpu(right, &right_first, 0);
2833 }
2834
2835 if (unlikely(btrfs_comp_cpu_keys(&left_last, &right_first) >= 0)) {
2836 btrfs_crit(left->fs_info, "left extent buffer:");
2837 btrfs_print_tree(left, false);
2838 btrfs_crit(left->fs_info, "right extent buffer:");
2839 btrfs_print_tree(right, false);
2840 btrfs_crit(left->fs_info,
2841 "bad key order, sibling blocks, left last (%llu %u %llu) right first (%llu %u %llu)",
2842 left_last.objectid, left_last.type,
2843 left_last.offset, right_first.objectid,
2844 right_first.type, right_first.offset);
2845 return true;
2846 }
2847 return false;
2848 }
2849
2850 /*
2851 * try to push data from one node into the next node left in the
2852 * tree.
2853 *
2854 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
2855 * error, and > 0 if there was no room in the left hand block.
2856 */
2857 static int push_node_left(struct btrfs_trans_handle *trans,
2858 struct extent_buffer *dst,
2859 struct extent_buffer *src, int empty)
2860 {
2861 struct btrfs_fs_info *fs_info = trans->fs_info;
2862 int push_items = 0;
2863 int src_nritems;
2864 int dst_nritems;
2865 int ret = 0;
2866
2867 src_nritems = btrfs_header_nritems(src);
2868 dst_nritems = btrfs_header_nritems(dst);
2869 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
2870 WARN_ON(btrfs_header_generation(src) != trans->transid);
2871 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2872
2873 if (!empty && src_nritems <= 8)
2874 return 1;
2875
2876 if (push_items <= 0)
2877 return 1;
2878
2879 if (empty) {
2880 push_items = min(src_nritems, push_items);
2881 if (push_items < src_nritems) {
2882 /* leave at least 8 pointers in the node if
2883 * we aren't going to empty it
2884 */
2885 if (src_nritems - push_items < 8) {
2886 if (push_items <= 8)
2887 return 1;
2888 push_items -= 8;
2889 }
2890 }
2891 } else
2892 push_items = min(src_nritems - 8, push_items);
2893
2894 /* dst is the left eb, src is the middle eb */
2895 if (check_sibling_keys(dst, src)) {
2896 ret = -EUCLEAN;
2897 btrfs_abort_transaction(trans, ret);
2898 return ret;
2899 }
2900 ret = btrfs_tree_mod_log_eb_copy(dst, src, dst_nritems, 0, push_items);
2901 if (ret) {
2902 btrfs_abort_transaction(trans, ret);
2903 return ret;
2904 }
2905 copy_extent_buffer(dst, src,
2906 btrfs_node_key_ptr_offset(dst, dst_nritems),
2907 btrfs_node_key_ptr_offset(src, 0),
2908 push_items * sizeof(struct btrfs_key_ptr));
2909
2910 if (push_items < src_nritems) {
2911 /*
2912 * btrfs_tree_mod_log_eb_copy handles logging the move, so we
2913 * don't need to do an explicit tree mod log operation for it.
2914 */
2915 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(src, 0),
2916 btrfs_node_key_ptr_offset(src, push_items),
2917 (src_nritems - push_items) *
2918 sizeof(struct btrfs_key_ptr));
2919 }
2920 btrfs_set_header_nritems(src, src_nritems - push_items);
2921 btrfs_set_header_nritems(dst, dst_nritems + push_items);
2922 btrfs_mark_buffer_dirty(trans, src);
2923 btrfs_mark_buffer_dirty(trans, dst);
2924
2925 return ret;
2926 }
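
/*
 * Arithmetic sketch for push_node_left() (the numbers are made up; the real
 * per-node capacity comes from BTRFS_NODEPTRS_PER_BLOCK()): with a capacity
 * of 493 pointers, dst holding 480 and src holding 30 in the non-empty case:
 *
 *	push_items = 493 - 480 = 13;
 *	push_items = min(30 - 8, 13) = 13;
 *
 * so 13 pointers move left and src keeps at least 8, matching the rule above
 * of never draining a node below 8 pointers unless we are emptying it.
 */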
2927
2928 /*
2929 * try to push data from one node into the next node right in the
2930 * tree.
2931 *
2932 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
2933 * error, and > 0 if there was no room in the right hand block.
2934 *
2935 * this will only push up to 1/2 the contents of the left node over
2936 */
2937 static int balance_node_right(struct btrfs_trans_handle *trans,
2938 struct extent_buffer *dst,
2939 struct extent_buffer *src)
2940 {
2941 struct btrfs_fs_info *fs_info = trans->fs_info;
2942 int push_items = 0;
2943 int max_push;
2944 int src_nritems;
2945 int dst_nritems;
2946 int ret = 0;
2947
2948 WARN_ON(btrfs_header_generation(src) != trans->transid);
2949 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2950
2951 src_nritems = btrfs_header_nritems(src);
2952 dst_nritems = btrfs_header_nritems(dst);
2953 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
2954 if (push_items <= 0)
2955 return 1;
2956
2957 if (src_nritems < 4)
2958 return 1;
2959
2960 max_push = src_nritems / 2 + 1;
2961 /* don't try to empty the node */
2962 if (max_push >= src_nritems)
2963 return 1;
2964
2965 if (max_push < push_items)
2966 push_items = max_push;
2967
2968 /* dst is the right eb, src is the middle eb */
2969 if (check_sibling_keys(src, dst)) {
2970 ret = -EUCLEAN;
2971 btrfs_abort_transaction(trans, ret);
2972 return ret;
2973 }
2974
2975 /*
2976 * btrfs_tree_mod_log_eb_copy handles logging the move, so we don't
2977 * need to do an explicit tree mod log operation for it.
2978 */
2979 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(dst, push_items),
2980 btrfs_node_key_ptr_offset(dst, 0),
2981 (dst_nritems) *
2982 sizeof(struct btrfs_key_ptr));
2983
2984 ret = btrfs_tree_mod_log_eb_copy(dst, src, 0, src_nritems - push_items,
2985 push_items);
2986 if (ret) {
2987 btrfs_abort_transaction(trans, ret);
2988 return ret;
2989 }
2990 copy_extent_buffer(dst, src,
2991 btrfs_node_key_ptr_offset(dst, 0),
2992 btrfs_node_key_ptr_offset(src, src_nritems - push_items),
2993 push_items * sizeof(struct btrfs_key_ptr));
2994
2995 btrfs_set_header_nritems(src, src_nritems - push_items);
2996 btrfs_set_header_nritems(dst, dst_nritems + push_items);
2997
2998 btrfs_mark_buffer_dirty(trans, src);
2999 btrfs_mark_buffer_dirty(trans, dst);
3000
3001 return ret;
3002 }
3003
3004 /*
3005 * helper function to insert a new root level in the tree.
3006 * A new node is allocated, and a single item is inserted to
3007 * point to the existing root
3008 *
3009 * returns zero on success or < 0 on failure.
3010 */
3011 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3012 struct btrfs_root *root,
3013 struct btrfs_path *path, int level)
3014 {
3015 struct btrfs_fs_info *fs_info = root->fs_info;
3016 u64 lower_gen;
3017 struct extent_buffer *lower;
3018 struct extent_buffer *c;
3019 struct extent_buffer *old;
3020 struct btrfs_disk_key lower_key;
3021 int ret;
3022
3023 BUG_ON(path->nodes[level]);
3024 BUG_ON(path->nodes[level-1] != root->node);
3025
3026 lower = path->nodes[level-1];
3027 if (level == 1)
3028 btrfs_item_key(lower, &lower_key, 0);
3029 else
3030 btrfs_node_key(lower, &lower_key, 0);
3031
3032 c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3033 &lower_key, level, root->node->start, 0,
3034 BTRFS_NESTING_NEW_ROOT);
3035 if (IS_ERR(c))
3036 return PTR_ERR(c);
3037
3038 root_add_used(root, fs_info->nodesize);
3039
3040 btrfs_set_header_nritems(c, 1);
3041 btrfs_set_node_key(c, &lower_key, 0);
3042 btrfs_set_node_blockptr(c, 0, lower->start);
3043 lower_gen = btrfs_header_generation(lower);
3044 WARN_ON(lower_gen != trans->transid);
3045
3046 btrfs_set_node_ptr_generation(c, 0, lower_gen);
3047
3048 btrfs_mark_buffer_dirty(trans, c);
3049
3050 old = root->node;
3051 ret = btrfs_tree_mod_log_insert_root(root->node, c, false);
3052 if (ret < 0) {
3053 int ret2;
3054
3055 ret2 = btrfs_free_tree_block(trans, btrfs_root_id(root), c, 0, 1);
3056 if (ret2 < 0)
3057 btrfs_abort_transaction(trans, ret2);
3058 btrfs_tree_unlock(c);
3059 free_extent_buffer(c);
3060 return ret;
3061 }
3062 rcu_assign_pointer(root->node, c);
3063
3064 /* the super has an extra ref to root->node */
3065 free_extent_buffer(old);
3066
3067 add_root_to_dirty_list(root);
3068 atomic_inc(&c->refs);
3069 path->nodes[level] = c;
3070 path->locks[level] = BTRFS_WRITE_LOCK;
3071 path->slots[level] = 0;
3072 return 0;
3073 }
3074
3075 /*
3076 * worker function to insert a single pointer in a node.
3077 * the node should have enough room for the pointer already
3078 *
3079 * slot and level indicate where you want the key to go, and
3080 * blocknr is the block the key points to.
3081 */
3082 static int insert_ptr(struct btrfs_trans_handle *trans,
3083 struct btrfs_path *path,
3084 struct btrfs_disk_key *key, u64 bytenr,
3085 int slot, int level)
3086 {
3087 struct extent_buffer *lower;
3088 int nritems;
3089 int ret;
3090
3091 BUG_ON(!path->nodes[level]);
3092 btrfs_assert_tree_write_locked(path->nodes[level]);
3093 lower = path->nodes[level];
3094 nritems = btrfs_header_nritems(lower);
3095 BUG_ON(slot > nritems);
3096 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(trans->fs_info));
3097 if (slot != nritems) {
3098 if (level) {
3099 ret = btrfs_tree_mod_log_insert_move(lower, slot + 1,
3100 slot, nritems - slot);
3101 if (ret < 0) {
3102 btrfs_abort_transaction(trans, ret);
3103 return ret;
3104 }
3105 }
3106 memmove_extent_buffer(lower,
3107 btrfs_node_key_ptr_offset(lower, slot + 1),
3108 btrfs_node_key_ptr_offset(lower, slot),
3109 (nritems - slot) * sizeof(struct btrfs_key_ptr));
3110 }
3111 if (level) {
3112 ret = btrfs_tree_mod_log_insert_key(lower, slot,
3113 BTRFS_MOD_LOG_KEY_ADD);
3114 if (ret < 0) {
3115 btrfs_abort_transaction(trans, ret);
3116 return ret;
3117 }
3118 }
3119 btrfs_set_node_key(lower, key, slot);
3120 btrfs_set_node_blockptr(lower, slot, bytenr);
3121 WARN_ON(trans->transid == 0);
3122 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3123 btrfs_set_header_nritems(lower, nritems + 1);
3124 btrfs_mark_buffer_dirty(trans, lower);
3125
3126 return 0;
3127 }
3128
3129 /*
3130 * split the node at the specified level in path in two.
3131 * The path is corrected to point to the appropriate node after the split
3132 *
3133 * Before splitting this tries to make some room in the node by pushing
3134 * left and right, if either one works, it returns right away.
3135 *
3136 * returns 0 on success and < 0 on failure
3137 */
3138 static noinline int split_node(struct btrfs_trans_handle *trans,
3139 struct btrfs_root *root,
3140 struct btrfs_path *path, int level)
3141 {
3142 struct btrfs_fs_info *fs_info = root->fs_info;
3143 struct extent_buffer *c;
3144 struct extent_buffer *split;
3145 struct btrfs_disk_key disk_key;
3146 int mid;
3147 int ret;
3148 u32 c_nritems;
3149
3150 c = path->nodes[level];
3151 WARN_ON(btrfs_header_generation(c) != trans->transid);
3152 if (c == root->node) {
3153 /*
3154		 * trying to split the root, let's make a new one
3155 *
3156		 * tree mod log: We don't log removal of the old root in
3157 * insert_new_root, because that root buffer will be kept as a
3158 * normal node. We are going to log removal of half of the
3159 * elements below with btrfs_tree_mod_log_eb_copy(). We're
3160 * holding a tree lock on the buffer, which is why we cannot
3161 * race with other tree_mod_log users.
3162 */
3163 ret = insert_new_root(trans, root, path, level + 1);
3164 if (ret)
3165 return ret;
3166 } else {
3167 ret = push_nodes_for_insert(trans, root, path, level);
3168 c = path->nodes[level];
3169 if (!ret && btrfs_header_nritems(c) <
3170 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3)
3171 return 0;
3172 if (ret < 0)
3173 return ret;
3174 }
3175
3176 c_nritems = btrfs_header_nritems(c);
3177 mid = (c_nritems + 1) / 2;
3178 btrfs_node_key(c, &disk_key, mid);
3179
3180 split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3181 &disk_key, level, c->start, 0,
3182 BTRFS_NESTING_SPLIT);
3183 if (IS_ERR(split))
3184 return PTR_ERR(split);
3185
3186 root_add_used(root, fs_info->nodesize);
3187 ASSERT(btrfs_header_level(c) == level);
3188
3189 ret = btrfs_tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid);
3190 if (ret) {
3191 btrfs_tree_unlock(split);
3192 free_extent_buffer(split);
3193 btrfs_abort_transaction(trans, ret);
3194 return ret;
3195 }
3196 copy_extent_buffer(split, c,
3197 btrfs_node_key_ptr_offset(split, 0),
3198 btrfs_node_key_ptr_offset(c, mid),
3199 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3200 btrfs_set_header_nritems(split, c_nritems - mid);
3201 btrfs_set_header_nritems(c, mid);
3202
3203 btrfs_mark_buffer_dirty(trans, c);
3204 btrfs_mark_buffer_dirty(trans, split);
3205
3206 ret = insert_ptr(trans, path, &disk_key, split->start,
3207 path->slots[level + 1] + 1, level + 1);
3208 if (ret < 0) {
3209 btrfs_tree_unlock(split);
3210 free_extent_buffer(split);
3211 return ret;
3212 }
3213
3214 if (path->slots[level] >= mid) {
3215 path->slots[level] -= mid;
3216 btrfs_tree_unlock(c);
3217 free_extent_buffer(c);
3218 path->nodes[level] = split;
3219 path->slots[level + 1] += 1;
3220 } else {
3221 btrfs_tree_unlock(split);
3222 free_extent_buffer(split);
3223 }
3224 return 0;
3225 }
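
/*
 * Worked example for split_node() (illustrative numbers): with
 * c_nritems = 121, mid = (121 + 1) / 2 = 61, so pointers [61, 120] are
 * copied into the new right sibling and c keeps [0, 60]. A path slot of 75
 * in c becomes 75 - 61 = 14 in the new node and path->slots[level + 1] is
 * bumped by one, so the path still refers to the same key pointer after the
 * split.
 */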
3226
3227 /*
3228 * how many bytes are required to store the items in a leaf. start
3229 * and nr indicate which items in the leaf to check. This totals up the
3230 * space used both by the item structs and the item data
3231 */
3232 static int leaf_space_used(const struct extent_buffer *l, int start, int nr)
3233 {
3234 int data_len;
3235 int nritems = btrfs_header_nritems(l);
3236 int end = min(nritems, start + nr) - 1;
3237
3238 if (!nr)
3239 return 0;
3240 data_len = btrfs_item_offset(l, start) + btrfs_item_size(l, start);
3241 data_len = data_len - btrfs_item_offset(l, end);
3242 data_len += sizeof(struct btrfs_item) * nr;
3243 WARN_ON(data_len < 0);
3244 return data_len;
3245 }
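
/*
 * Worked example for leaf_space_used() (illustrative numbers, assuming
 * sizeof(struct btrfs_item) == 25 on disk): item data grows from the end of
 * the leaf towards the front, so with
 *
 *	item 0: offset = 16100, size = 100  (data ends at 16200)
 *	item 1: offset = 16040, size = 60
 *
 * leaf_space_used(l, 0, 2) = (16100 + 100) - 16040 + 2 * 25 = 210,
 * i.e. 160 bytes of item data plus two item headers.
 */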
3246
3247 /*
3248 * The space between the end of the leaf items and
3249 * the start of the leaf data. IOW, how much room
3250 * the leaf has left for both items and data
3251 */
3252 int btrfs_leaf_free_space(const struct extent_buffer *leaf)
3253 {
3254 struct btrfs_fs_info *fs_info = leaf->fs_info;
3255 int nritems = btrfs_header_nritems(leaf);
3256 int ret;
3257
3258 ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
3259 if (ret < 0) {
3260 btrfs_crit(fs_info,
3261 "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
3262 ret,
3263 (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info),
3264 leaf_space_used(leaf, 0, nritems), nritems);
3265 }
3266 return ret;
3267 }
3268
3269 /*
3270 * min slot controls the lowest index we're willing to push to the
3271 * right. We'll push up to and including min_slot, but no lower
3272 */
3273 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
3274 struct btrfs_path *path,
3275 int data_size, int empty,
3276 struct extent_buffer *right,
3277 int free_space, u32 left_nritems,
3278 u32 min_slot)
3279 {
3280 struct btrfs_fs_info *fs_info = right->fs_info;
3281 struct extent_buffer *left = path->nodes[0];
3282 struct extent_buffer *upper = path->nodes[1];
3283 struct btrfs_map_token token;
3284 struct btrfs_disk_key disk_key;
3285 int slot;
3286 u32 i;
3287 int push_space = 0;
3288 int push_items = 0;
3289 u32 nr;
3290 u32 right_nritems;
3291 u32 data_end;
3292 u32 this_item_size;
3293
3294 if (empty)
3295 nr = 0;
3296 else
3297 nr = max_t(u32, 1, min_slot);
3298
3299 if (path->slots[0] >= left_nritems)
3300 push_space += data_size;
3301
3302 slot = path->slots[1];
3303 i = left_nritems - 1;
3304 while (i >= nr) {
3305 if (!empty && push_items > 0) {
3306 if (path->slots[0] > i)
3307 break;
3308 if (path->slots[0] == i) {
3309 int space = btrfs_leaf_free_space(left);
3310
3311 if (space + push_space * 2 > free_space)
3312 break;
3313 }
3314 }
3315
3316 if (path->slots[0] == i)
3317 push_space += data_size;
3318
3319 this_item_size = btrfs_item_size(left, i);
3320 if (this_item_size + sizeof(struct btrfs_item) +
3321 push_space > free_space)
3322 break;
3323
3324 push_items++;
3325 push_space += this_item_size + sizeof(struct btrfs_item);
3326 if (i == 0)
3327 break;
3328 i--;
3329 }
3330
3331 if (push_items == 0)
3332 goto out_unlock;
3333
3334 WARN_ON(!empty && push_items == left_nritems);
3335
3336 /* push left to right */
3337 right_nritems = btrfs_header_nritems(right);
3338
3339 push_space = btrfs_item_data_end(left, left_nritems - push_items);
3340 push_space -= leaf_data_end(left);
3341
3342 /* make room in the right data area */
3343 data_end = leaf_data_end(right);
3344 memmove_leaf_data(right, data_end - push_space, data_end,
3345 BTRFS_LEAF_DATA_SIZE(fs_info) - data_end);
3346
3347 /* copy from the left data area */
3348 copy_leaf_data(right, left, BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3349 leaf_data_end(left), push_space);
3350
3351 memmove_leaf_items(right, push_items, 0, right_nritems);
3352
3353 /* copy the items from left to right */
3354 copy_leaf_items(right, left, 0, left_nritems - push_items, push_items);
3355
3356 /* update the item pointers */
3357 btrfs_init_map_token(&token, right);
3358 right_nritems += push_items;
3359 btrfs_set_header_nritems(right, right_nritems);
3360 push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3361 for (i = 0; i < right_nritems; i++) {
3362 push_space -= btrfs_token_item_size(&token, i);
3363 btrfs_set_token_item_offset(&token, i, push_space);
3364 }
3365
3366 left_nritems -= push_items;
3367 btrfs_set_header_nritems(left, left_nritems);
3368
3369 if (left_nritems)
3370 btrfs_mark_buffer_dirty(trans, left);
3371 else
3372 btrfs_clear_buffer_dirty(trans, left);
3373
3374 btrfs_mark_buffer_dirty(trans, right);
3375
3376 btrfs_item_key(right, &disk_key, 0);
3377 btrfs_set_node_key(upper, &disk_key, slot + 1);
3378 btrfs_mark_buffer_dirty(trans, upper);
3379
3380 /* then fixup the leaf pointer in the path */
3381 if (path->slots[0] >= left_nritems) {
3382 path->slots[0] -= left_nritems;
3383 if (btrfs_header_nritems(path->nodes[0]) == 0)
3384 btrfs_clear_buffer_dirty(trans, path->nodes[0]);
3385 btrfs_tree_unlock(path->nodes[0]);
3386 free_extent_buffer(path->nodes[0]);
3387 path->nodes[0] = right;
3388 path->slots[1] += 1;
3389 } else {
3390 btrfs_tree_unlock(right);
3391 free_extent_buffer(right);
3392 }
3393 return 0;
3394
3395 out_unlock:
3396 btrfs_tree_unlock(right);
3397 free_extent_buffer(right);
3398 return 1;
3399 }
3400
3401 /*
3402 * push some data in the path leaf to the right, trying to free up at
3403 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3404 *
3405 * returns 1 if the push failed because the other node didn't have enough
3406 * room, 0 if everything worked out and < 0 if there were major errors.
3407 *
3408 * this will push starting from min_slot to the end of the leaf. It won't
3409 * push any slot lower than min_slot
3410 */
3411 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3412 *root, struct btrfs_path *path,
3413 int min_data_size, int data_size,
3414 int empty, u32 min_slot)
3415 {
3416 struct extent_buffer *left = path->nodes[0];
3417 struct extent_buffer *right;
3418 struct extent_buffer *upper;
3419 int slot;
3420 int free_space;
3421 u32 left_nritems;
3422 int ret;
3423
3424 if (!path->nodes[1])
3425 return 1;
3426
3427 slot = path->slots[1];
3428 upper = path->nodes[1];
3429 if (slot >= btrfs_header_nritems(upper) - 1)
3430 return 1;
3431
3432 btrfs_assert_tree_write_locked(path->nodes[1]);
3433
3434 right = btrfs_read_node_slot(upper, slot + 1);
3435 if (IS_ERR(right))
3436 return PTR_ERR(right);
3437
3438 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
3439
3440 free_space = btrfs_leaf_free_space(right);
3441 if (free_space < data_size)
3442 goto out_unlock;
3443
3444 ret = btrfs_cow_block(trans, root, right, upper,
3445 slot + 1, &right, BTRFS_NESTING_RIGHT_COW);
3446 if (ret)
3447 goto out_unlock;
3448
3449 left_nritems = btrfs_header_nritems(left);
3450 if (left_nritems == 0)
3451 goto out_unlock;
3452
3453 if (check_sibling_keys(left, right)) {
3454 ret = -EUCLEAN;
3455 btrfs_abort_transaction(trans, ret);
3456 btrfs_tree_unlock(right);
3457 free_extent_buffer(right);
3458 return ret;
3459 }
3460 if (path->slots[0] == left_nritems && !empty) {
3461 /* Key greater than all keys in the leaf, right neighbor has
3462 * enough room for it and we're not emptying our leaf to delete
3463 * it, therefore use right neighbor to insert the new item and
3464 * no need to touch/dirty our left leaf. */
3465 btrfs_tree_unlock(left);
3466 free_extent_buffer(left);
3467 path->nodes[0] = right;
3468 path->slots[0] = 0;
3469 path->slots[1]++;
3470 return 0;
3471 }
3472
3473 return __push_leaf_right(trans, path, min_data_size, empty, right,
3474 free_space, left_nritems, min_slot);
3475 out_unlock:
3476 btrfs_tree_unlock(right);
3477 free_extent_buffer(right);
3478 return 1;
3479 }
3480
3481 /*
3482 * push some data in the path leaf to the left, trying to free up at
3483 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3484 *
3485 * max_slot can put a limit on how far into the leaf we'll push items. The
3486 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the
3487 * items
3488 */
3489 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
3490 struct btrfs_path *path, int data_size,
3491 int empty, struct extent_buffer *left,
3492 int free_space, u32 right_nritems,
3493 u32 max_slot)
3494 {
3495 struct btrfs_fs_info *fs_info = left->fs_info;
3496 struct btrfs_disk_key disk_key;
3497 struct extent_buffer *right = path->nodes[0];
3498 int i;
3499 int push_space = 0;
3500 int push_items = 0;
3501 u32 old_left_nritems;
3502 u32 nr;
3503 int ret = 0;
3504 u32 this_item_size;
3505 u32 old_left_item_size;
3506 struct btrfs_map_token token;
3507
3508 if (empty)
3509 nr = min(right_nritems, max_slot);
3510 else
3511 nr = min(right_nritems - 1, max_slot);
3512
3513 for (i = 0; i < nr; i++) {
3514 if (!empty && push_items > 0) {
3515 if (path->slots[0] < i)
3516 break;
3517 if (path->slots[0] == i) {
3518 int space = btrfs_leaf_free_space(right);
3519
3520 if (space + push_space * 2 > free_space)
3521 break;
3522 }
3523 }
3524
3525 if (path->slots[0] == i)
3526 push_space += data_size;
3527
3528 this_item_size = btrfs_item_size(right, i);
3529 if (this_item_size + sizeof(struct btrfs_item) + push_space >
3530 free_space)
3531 break;
3532
3533 push_items++;
3534 push_space += this_item_size + sizeof(struct btrfs_item);
3535 }
3536
3537 if (push_items == 0) {
3538 ret = 1;
3539 goto out;
3540 }
3541 WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3542
3543 /* push data from right to left */
3544 copy_leaf_items(left, right, btrfs_header_nritems(left), 0, push_items);
3545
3546 push_space = BTRFS_LEAF_DATA_SIZE(fs_info) -
3547 btrfs_item_offset(right, push_items - 1);
3548
3549 copy_leaf_data(left, right, leaf_data_end(left) - push_space,
3550 btrfs_item_offset(right, push_items - 1), push_space);
3551 old_left_nritems = btrfs_header_nritems(left);
3552 BUG_ON(old_left_nritems <= 0);
3553
3554 btrfs_init_map_token(&token, left);
3555 old_left_item_size = btrfs_item_offset(left, old_left_nritems - 1);
3556 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3557 u32 ioff;
3558
3559 ioff = btrfs_token_item_offset(&token, i);
3560 btrfs_set_token_item_offset(&token, i,
3561 ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size));
3562 }
3563 btrfs_set_header_nritems(left, old_left_nritems + push_items);
3564
3565 /* fixup right node */
3566 if (push_items > right_nritems)
3567 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3568 right_nritems);
3569
3570 if (push_items < right_nritems) {
3571 push_space = btrfs_item_offset(right, push_items - 1) -
3572 leaf_data_end(right);
3573 memmove_leaf_data(right,
3574 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3575 leaf_data_end(right), push_space);
3576
3577 memmove_leaf_items(right, 0, push_items,
3578 btrfs_header_nritems(right) - push_items);
3579 }
3580
3581 btrfs_init_map_token(&token, right);
3582 right_nritems -= push_items;
3583 btrfs_set_header_nritems(right, right_nritems);
3584 push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3585 for (i = 0; i < right_nritems; i++) {
3586 push_space = push_space - btrfs_token_item_size(&token, i);
3587 btrfs_set_token_item_offset(&token, i, push_space);
3588 }
3589
3590 btrfs_mark_buffer_dirty(trans, left);
3591 if (right_nritems)
3592 btrfs_mark_buffer_dirty(trans, right);
3593 else
3594 btrfs_clear_buffer_dirty(trans, right);
3595
3596 btrfs_item_key(right, &disk_key, 0);
3597 fixup_low_keys(trans, path, &disk_key, 1);
3598
3599 /* then fixup the leaf pointer in the path */
3600 if (path->slots[0] < push_items) {
3601 path->slots[0] += old_left_nritems;
3602 btrfs_tree_unlock(path->nodes[0]);
3603 free_extent_buffer(path->nodes[0]);
3604 path->nodes[0] = left;
3605 path->slots[1] -= 1;
3606 } else {
3607 btrfs_tree_unlock(left);
3608 free_extent_buffer(left);
3609 path->slots[0] -= push_items;
3610 }
3611 BUG_ON(path->slots[0] < 0);
3612 return ret;
3613 out:
3614 btrfs_tree_unlock(left);
3615 free_extent_buffer(left);
3616 return ret;
3617 }
3618
3619 /*
3620 * push some data in the path leaf to the left, trying to free up at
3621 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3622 *
3623 * max_slot can put a limit on how far into the leaf we'll push items. The
3624 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
3625 * items
3626 */
3627 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3628 *root, struct btrfs_path *path, int min_data_size,
3629 int data_size, int empty, u32 max_slot)
3630 {
3631 struct extent_buffer *right = path->nodes[0];
3632 struct extent_buffer *left;
3633 int slot;
3634 int free_space;
3635 u32 right_nritems;
3636 int ret = 0;
3637
3638 slot = path->slots[1];
3639 if (slot == 0)
3640 return 1;
3641 if (!path->nodes[1])
3642 return 1;
3643
3644 right_nritems = btrfs_header_nritems(right);
3645 if (right_nritems == 0)
3646 return 1;
3647
3648 btrfs_assert_tree_write_locked(path->nodes[1]);
3649
3650 left = btrfs_read_node_slot(path->nodes[1], slot - 1);
3651 if (IS_ERR(left))
3652 return PTR_ERR(left);
3653
3654 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
3655
3656 free_space = btrfs_leaf_free_space(left);
3657 if (free_space < data_size) {
3658 ret = 1;
3659 goto out;
3660 }
3661
3662 ret = btrfs_cow_block(trans, root, left,
3663 path->nodes[1], slot - 1, &left,
3664 BTRFS_NESTING_LEFT_COW);
3665 if (ret) {
3666 /* we hit -ENOSPC, but it isn't fatal here */
3667 if (ret == -ENOSPC)
3668 ret = 1;
3669 goto out;
3670 }
3671
3672 if (check_sibling_keys(left, right)) {
3673 ret = -EUCLEAN;
3674 btrfs_abort_transaction(trans, ret);
3675 goto out;
3676 }
3677 return __push_leaf_left(trans, path, min_data_size, empty, left,
3678 free_space, right_nritems, max_slot);
3679 out:
3680 btrfs_tree_unlock(left);
3681 free_extent_buffer(left);
3682 return ret;
3683 }
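
/*
 * Illustrative sketch (comment only, not compiled): the two ways this helper
 * is driven elsewhere in this file. When deleting, the caller tries to drain
 * the leaf completely; before an insert, it protects the target slot:
 *
 *	wret = push_leaf_left(trans, root, path, 0, min_push_space, 1, (u32)-1);
 *		empty == 1, max_slot == (u32)-1: move every item if possible
 *
 *	wret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
 *		empty == 0, max_slot == slot: items at/after 'slot' stay put
 *
 * Return convention: 0 if the push freed space, 1 if nothing could be pushed,
 * < 0 on error.
 */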
3684
3685 /*
3686 * split the path's leaf in two, making sure there is at least data_size
3687 * available for the resulting leaf level of the path.
3688 */
3689 static noinline int copy_for_split(struct btrfs_trans_handle *trans,
3690 struct btrfs_path *path,
3691 struct extent_buffer *l,
3692 struct extent_buffer *right,
3693 int slot, int mid, int nritems)
3694 {
3695 struct btrfs_fs_info *fs_info = trans->fs_info;
3696 int data_copy_size;
3697 int rt_data_off;
3698 int i;
3699 int ret;
3700 struct btrfs_disk_key disk_key;
3701 struct btrfs_map_token token;
3702
3703 nritems = nritems - mid;
3704 btrfs_set_header_nritems(right, nritems);
3705 data_copy_size = btrfs_item_data_end(l, mid) - leaf_data_end(l);
3706
3707 copy_leaf_items(right, l, 0, mid, nritems);
3708
3709 copy_leaf_data(right, l, BTRFS_LEAF_DATA_SIZE(fs_info) - data_copy_size,
3710 leaf_data_end(l), data_copy_size);
3711
3712 rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_data_end(l, mid);
3713
3714 btrfs_init_map_token(&token, right);
3715 for (i = 0; i < nritems; i++) {
3716 u32 ioff;
3717
3718 ioff = btrfs_token_item_offset(&token, i);
3719 btrfs_set_token_item_offset(&token, i, ioff + rt_data_off);
3720 }
3721
3722 btrfs_set_header_nritems(l, mid);
3723 btrfs_item_key(right, &disk_key, 0);
3724 ret = insert_ptr(trans, path, &disk_key, right->start, path->slots[1] + 1, 1);
3725 if (ret < 0)
3726 return ret;
3727
3728 btrfs_mark_buffer_dirty(trans, right);
3729 btrfs_mark_buffer_dirty(trans, l);
3730 BUG_ON(path->slots[0] != slot);
3731
3732 if (mid <= slot) {
3733 btrfs_tree_unlock(path->nodes[0]);
3734 free_extent_buffer(path->nodes[0]);
3735 path->nodes[0] = right;
3736 path->slots[0] -= mid;
3737 path->slots[1] += 1;
3738 } else {
3739 btrfs_tree_unlock(right);
3740 free_extent_buffer(right);
3741 }
3742
3743 BUG_ON(path->slots[0] < 0);
3744
3745 return 0;
3746 }
3747
3748 /*
3749 * double splits happen when we need to insert a big item in the middle
3750 * of a leaf. A double split can leave us with 3 mostly empty leaves:
3751 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
3752 * A B C
3753 *
3754 * We avoid this by trying to push the items on either side of our target
3755 * into the adjacent leaves. If all goes well we can avoid the double split
3756 * completely.
3757 */
3758 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
3759 struct btrfs_root *root,
3760 struct btrfs_path *path,
3761 int data_size)
3762 {
3763 int ret;
3764 int progress = 0;
3765 int slot;
3766 u32 nritems;
3767 int space_needed = data_size;
3768
3769 slot = path->slots[0];
3770 if (slot < btrfs_header_nritems(path->nodes[0]))
3771 space_needed -= btrfs_leaf_free_space(path->nodes[0]);
3772
3773 /*
3774 * try to push all the items after our slot into the
3775 * right leaf
3776 */
3777 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
3778 if (ret < 0)
3779 return ret;
3780
3781 if (ret == 0)
3782 progress++;
3783
3784 nritems = btrfs_header_nritems(path->nodes[0]);
3785 /*
3786 * our goal is to get our slot at the start or end of a leaf. If
3787 * we've done so we're done
3788 */
3789 if (path->slots[0] == 0 || path->slots[0] == nritems)
3790 return 0;
3791
3792 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size)
3793 return 0;
3794
3795 /* try to push all the items before our slot into the next leaf */
3796 slot = path->slots[0];
3797 space_needed = data_size;
3798 if (slot > 0)
3799 space_needed -= btrfs_leaf_free_space(path->nodes[0]);
3800 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
3801 if (ret < 0)
3802 return ret;
3803
3804 if (ret == 0)
3805 progress++;
3806
3807 if (progress)
3808 return 0;
3809 return 1;
3810 }
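
/*
 * Worked example of the strategy above (sketch): with the target in the
 * middle of a full leaf,
 *
 *	leaf:  [ 0 .. slot-1 ][ target ][ slot .. N ]
 *
 * push_leaf_right() first tries to move [ slot .. N ] into the right
 * neighbour; if that leaves the target at the end of the leaf, or frees
 * enough space, we are done. Otherwise push_leaf_left() tries to move
 * [ 0 .. slot-1 ] left so the target lands at slot 0. Only when neither push
 * makes progress does the caller fall back to the double split.
 */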
3811
3812 /*
3813 * split the path's leaf in two, making sure there is at least data_size
3814 * available for the resulting leaf level of the path.
3815 *
3816 * returns 0 if all went well and < 0 on failure.
3817 */
3818 static noinline int split_leaf(struct btrfs_trans_handle *trans,
3819 struct btrfs_root *root,
3820 const struct btrfs_key *ins_key,
3821 struct btrfs_path *path, int data_size,
3822 int extend)
3823 {
3824 struct btrfs_disk_key disk_key;
3825 struct extent_buffer *l;
3826 u32 nritems;
3827 int mid;
3828 int slot;
3829 struct extent_buffer *right;
3830 struct btrfs_fs_info *fs_info = root->fs_info;
3831 int ret = 0;
3832 int wret;
3833 int split;
3834 int num_doubles = 0;
3835 int tried_avoid_double = 0;
3836
3837 l = path->nodes[0];
3838 slot = path->slots[0];
3839 if (extend && data_size + btrfs_item_size(l, slot) +
3840 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info))
3841 return -EOVERFLOW;
3842
3843 /* first try to make some room by pushing left and right */
3844 if (data_size && path->nodes[1]) {
3845 int space_needed = data_size;
3846
3847 if (slot < btrfs_header_nritems(l))
3848 space_needed -= btrfs_leaf_free_space(l);
3849
3850 wret = push_leaf_right(trans, root, path, space_needed,
3851 space_needed, 0, 0);
3852 if (wret < 0)
3853 return wret;
3854 if (wret) {
3855 space_needed = data_size;
3856 if (slot > 0)
3857 space_needed -= btrfs_leaf_free_space(l);
3858 wret = push_leaf_left(trans, root, path, space_needed,
3859 space_needed, 0, (u32)-1);
3860 if (wret < 0)
3861 return wret;
3862 }
3863 l = path->nodes[0];
3864
3865 /* did the pushes work? */
3866 if (btrfs_leaf_free_space(l) >= data_size)
3867 return 0;
3868 }
3869
3870 if (!path->nodes[1]) {
3871 ret = insert_new_root(trans, root, path, 1);
3872 if (ret)
3873 return ret;
3874 }
3875 again:
3876 split = 1;
3877 l = path->nodes[0];
3878 slot = path->slots[0];
3879 nritems = btrfs_header_nritems(l);
3880 mid = (nritems + 1) / 2;
3881
3882 if (mid <= slot) {
3883 if (nritems == 1 ||
3884 leaf_space_used(l, mid, nritems - mid) + data_size >
3885 BTRFS_LEAF_DATA_SIZE(fs_info)) {
3886 if (slot >= nritems) {
3887 split = 0;
3888 } else {
3889 mid = slot;
3890 if (mid != nritems &&
3891 leaf_space_used(l, mid, nritems - mid) +
3892 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
3893 if (data_size && !tried_avoid_double)
3894 goto push_for_double;
3895 split = 2;
3896 }
3897 }
3898 }
3899 } else {
3900 if (leaf_space_used(l, 0, mid) + data_size >
3901 BTRFS_LEAF_DATA_SIZE(fs_info)) {
3902 if (!extend && data_size && slot == 0) {
3903 split = 0;
3904 } else if ((extend || !data_size) && slot == 0) {
3905 mid = 1;
3906 } else {
3907 mid = slot;
3908 if (mid != nritems &&
3909 leaf_space_used(l, mid, nritems - mid) +
3910 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
3911 if (data_size && !tried_avoid_double)
3912 goto push_for_double;
3913 split = 2;
3914 }
3915 }
3916 }
3917 }
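
/*
 * At this point 'split' encodes the plan: 0 means don't copy anything and
 * just hand the caller a new empty leaf (the insertion point is at one end
 * of the original leaf); 1 means a single split at 'mid'; 2 means even the
 * chosen 'mid' leaves too little room, so we will have to split a second
 * time (tracked via num_doubles below).
 */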
3918
3919 if (split == 0)
3920 btrfs_cpu_key_to_disk(&disk_key, ins_key);
3921 else
3922 btrfs_item_key(l, &disk_key, mid);
3923
3924 /*
3925 * We have to use BTRFS_NESTING_NEW_ROOT here if we've done a double
3926 * split, because we're only allowed to have MAX_LOCKDEP_SUBCLASSES
3927 * subclasses, which is 8 at the time of this patch, and we've maxed it
3928 * out. In the future we could add a
3929 * BTRFS_NESTING_SPLIT_THE_SPLITTENING if we need to, but for now just
3930 * use BTRFS_NESTING_NEW_ROOT.
3931 */
3932 right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3933 &disk_key, 0, l->start, 0,
3934 num_doubles ? BTRFS_NESTING_NEW_ROOT :
3935 BTRFS_NESTING_SPLIT);
3936 if (IS_ERR(right))
3937 return PTR_ERR(right);
3938
3939 root_add_used(root, fs_info->nodesize);
3940
3941 if (split == 0) {
3942 if (mid <= slot) {
3943 btrfs_set_header_nritems(right, 0);
3944 ret = insert_ptr(trans, path, &disk_key,
3945 right->start, path->slots[1] + 1, 1);
3946 if (ret < 0) {
3947 btrfs_tree_unlock(right);
3948 free_extent_buffer(right);
3949 return ret;
3950 }
3951 btrfs_tree_unlock(path->nodes[0]);
3952 free_extent_buffer(path->nodes[0]);
3953 path->nodes[0] = right;
3954 path->slots[0] = 0;
3955 path->slots[1] += 1;
3956 } else {
3957 btrfs_set_header_nritems(right, 0);
3958 ret = insert_ptr(trans, path, &disk_key,
3959 right->start, path->slots[1], 1);
3960 if (ret < 0) {
3961 btrfs_tree_unlock(right);
3962 free_extent_buffer(right);
3963 return ret;
3964 }
3965 btrfs_tree_unlock(path->nodes[0]);
3966 free_extent_buffer(path->nodes[0]);
3967 path->nodes[0] = right;
3968 path->slots[0] = 0;
3969 if (path->slots[1] == 0)
3970 fixup_low_keys(trans, path, &disk_key, 1);
3971 }
3972 /*
3973 * We created the new leaf 'right' with room for the required
3974 * ins_len; the caller is expected to copy the new item's content
3975 * into it and mark the leaf dirty with btrfs_mark_buffer_dirty().
3976 */
3977 return ret;
3978 }
3979
3980 ret = copy_for_split(trans, path, l, right, slot, mid, nritems);
3981 if (ret < 0) {
3982 btrfs_tree_unlock(right);
3983 free_extent_buffer(right);
3984 return ret;
3985 }
3986
3987 if (split == 2) {
3988 BUG_ON(num_doubles != 0);
3989 num_doubles++;
3990 goto again;
3991 }
3992
3993 return 0;
3994
3995 push_for_double:
3996 push_for_double_split(trans, root, path, data_size);
3997 tried_avoid_double = 1;
3998 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size)
3999 return 0;
4000 goto again;
4001 }
4002
4003 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4004 struct btrfs_root *root,
4005 struct btrfs_path *path, int ins_len)
4006 {
4007 struct btrfs_key key;
4008 struct extent_buffer *leaf;
4009 struct btrfs_file_extent_item *fi;
4010 u64 extent_len = 0;
4011 u32 item_size;
4012 int ret;
4013
4014 leaf = path->nodes[0];
4015 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4016
4017 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4018 key.type != BTRFS_EXTENT_CSUM_KEY);
4019
4020 if (btrfs_leaf_free_space(leaf) >= ins_len)
4021 return 0;
4022
4023 item_size = btrfs_item_size(leaf, path->slots[0]);
4024 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4025 fi = btrfs_item_ptr(leaf, path->slots[0],
4026 struct btrfs_file_extent_item);
4027 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4028 }
4029 btrfs_release_path(path);
4030
4031 path->keep_locks = 1;
4032 path->search_for_split = 1;
4033 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4034 path->search_for_split = 0;
4035 if (ret > 0)
4036 ret = -EAGAIN;
4037 if (ret < 0)
4038 goto err;
4039
4040 ret = -EAGAIN;
4041 leaf = path->nodes[0];
4042 /* if our item isn't there, return now */
4043 if (item_size != btrfs_item_size(leaf, path->slots[0]))
4044 goto err;
4045
4046 /* the leaf has changed, it now has room. return now */
4047 if (btrfs_leaf_free_space(path->nodes[0]) >= ins_len)
4048 goto err;
4049
4050 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4051 fi = btrfs_item_ptr(leaf, path->slots[0],
4052 struct btrfs_file_extent_item);
4053 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4054 goto err;
4055 }
4056
4057 ret = split_leaf(trans, root, &key, path, ins_len, 1);
4058 if (ret)
4059 goto err;
4060
4061 path->keep_locks = 0;
4062 btrfs_unlock_up_safe(path, 1);
4063 return 0;
4064 err:
4065 path->keep_locks = 0;
4066 return ret;
4067 }
4068
4069 static noinline int split_item(struct btrfs_trans_handle *trans,
4070 struct btrfs_path *path,
4071 const struct btrfs_key *new_key,
4072 unsigned long split_offset)
4073 {
4074 struct extent_buffer *leaf;
4075 int orig_slot, slot;
4076 char *buf;
4077 u32 nritems;
4078 u32 item_size;
4079 u32 orig_offset;
4080 struct btrfs_disk_key disk_key;
4081
4082 leaf = path->nodes[0];
4083 /*
4084 * Shouldn't happen because the caller must have previously called
4085 * setup_leaf_for_split() to make room for the new item in the leaf.
4086 */
4087 if (WARN_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item)))
4088 return -ENOSPC;
4089
4090 orig_slot = path->slots[0];
4091 orig_offset = btrfs_item_offset(leaf, path->slots[0]);
4092 item_size = btrfs_item_size(leaf, path->slots[0]);
4093
4094 buf = kmalloc(item_size, GFP_NOFS);
4095 if (!buf)
4096 return -ENOMEM;
4097
4098 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4099 path->slots[0]), item_size);
4100
4101 slot = path->slots[0] + 1;
4102 nritems = btrfs_header_nritems(leaf);
4103 if (slot != nritems) {
4104 /* shift the items */
4105 memmove_leaf_items(leaf, slot + 1, slot, nritems - slot);
4106 }
4107
4108 btrfs_cpu_key_to_disk(&disk_key, new_key);
4109 btrfs_set_item_key(leaf, &disk_key, slot);
4110
4111 btrfs_set_item_offset(leaf, slot, orig_offset);
4112 btrfs_set_item_size(leaf, slot, item_size - split_offset);
4113
4114 btrfs_set_item_offset(leaf, orig_slot,
4115 orig_offset + item_size - split_offset);
4116 btrfs_set_item_size(leaf, orig_slot, split_offset);
4117
4118 btrfs_set_header_nritems(leaf, nritems + 1);
4119
4120 /* write the data for the start of the original item */
4121 write_extent_buffer(leaf, buf,
4122 btrfs_item_ptr_offset(leaf, path->slots[0]),
4123 split_offset);
4124
4125 /* write the data for the new item */
4126 write_extent_buffer(leaf, buf + split_offset,
4127 btrfs_item_ptr_offset(leaf, slot),
4128 item_size - split_offset);
4129 btrfs_mark_buffer_dirty(trans, leaf);
4130
4131 BUG_ON(btrfs_leaf_free_space(leaf) < 0);
4132 kfree(buf);
4133 return 0;
4134 }
4135
4136 /*
4137 * This function splits a single item into two items,
4138 * giving 'new_key' to the new item and splitting the
4139 * old one at split_offset (from the start of the item).
4140 *
4141 * The path may be released by this operation. After
4142 * the split, the path is pointing to the old item. The
4143 * new item is going to be in the same node as the old one.
4144 *
4145 * Note: the item being split must be small enough to live alone in
4146 * a tree block, with room for one extra struct btrfs_item.
4147 *
4148 * This allows us to split the item in place, keeping a lock on the
4149 * leaf the entire time.
4150 */
4151 int btrfs_split_item(struct btrfs_trans_handle *trans,
4152 struct btrfs_root *root,
4153 struct btrfs_path *path,
4154 const struct btrfs_key *new_key,
4155 unsigned long split_offset)
4156 {
4157 int ret;
4158 ret = setup_leaf_for_split(trans, root, path,
4159 sizeof(struct btrfs_item));
4160 if (ret)
4161 return ret;
4162
4163 ret = split_item(trans, path, new_key, split_offset);
4164 return ret;
4165 }
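
/*
 * Usage sketch (illustrative values, not compiled): splitting an item so the
 * first split_offset bytes stay in place and the remainder moves to a new
 * key. 'key' is assumed to be the item's current key and the offset bump is
 * hypothetical:
 *
 *	struct btrfs_key new_key = key;
 *	new_key.offset += split_offset;
 *
 *	ret = btrfs_split_item(trans, root, path, &new_key, split_offset);
 *
 * On success the item at path->slots[0] keeps bytes [0, split_offset) and
 * the new item in the next slot holds bytes [split_offset, item_size).
 */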
4166
4167 /*
4168 * make the item pointed to by the path smaller. new_size indicates
4169 * how small to make it, and from_end tells us if we just chop bytes
4170 * off the end of the item or if we shift the item to chop bytes off
4171 * the front.
4172 */
4173 void btrfs_truncate_item(struct btrfs_trans_handle *trans,
4174 struct btrfs_path *path, u32 new_size, int from_end)
4175 {
4176 int slot;
4177 struct extent_buffer *leaf;
4178 u32 nritems;
4179 unsigned int data_end;
4180 unsigned int old_data_start;
4181 unsigned int old_size;
4182 unsigned int size_diff;
4183 int i;
4184 struct btrfs_map_token token;
4185
4186 leaf = path->nodes[0];
4187 slot = path->slots[0];
4188
4189 old_size = btrfs_item_size(leaf, slot);
4190 if (old_size == new_size)
4191 return;
4192
4193 nritems = btrfs_header_nritems(leaf);
4194 data_end = leaf_data_end(leaf);
4195
4196 old_data_start = btrfs_item_offset(leaf, slot);
4197
4198 size_diff = old_size - new_size;
4199
4200 BUG_ON(slot < 0);
4201 BUG_ON(slot >= nritems);
4202
4203 /*
4204 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4205 */
4206 /* first correct the data pointers */
4207 btrfs_init_map_token(&token, leaf);
4208 for (i = slot; i < nritems; i++) {
4209 u32 ioff;
4210
4211 ioff = btrfs_token_item_offset(&token, i);
4212 btrfs_set_token_item_offset(&token, i, ioff + size_diff);
4213 }
4214
4215 /* shift the data */
4216 if (from_end) {
4217 memmove_leaf_data(leaf, data_end + size_diff, data_end,
4218 old_data_start + new_size - data_end);
4219 } else {
4220 struct btrfs_disk_key disk_key;
4221 u64 offset;
4222
4223 btrfs_item_key(leaf, &disk_key, slot);
4224
4225 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4226 unsigned long ptr;
4227 struct btrfs_file_extent_item *fi;
4228
4229 fi = btrfs_item_ptr(leaf, slot,
4230 struct btrfs_file_extent_item);
4231 fi = (struct btrfs_file_extent_item *)(
4232 (unsigned long)fi - size_diff);
4233
4234 if (btrfs_file_extent_type(leaf, fi) ==
4235 BTRFS_FILE_EXTENT_INLINE) {
4236 ptr = btrfs_item_ptr_offset(leaf, slot);
4237 memmove_extent_buffer(leaf, ptr,
4238 (unsigned long)fi,
4239 BTRFS_FILE_EXTENT_INLINE_DATA_START);
4240 }
4241 }
4242
4243 memmove_leaf_data(leaf, data_end + size_diff, data_end,
4244 old_data_start - data_end);
4245
4246 offset = btrfs_disk_key_offset(&disk_key);
4247 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4248 btrfs_set_item_key(leaf, &disk_key, slot);
4249 if (slot == 0)
4250 fixup_low_keys(trans, path, &disk_key, 1);
4251 }
4252
4253 btrfs_set_item_size(leaf, slot, new_size);
4254 btrfs_mark_buffer_dirty(trans, leaf);
4255
4256 if (btrfs_leaf_free_space(leaf) < 0) {
4257 btrfs_print_leaf(leaf);
4258 BUG();
4259 }
4260 }
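
/*
 * Example of the two modes (sketch): shrinking a 10 byte item to
 * new_size == 6.
 *
 *	from_end == 1: keep bytes [0, 6), chop the tail; the key is
 *		untouched.
 *	from_end == 0: keep bytes [4, 10), chop the head; the data is
 *		shifted and the key's offset is bumped by the 4 removed
 *		bytes so lookups still line up (see the disk_key fixup
 *		above).
 */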
4261
4262 /*
4263 * make the item pointed to by the path bigger, data_size is the added size.
4264 */
4265 void btrfs_extend_item(struct btrfs_trans_handle *trans,
4266 struct btrfs_path *path, u32 data_size)
4267 {
4268 int slot;
4269 struct extent_buffer *leaf;
4270 u32 nritems;
4271 unsigned int data_end;
4272 unsigned int old_data;
4273 unsigned int old_size;
4274 int i;
4275 struct btrfs_map_token token;
4276
4277 leaf = path->nodes[0];
4278
4279 nritems = btrfs_header_nritems(leaf);
4280 data_end = leaf_data_end(leaf);
4281
4282 if (btrfs_leaf_free_space(leaf) < data_size) {
4283 btrfs_print_leaf(leaf);
4284 BUG();
4285 }
4286 slot = path->slots[0];
4287 old_data = btrfs_item_data_end(leaf, slot);
4288
4289 BUG_ON(slot < 0);
4290 if (slot >= nritems) {
4291 btrfs_print_leaf(leaf);
4292 btrfs_crit(leaf->fs_info, "slot %d too large, nritems %d",
4293 slot, nritems);
4294 BUG();
4295 }
4296
4297 /*
4298 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4299 */
4300 /* first correct the data pointers */
4301 btrfs_init_map_token(&token, leaf);
4302 for (i = slot; i < nritems; i++) {
4303 u32 ioff;
4304
4305 ioff = btrfs_token_item_offset(&token, i);
4306 btrfs_set_token_item_offset(&token, i, ioff - data_size);
4307 }
4308
4309 /* shift the data */
4310 memmove_leaf_data(leaf, data_end - data_size, data_end,
4311 old_data - data_end);
4312
4313 data_end = old_data;
4314 old_size = btrfs_item_size(leaf, slot);
4315 btrfs_set_item_size(leaf, slot, old_size + data_size);
4316 btrfs_mark_buffer_dirty(trans, leaf);
4317
4318 if (btrfs_leaf_free_space(leaf) < 0) {
4319 btrfs_print_leaf(leaf);
4320 BUG();
4321 }
4322 }
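
/*
 * Usage sketch (illustrative, not compiled): growing an item and then
 * appending to its data. The new bytes appear at the tail of the item,
 * since the existing data is shifted down as a whole:
 *
 *	old_size = btrfs_item_size(leaf, slot);
 *	btrfs_extend_item(trans, path, new_bytes);
 *	ptr = btrfs_item_ptr_offset(leaf, slot) + old_size;
 *	write_extent_buffer(leaf, data, ptr, new_bytes);
 *
 * The caller must have already reserved the space, e.g. by passing a large
 * enough ins_len to btrfs_search_slot().
 */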
4323
4324 /*
4325 * Make space in the node before inserting one or more items.
4326 *
4327 * @trans: transaction handle
4328 * @root: root we are inserting items to
4329 * @path: points to the leaf/slot where we are going to insert new items
4330 * @batch: information about the batch of items to insert
4331 *
4332 * Main purpose is to save stack depth by doing the bulk of the work in a
4333 * function that doesn't call btrfs_search_slot
4334 */
4335 static void setup_items_for_insert(struct btrfs_trans_handle *trans,
4336 struct btrfs_root *root, struct btrfs_path *path,
4337 const struct btrfs_item_batch *batch)
4338 {
4339 struct btrfs_fs_info *fs_info = root->fs_info;
4340 int i;
4341 u32 nritems;
4342 unsigned int data_end;
4343 struct btrfs_disk_key disk_key;
4344 struct extent_buffer *leaf;
4345 int slot;
4346 struct btrfs_map_token token;
4347 u32 total_size;
4348
4349 /*
4350 * Before anything else, update keys in the parent and other ancestors
4351 * if needed, then release the write locks on them, so that other tasks
4352 * can use them while we modify the leaf.
4353 */
4354 if (path->slots[0] == 0) {
4355 btrfs_cpu_key_to_disk(&disk_key, &batch->keys[0]);
4356 fixup_low_keys(trans, path, &disk_key, 1);
4357 }
4358 btrfs_unlock_up_safe(path, 1);
4359
4360 leaf = path->nodes[0];
4361 slot = path->slots[0];
4362
4363 nritems = btrfs_header_nritems(leaf);
4364 data_end = leaf_data_end(leaf);
4365 total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item));
4366
4367 if (btrfs_leaf_free_space(leaf) < total_size) {
4368 btrfs_print_leaf(leaf);
4369 btrfs_crit(fs_info, "not enough freespace need %u have %d",
4370 total_size, btrfs_leaf_free_space(leaf));
4371 BUG();
4372 }
4373
4374 btrfs_init_map_token(&token, leaf);
4375 if (slot != nritems) {
4376 unsigned int old_data = btrfs_item_data_end(leaf, slot);
4377
4378 if (old_data < data_end) {
4379 btrfs_print_leaf(leaf);
4380 btrfs_crit(fs_info,
4381 "item at slot %d with data offset %u beyond data end of leaf %u",
4382 slot, old_data, data_end);
4383 BUG();
4384 }
4385 /*
4386 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4387 */
4388 /* first correct the data pointers */
4389 for (i = slot; i < nritems; i++) {
4390 u32 ioff;
4391
4392 ioff = btrfs_token_item_offset(&token, i);
4393 btrfs_set_token_item_offset(&token, i,
4394 ioff - batch->total_data_size);
4395 }
4396 /* shift the items */
4397 memmove_leaf_items(leaf, slot + batch->nr, slot, nritems - slot);
4398
4399 /* shift the data */
4400 memmove_leaf_data(leaf, data_end - batch->total_data_size,
4401 data_end, old_data - data_end);
4402 data_end = old_data;
4403 }
4404
4405 /* setup the item for the new data */
4406 for (i = 0; i < batch->nr; i++) {
4407 btrfs_cpu_key_to_disk(&disk_key, &batch->keys[i]);
4408 btrfs_set_item_key(leaf, &disk_key, slot + i);
4409 data_end -= batch->data_sizes[i];
4410 btrfs_set_token_item_offset(&token, slot + i, data_end);
4411 btrfs_set_token_item_size(&token, slot + i, batch->data_sizes[i]);
4412 }
4413
4414 btrfs_set_header_nritems(leaf, nritems + batch->nr);
4415 btrfs_mark_buffer_dirty(trans, leaf);
4416
4417 if (btrfs_leaf_free_space(leaf) < 0) {
4418 btrfs_print_leaf(leaf);
4419 BUG();
4420 }
4421 }
4422
4423 /*
4424 * Insert a new item into a leaf.
4425 *
4426 * @trans: Transaction handle.
4427 * @root: The root of the btree.
4428 * @path: A path pointing to the target leaf and slot.
4429 * @key: The key of the new item.
4430 * @data_size: The size of the data associated with the new key.
4431 */
4432 void btrfs_setup_item_for_insert(struct btrfs_trans_handle *trans,
4433 struct btrfs_root *root,
4434 struct btrfs_path *path,
4435 const struct btrfs_key *key,
4436 u32 data_size)
4437 {
4438 struct btrfs_item_batch batch;
4439
4440 batch.keys = key;
4441 batch.data_sizes = &data_size;
4442 batch.total_data_size = data_size;
4443 batch.nr = 1;
4444
4445 setup_items_for_insert(trans, root, path, &batch);
4446 }
4447
4448 /*
4449 * Given a key and some data, insert items into the tree.
4450 * This does all the path init required, making room in the tree if needed.
4451 */
4452 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4453 struct btrfs_root *root,
4454 struct btrfs_path *path,
4455 const struct btrfs_item_batch *batch)
4456 {
4457 int ret = 0;
4458 int slot;
4459 u32 total_size;
4460
4461 total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item));
4462 ret = btrfs_search_slot(trans, root, &batch->keys[0], path, total_size, 1);
4463 if (ret == 0)
4464 return -EEXIST;
4465 if (ret < 0)
4466 return ret;
4467
4468 slot = path->slots[0];
4469 BUG_ON(slot < 0);
4470
4471 setup_items_for_insert(trans, root, path, batch);
4472 return 0;
4473 }
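
/*
 * Illustrative sketch (local names are hypothetical): inserting two items in
 * one batch. The keys must be given in sorted order, since the items are
 * laid out consecutively starting at the slot found for keys[0]:
 *
 *	struct btrfs_key keys[2];
 *	u32 sizes[2] = { size_a, size_b };
 *	struct btrfs_item_batch batch = {
 *		.keys = keys,
 *		.data_sizes = sizes,
 *		.total_data_size = size_a + size_b,
 *		.nr = 2,
 *	};
 *
 *	ret = btrfs_insert_empty_items(trans, root, path, &batch);
 *
 * On success the path points at the first new item and the caller fills in
 * the data, typically with write_extent_buffer().
 */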
4474
4475 /*
4476 * Given a key and some data, insert an item into the tree.
4477 * This does all the path init required, making room in the tree if needed.
4478 */
4479 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4480 const struct btrfs_key *cpu_key, void *data,
4481 u32 data_size)
4482 {
4483 int ret = 0;
4484 struct btrfs_path *path;
4485 struct extent_buffer *leaf;
4486 unsigned long ptr;
4487
4488 path = btrfs_alloc_path();
4489 if (!path)
4490 return -ENOMEM;
4491 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4492 if (!ret) {
4493 leaf = path->nodes[0];
4494 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4495 write_extent_buffer(leaf, data, ptr, data_size);
4496 btrfs_mark_buffer_dirty(trans, leaf);
4497 }
4498 btrfs_free_path(path);
4499 return ret;
4500 }
4501
4502 /*
4503 * This function duplicates an item, giving 'new_key' to the new item.
4504 * It guarantees both items live in the same tree leaf and the new item is
4505 * contiguous with the original item.
4506 *
4507 * This allows us to split a file extent in place, keeping a lock on the leaf
4508 * the entire time.
4509 */
4510 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4511 struct btrfs_root *root,
4512 struct btrfs_path *path,
4513 const struct btrfs_key *new_key)
4514 {
4515 struct extent_buffer *leaf;
4516 int ret;
4517 u32 item_size;
4518
4519 leaf = path->nodes[0];
4520 item_size = btrfs_item_size(leaf, path->slots[0]);
4521 ret = setup_leaf_for_split(trans, root, path,
4522 item_size + sizeof(struct btrfs_item));
4523 if (ret)
4524 return ret;
4525
4526 path->slots[0]++;
4527 btrfs_setup_item_for_insert(trans, root, path, new_key, item_size);
4528 leaf = path->nodes[0];
4529 memcpy_extent_buffer(leaf,
4530 btrfs_item_ptr_offset(leaf, path->slots[0]),
4531 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4532 item_size);
4533 return 0;
4534 }
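
/*
 * Usage sketch (hypothetical values): splitting a file extent in place by
 * duplicating its item and then rewriting both copies:
 *
 *	new_key.objectid = key.objectid;
 *	new_key.type = BTRFS_EXTENT_DATA_KEY;
 *	new_key.offset = key.offset + split_pos;
 *	ret = btrfs_duplicate_item(trans, root, path, &new_key);
 *
 * On success the path points at the new (second) copy; both items start out
 * byte-for-byte identical, and the caller adjusts their extent fields to
 * describe the two halves.
 */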
4535
4536 /*
4537 * delete the pointer from a given node.
4538 *
4539 * the tree should have been previously balanced so the deletion does not
4540 * empty a node.
4541 *
4542 * This is exported for use inside btrfs-progs, don't un-export it.
4543 */
4544 int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4545 struct btrfs_path *path, int level, int slot)
4546 {
4547 struct extent_buffer *parent = path->nodes[level];
4548 u32 nritems;
4549 int ret;
4550
4551 nritems = btrfs_header_nritems(parent);
4552 if (slot != nritems - 1) {
4553 if (level) {
4554 ret = btrfs_tree_mod_log_insert_move(parent, slot,
4555 slot + 1, nritems - slot - 1);
4556 if (ret < 0) {
4557 btrfs_abort_transaction(trans, ret);
4558 return ret;
4559 }
4560 }
4561 memmove_extent_buffer(parent,
4562 btrfs_node_key_ptr_offset(parent, slot),
4563 btrfs_node_key_ptr_offset(parent, slot + 1),
4564 sizeof(struct btrfs_key_ptr) *
4565 (nritems - slot - 1));
4566 } else if (level) {
4567 ret = btrfs_tree_mod_log_insert_key(parent, slot,
4568 BTRFS_MOD_LOG_KEY_REMOVE);
4569 if (ret < 0) {
4570 btrfs_abort_transaction(trans, ret);
4571 return ret;
4572 }
4573 }
4574
4575 nritems--;
4576 btrfs_set_header_nritems(parent, nritems);
4577 if (nritems == 0 && parent == root->node) {
4578 BUG_ON(btrfs_header_level(root->node) != 1);
4579 /* just turn the root into a leaf and break */
4580 btrfs_set_header_level(root->node, 0);
4581 } else if (slot == 0) {
4582 struct btrfs_disk_key disk_key;
4583
4584 btrfs_node_key(parent, &disk_key, 0);
4585 fixup_low_keys(trans, path, &disk_key, level + 1);
4586 }
4587 btrfs_mark_buffer_dirty(trans, parent);
4588 return 0;
4589 }
4590
4591 /*
4592 * a helper function to delete the leaf pointed to by path->slots[1] and
4593 * path->nodes[1].
4594 *
4595 * This deletes the pointer in path->nodes[1] and frees the leaf
4596 * block extent. zero is returned if it all worked out, < 0 otherwise.
4597 *
4598 * The path must have already been setup for deleting the leaf, including
4599 * all the proper balancing. path->nodes[1] must be locked.
4600 */
4601 static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
4602 struct btrfs_root *root,
4603 struct btrfs_path *path,
4604 struct extent_buffer *leaf)
4605 {
4606 int ret;
4607
4608 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4609 ret = btrfs_del_ptr(trans, root, path, 1, path->slots[1]);
4610 if (ret < 0)
4611 return ret;
4612
4613 /*
4614 * btrfs_free_tree_block() is expensive, so we want to make sure we
4615 * aren't holding any locks when we call it.
4616 */
4617 btrfs_unlock_up_safe(path, 0);
4618
4619 root_sub_used(root, leaf->len);
4620
4621 atomic_inc(&leaf->refs);
4622 ret = btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1);
4623 free_extent_buffer_stale(leaf);
4624 if (ret < 0)
4625 btrfs_abort_transaction(trans, ret);
4626
4627 return ret;
4628 }
4629 /*
4630 * delete the item at the leaf level in path. If that empties
4631 * the leaf, remove it from the tree
4632 */
4633 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4634 struct btrfs_path *path, int slot, int nr)
4635 {
4636 struct btrfs_fs_info *fs_info = root->fs_info;
4637 struct extent_buffer *leaf;
4638 int ret = 0;
4639 int wret;
4640 u32 nritems;
4641
4642 leaf = path->nodes[0];
4643 nritems = btrfs_header_nritems(leaf);
4644
4645 if (slot + nr != nritems) {
4646 const u32 last_off = btrfs_item_offset(leaf, slot + nr - 1);
4647 const int data_end = leaf_data_end(leaf);
4648 struct btrfs_map_token token;
4649 u32 dsize = 0;
4650 int i;
4651
4652 for (i = 0; i < nr; i++)
4653 dsize += btrfs_item_size(leaf, slot + i);
4654
4655 memmove_leaf_data(leaf, data_end + dsize, data_end,
4656 last_off - data_end);
4657
4658 btrfs_init_map_token(&token, leaf);
4659 for (i = slot + nr; i < nritems; i++) {
4660 u32 ioff;
4661
4662 ioff = btrfs_token_item_offset(&token, i);
4663 btrfs_set_token_item_offset(&token, i, ioff + dsize);
4664 }
4665
4666 memmove_leaf_items(leaf, slot, slot + nr, nritems - slot - nr);
4667 }
4668 btrfs_set_header_nritems(leaf, nritems - nr);
4669 nritems -= nr;
4670
4671 /* delete the leaf if we've emptied it */
4672 if (nritems == 0) {
4673 if (leaf == root->node) {
4674 btrfs_set_header_level(leaf, 0);
4675 } else {
4676 btrfs_clear_buffer_dirty(trans, leaf);
4677 ret = btrfs_del_leaf(trans, root, path, leaf);
4678 if (ret < 0)
4679 return ret;
4680 }
4681 } else {
4682 int used = leaf_space_used(leaf, 0, nritems);
4683 if (slot == 0) {
4684 struct btrfs_disk_key disk_key;
4685
4686 btrfs_item_key(leaf, &disk_key, 0);
4687 fixup_low_keys(trans, path, &disk_key, 1);
4688 }
4689
4690 /*
4691 * Try to delete the leaf if it is mostly empty. We do this by
4692 * trying to move all its items into its left and right neighbours.
4693 * If we can't move all the items, then we don't delete it - it's
4694 * not ideal, but future insertions might fill the leaf with more
4695 * items, or items from other leaves might be moved later into our
4696 * leaf due to deletions on those leaves.
4697 */
4698 if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
4699 u32 min_push_space;
4700
4701 /* push_leaf_left fixes the path.
4702 * make sure the path still points to our leaf
4703 * for possible call to btrfs_del_ptr below
4704 */
4705 slot = path->slots[1];
4706 atomic_inc(&leaf->refs);
4707 /*
4708 * We want to be able to at least push one item to the
4709 * left neighbour leaf, and that's the first item.
4710 */
4711 min_push_space = sizeof(struct btrfs_item) +
4712 btrfs_item_size(leaf, 0);
4713 wret = push_leaf_left(trans, root, path, 0,
4714 min_push_space, 1, (u32)-1);
4715 if (wret < 0 && wret != -ENOSPC)
4716 ret = wret;
4717
4718 if (path->nodes[0] == leaf &&
4719 btrfs_header_nritems(leaf)) {
4720 /*
4721 * If we were not able to push all items from our
4722 * leaf to its left neighbour, then attempt to
4723 * either push all the remaining items to the
4724 * right neighbour or none. There's no advantage
4725 * in pushing only some items, instead of all, as
4726 * it's pointless to end up with a leaf having
4727 * too few items while the neighbours can be full
4728 * or nearly full.
4729 */
4730 nritems = btrfs_header_nritems(leaf);
4731 min_push_space = leaf_space_used(leaf, 0, nritems);
4732 wret = push_leaf_right(trans, root, path, 0,
4733 min_push_space, 1, 0);
4734 if (wret < 0 && wret != -ENOSPC)
4735 ret = wret;
4736 }
4737
4738 if (btrfs_header_nritems(leaf) == 0) {
4739 path->slots[1] = slot;
4740 ret = btrfs_del_leaf(trans, root, path, leaf);
4741 if (ret < 0)
4742 return ret;
4743 free_extent_buffer(leaf);
4744 ret = 0;
4745 } else {
4746 /* if we're still in the path, make sure
4747 * we're dirty. Otherwise, one of the
4748 * push_leaf functions must have already
4749 * dirtied this buffer
4750 */
4751 if (path->nodes[0] == leaf)
4752 btrfs_mark_buffer_dirty(trans, leaf);
4753 free_extent_buffer(leaf);
4754 }
4755 } else {
4756 btrfs_mark_buffer_dirty(trans, leaf);
4757 }
4758 }
4759 return ret;
4760 }
4761
4762 /*
4763 * A helper function to walk down the tree starting at min_key, looking
4764 * for nodes or leaves that have a minimum transaction id.
4765 * This is used by the btree defrag code and tree logging.
4766 *
4767 * This does not cow, but it does stuff the starting key it finds back
4768 * into min_key, so you can call btrfs_search_slot with cow=1 on the
4769 * key and get a writable path.
4770 *
4771 * This honors path->lowest_level to prevent descent past a given level
4772 * of the tree.
4773 *
4774 * min_trans indicates the oldest transaction that you are interested
4775 * in walking through. Any nodes or leaves older than min_trans are
4776 * skipped over (without reading them).
4777 *
4778 * returns zero if something useful was found, < 0 on error and 1 if there
4779 * was nothing in the tree that matched the search criteria.
4780 */
4781 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
4782 struct btrfs_path *path,
4783 u64 min_trans)
4784 {
4785 struct extent_buffer *cur;
4786 struct btrfs_key found_key;
4787 int slot;
4788 int sret;
4789 u32 nritems;
4790 int level;
4791 int ret = 1;
4792 int keep_locks = path->keep_locks;
4793
4794 ASSERT(!path->nowait);
4795 path->keep_locks = 1;
4796 again:
4797 cur = btrfs_read_lock_root_node(root);
4798 level = btrfs_header_level(cur);
4799 WARN_ON(path->nodes[level]);
4800 path->nodes[level] = cur;
4801 path->locks[level] = BTRFS_READ_LOCK;
4802
4803 if (btrfs_header_generation(cur) < min_trans) {
4804 ret = 1;
4805 goto out;
4806 }
4807 while (1) {
4808 nritems = btrfs_header_nritems(cur);
4809 level = btrfs_header_level(cur);
4810 sret = btrfs_bin_search(cur, 0, min_key, &slot);
4811 if (sret < 0) {
4812 ret = sret;
4813 goto out;
4814 }
4815
4816 /* at the lowest level, we're done, setup the path and exit */
4817 if (level == path->lowest_level) {
4818 if (slot >= nritems)
4819 goto find_next_key;
4820 ret = 0;
4821 path->slots[level] = slot;
4822 btrfs_item_key_to_cpu(cur, &found_key, slot);
4823 goto out;
4824 }
4825 if (sret && slot > 0)
4826 slot--;
4827 /*
4828 * check this node pointer against the min_trans parameters.
4829 * If it is too old, skip to the next one.
4830 */
4831 while (slot < nritems) {
4832 u64 gen;
4833
4834 gen = btrfs_node_ptr_generation(cur, slot);
4835 if (gen < min_trans) {
4836 slot++;
4837 continue;
4838 }
4839 break;
4840 }
4841 find_next_key:
4842 /*
4843 * we didn't find a candidate key in this node, walk forward
4844 * and find another one
4845 */
4846 if (slot >= nritems) {
4847 path->slots[level] = slot;
4848 sret = btrfs_find_next_key(root, path, min_key, level,
4849 min_trans);
4850 if (sret == 0) {
4851 btrfs_release_path(path);
4852 goto again;
4853 } else {
4854 goto out;
4855 }
4856 }
4857 /* save our key for returning back */
4858 btrfs_node_key_to_cpu(cur, &found_key, slot);
4859 path->slots[level] = slot;
4860 if (level == path->lowest_level) {
4861 ret = 0;
4862 goto out;
4863 }
4864 cur = btrfs_read_node_slot(cur, slot);
4865 if (IS_ERR(cur)) {
4866 ret = PTR_ERR(cur);
4867 goto out;
4868 }
4869
4870 btrfs_tree_read_lock(cur);
4871
4872 path->locks[level - 1] = BTRFS_READ_LOCK;
4873 path->nodes[level - 1] = cur;
4874 unlock_up(path, level, 1, 0, NULL);
4875 }
4876 out:
4877 path->keep_locks = keep_locks;
4878 if (ret == 0) {
4879 btrfs_unlock_up_safe(path, path->lowest_level + 1);
4880 memcpy(min_key, &found_key, sizeof(found_key));
4881 }
4882 return ret;
4883 }
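
/*
 * Usage sketch (illustrative, ignoring key wrap-around for brevity): scan
 * everything in a root that changed since min_trans, restarting from the key
 * returned in min_key:
 *
 *	min_key.objectid = 0;
 *	min_key.type = 0;
 *	min_key.offset = 0;
 *	while (btrfs_search_forward(root, &min_key, path, min_trans) == 0) {
 *		(process path->nodes[0] at path->slots[0])
 *		btrfs_release_path(path);
 *		min_key.offset++;
 *	}
 *
 * Since min_key is updated in place on success, advancing it by one key
 * makes the next call continue strictly past what was just returned.
 */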
4884
4885 /*
4886 * this is similar to btrfs_next_leaf, but does not try to preserve
4887 * and fixup the path. It looks for and returns the next key in the
4888 * tree based on the current path and the min_trans parameters.
4889 *
4890 * 0 is returned if another key is found, < 0 if there are any errors
4891 * and 1 is returned if there are no higher keys in the tree
4892 *
4893 * path->keep_locks should be set to 1 on the search made before
4894 * calling this function.
4895 */
4896 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
4897 struct btrfs_key *key, int level, u64 min_trans)
4898 {
4899 int slot;
4900 struct extent_buffer *c;
4901
4902 WARN_ON(!path->keep_locks && !path->skip_locking);
4903 while (level < BTRFS_MAX_LEVEL) {
4904 if (!path->nodes[level])
4905 return 1;
4906
4907 slot = path->slots[level] + 1;
4908 c = path->nodes[level];
4909 next:
4910 if (slot >= btrfs_header_nritems(c)) {
4911 int ret;
4912 int orig_lowest;
4913 struct btrfs_key cur_key;
4914 if (level + 1 >= BTRFS_MAX_LEVEL ||
4915 !path->nodes[level + 1])
4916 return 1;
4917
4918 if (path->locks[level + 1] || path->skip_locking) {
4919 level++;
4920 continue;
4921 }
4922
4923 slot = btrfs_header_nritems(c) - 1;
4924 if (level == 0)
4925 btrfs_item_key_to_cpu(c, &cur_key, slot);
4926 else
4927 btrfs_node_key_to_cpu(c, &cur_key, slot);
4928
4929 orig_lowest = path->lowest_level;
4930 btrfs_release_path(path);
4931 path->lowest_level = level;
4932 ret = btrfs_search_slot(NULL, root, &cur_key, path,
4933 0, 0);
4934 path->lowest_level = orig_lowest;
4935 if (ret < 0)
4936 return ret;
4937
4938 c = path->nodes[level];
4939 slot = path->slots[level];
4940 if (ret == 0)
4941 slot++;
4942 goto next;
4943 }
4944
4945 if (level == 0)
4946 btrfs_item_key_to_cpu(c, key, slot);
4947 else {
4948 u64 gen = btrfs_node_ptr_generation(c, slot);
4949
4950 if (gen < min_trans) {
4951 slot++;
4952 goto next;
4953 }
4954 btrfs_node_key_to_cpu(c, key, slot);
4955 }
4956 return 0;
4957 }
4958 return 1;
4959 }
4960
4961 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
4962 u64 time_seq)
4963 {
4964 int slot;
4965 int level;
4966 struct extent_buffer *c;
4967 struct extent_buffer *next;
4968 struct btrfs_fs_info *fs_info = root->fs_info;
4969 struct btrfs_key key;
4970 bool need_commit_sem = false;
4971 u32 nritems;
4972 int ret;
4973 int i;
4974
4975 /*
4976 * The nowait semantics are used only for write paths, where we don't
4977 * use the tree mod log and sequence numbers.
4978 */
4979 if (time_seq)
4980 ASSERT(!path->nowait);
4981
4982 nritems = btrfs_header_nritems(path->nodes[0]);
4983 if (nritems == 0)
4984 return 1;
4985
4986 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
4987 again:
4988 level = 1;
4989 next = NULL;
4990 btrfs_release_path(path);
4991
4992 path->keep_locks = 1;
4993
4994 if (time_seq) {
4995 ret = btrfs_search_old_slot(root, &key, path, time_seq);
4996 } else {
4997 if (path->need_commit_sem) {
4998 path->need_commit_sem = 0;
4999 need_commit_sem = true;
5000 if (path->nowait) {
5001 if (!down_read_trylock(&fs_info->commit_root_sem)) {
5002 ret = -EAGAIN;
5003 goto done;
5004 }
5005 } else {
5006 down_read(&fs_info->commit_root_sem);
5007 }
5008 }
5009 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5010 }
5011 path->keep_locks = 0;
5012
5013 if (ret < 0)
5014 goto done;
5015
5016 nritems = btrfs_header_nritems(path->nodes[0]);
5017 /*
5018 * by releasing the path above we dropped all our locks. A balance
5019 * could have added more items next to the key that used to be
5020 * at the very end of the block. So, check again here and
5021 * advance the path if there are now more items available.
5022 */
5023 if (nritems > 0 && path->slots[0] < nritems - 1) {
5024 if (ret == 0)
5025 path->slots[0]++;
5026 ret = 0;
5027 goto done;
5028 }
5029 /*
5030 * So the above check misses one case:
5031 * - after releasing the path above, someone has removed the item that
5032 * used to be at the very end of the block, and balance between leaves
5033 * gets another one with bigger key.offset to replace it.
5034 *
5035 * This one should be returned as well, or we can get leaf corruption
5036 * later (especially in __btrfs_drop_extents()).
5037 *
5038 * A bit more explanation about this check:
5039 * with ret > 0, the key wasn't found and the path points to the slot
5040 * where it would be inserted, so the item at path->slots[0] must be
5041 * the bigger one.
5042 */
5043 if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
5044 ret = 0;
5045 goto done;
5046 }
5047
5048 while (level < BTRFS_MAX_LEVEL) {
5049 if (!path->nodes[level]) {
5050 ret = 1;
5051 goto done;
5052 }
5053
5054 slot = path->slots[level] + 1;
5055 c = path->nodes[level];
5056 if (slot >= btrfs_header_nritems(c)) {
5057 level++;
5058 if (level == BTRFS_MAX_LEVEL) {
5059 ret = 1;
5060 goto done;
5061 }
5062 continue;
5063 }
5064
5065
5066 /*
5067 * Our current level is where we're going to start from, and to
5068 * make sure lockdep doesn't complain we need to drop our locks
5069 * and nodes from 0 to our current level.
5070 */
5071 for (i = 0; i < level; i++) {
5072 if (path->locks[i]) {
5073 btrfs_tree_read_unlock(path->nodes[i]);
5074 path->locks[i] = 0;
5075 }
5076 free_extent_buffer(path->nodes[i]);
5077 path->nodes[i] = NULL;
5078 }
5079
5080 next = c;
5081 ret = read_block_for_search(root, path, &next, level,
5082 slot, &key);
5083 if (ret == -EAGAIN && !path->nowait)
5084 goto again;
5085
5086 if (ret < 0) {
5087 btrfs_release_path(path);
5088 goto done;
5089 }
5090
5091 if (!path->skip_locking) {
5092 ret = btrfs_try_tree_read_lock(next);
5093 if (!ret && path->nowait) {
5094 ret = -EAGAIN;
5095 goto done;
5096 }
5097 if (!ret && time_seq) {
5098 /*
5099 * If we don't get the lock, we may be racing
5100 * with push_leaf_left, holding that lock while
5101 * itself waiting for the leaf we've currently
5102 * locked. To solve this situation, we give up
5103 * on our lock and cycle.
5104 */
5105 free_extent_buffer(next);
5106 btrfs_release_path(path);
5107 cond_resched();
5108 goto again;
5109 }
5110 if (!ret)
5111 btrfs_tree_read_lock(next);
5112 }
5113 break;
5114 }
5115 path->slots[level] = slot;
5116 while (1) {
5117 level--;
5118 path->nodes[level] = next;
5119 path->slots[level] = 0;
5120 if (!path->skip_locking)
5121 path->locks[level] = BTRFS_READ_LOCK;
5122 if (!level)
5123 break;
5124
5125 ret = read_block_for_search(root, path, &next, level,
5126 0, &key);
5127 if (ret == -EAGAIN && !path->nowait)
5128 goto again;
5129
5130 if (ret < 0) {
5131 btrfs_release_path(path);
5132 goto done;
5133 }
5134
5135 if (!path->skip_locking) {
5136 if (path->nowait) {
5137 if (!btrfs_try_tree_read_lock(next)) {
5138 ret = -EAGAIN;
5139 goto done;
5140 }
5141 } else {
5142 btrfs_tree_read_lock(next);
5143 }
5144 }
5145 }
5146 ret = 0;
5147 done:
5148 unlock_up(path, 0, 1, 0, NULL);
5149 if (need_commit_sem) {
5150 int ret2;
5151
5152 path->need_commit_sem = 1;
5153 ret2 = finish_need_commit_sem_search(path);
5154 up_read(&fs_info->commit_root_sem);
5155 if (ret2)
5156 ret = ret2;
5157 }
5158
5159 return ret;
5160 }
5161
5162 int btrfs_next_old_item(struct btrfs_root *root, struct btrfs_path *path, u64 time_seq)
5163 {
5164 path->slots[0]++;
5165 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0]))
5166 return btrfs_next_old_leaf(root, path, time_seq);
5167 return 0;
5168 }
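
/*
 * Illustrative sketch of the usual forward-iteration loop these helpers
 * support. btrfs_next_leaf() is assumed to be the time_seq == 0 wrapper
 * around btrfs_next_old_leaf() declared in ctree.h:
 *
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret < 0)
 *		goto out;
 *	while (1) {
 *		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
 *			ret = btrfs_next_leaf(root, path);
 *			if (ret)
 *				break;
 *		}
 *		(consume the item at path->slots[0])
 *		path->slots[0]++;
 *	}
 *
 * A return of 1 from btrfs_next_leaf() means the tree is exhausted; < 0 is
 * an error.
 */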
5169
5170 /*
5171 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5172 * searching until it gets past min_objectid or finds an item of 'type'
5173 *
5174 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5175 */
5176 int btrfs_previous_item(struct btrfs_root *root,
5177 struct btrfs_path *path, u64 min_objectid,
5178 int type)
5179 {
5180 struct btrfs_key found_key;
5181 struct extent_buffer *leaf;
5182 u32 nritems;
5183 int ret;
5184
5185 while (1) {
5186 if (path->slots[0] == 0) {
5187 ret = btrfs_prev_leaf(root, path);
5188 if (ret != 0)
5189 return ret;
5190 } else {
5191 path->slots[0]--;
5192 }
5193 leaf = path->nodes[0];
5194 nritems = btrfs_header_nritems(leaf);
5195 if (nritems == 0)
5196 return 1;
5197 if (path->slots[0] == nritems)
5198 path->slots[0]--;
5199
5200 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5201 if (found_key.objectid < min_objectid)
5202 break;
5203 if (found_key.type == type)
5204 return 0;
5205 if (found_key.objectid == min_objectid &&
5206 found_key.type < type)
5207 break;
5208 }
5209 return 1;
5210 }
5211
5212 /*
5213 * search the extent tree to find a previous Metadata/Data extent item
5214 * with a minimum objectid.
5215 *
5216 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5217 */
5218 int btrfs_previous_extent_item(struct btrfs_root *root,
5219 struct btrfs_path *path, u64 min_objectid)
5220 {
5221 struct btrfs_key found_key;
5222 struct extent_buffer *leaf;
5223 u32 nritems;
5224 int ret;
5225
5226 while (1) {
5227 if (path->slots[0] == 0) {
5228 ret = btrfs_prev_leaf(root, path);
5229 if (ret != 0)
5230 return ret;
5231 } else {
5232 path->slots[0]--;
5233 }
5234 leaf = path->nodes[0];
5235 nritems = btrfs_header_nritems(leaf);
5236 if (nritems == 0)
5237 return 1;
5238 if (path->slots[0] == nritems)
5239 path->slots[0]--;
5240
5241 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5242 if (found_key.objectid < min_objectid)
5243 break;
5244 if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
5245 found_key.type == BTRFS_METADATA_ITEM_KEY)
5246 return 0;
5247 if (found_key.objectid == min_objectid &&
5248 found_key.type < BTRFS_EXTENT_ITEM_KEY)
5249 break;
5250 }
5251 return 1;
5252 }
5253
5254 int __init btrfs_ctree_init(void)
5255 {
5256 btrfs_path_cachep = kmem_cache_create("btrfs_path",
5257 sizeof(struct btrfs_path), 0,
5258 SLAB_MEM_SPREAD, NULL);
5259 if (!btrfs_path_cachep)
5260 return -ENOMEM;
5261 return 0;
5262 }
5263
5264 void __cold btrfs_ctree_exit(void)
5265 {
5266 kmem_cache_destroy(btrfs_path_cachep);
5267 }
5268