xref: /openbmc/linux/fs/btrfs/qgroup.c (revision a701d28e)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2011 STRATO.  All rights reserved.
4  */
5 
6 #include <linux/sched.h>
7 #include <linux/pagemap.h>
8 #include <linux/writeback.h>
9 #include <linux/blkdev.h>
10 #include <linux/rbtree.h>
11 #include <linux/slab.h>
12 #include <linux/workqueue.h>
13 #include <linux/btrfs.h>
14 
15 #include "ctree.h"
16 #include "transaction.h"
17 #include "disk-io.h"
18 #include "locking.h"
19 #include "ulist.h"
20 #include "backref.h"
21 #include "extent_io.h"
22 #include "qgroup.h"
23 #include "block-group.h"
24 #include "sysfs.h"
25 
26 /* TODO XXX FIXME
27  *  - subvol delete -> delete when ref goes to 0? delete limits also?
28  *  - reorganize keys
29  *  - compressed
30  *  - sync
31  *  - copy also limits on subvol creation
32  *  - limit
33  *  - caches for ulists
34  *  - performance benchmarks
35  *  - check all ioctl parameters
36  */
37 
38 /*
39  * Helpers to access qgroup reservation
40  *
41  * Callers should ensure the lock context and type are valid
42  */
43 
44 static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup)
45 {
46 	u64 ret = 0;
47 	int i;
48 
49 	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
50 		ret += qgroup->rsv.values[i];
51 
52 	return ret;
53 }
54 
55 #ifdef CONFIG_BTRFS_DEBUG
56 static const char *qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type)
57 {
58 	if (type == BTRFS_QGROUP_RSV_DATA)
59 		return "data";
60 	if (type == BTRFS_QGROUP_RSV_META_PERTRANS)
61 		return "meta_pertrans";
62 	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
63 		return "meta_prealloc";
64 	return NULL;
65 }
66 #endif
67 
68 static void qgroup_rsv_add(struct btrfs_fs_info *fs_info,
69 			   struct btrfs_qgroup *qgroup, u64 num_bytes,
70 			   enum btrfs_qgroup_rsv_type type)
71 {
72 	trace_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);
73 	qgroup->rsv.values[type] += num_bytes;
74 }
75 
76 static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,
77 			       struct btrfs_qgroup *qgroup, u64 num_bytes,
78 			       enum btrfs_qgroup_rsv_type type)
79 {
80 	trace_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type);
81 	if (qgroup->rsv.values[type] >= num_bytes) {
82 		qgroup->rsv.values[type] -= num_bytes;
83 		return;
84 	}
85 #ifdef CONFIG_BTRFS_DEBUG
86 	WARN_RATELIMIT(1,
87 		"qgroup %llu %s reserved space underflow, have %llu to free %llu",
88 		qgroup->qgroupid, qgroup_rsv_type_str(type),
89 		qgroup->rsv.values[type], num_bytes);
90 #endif
91 	qgroup->rsv.values[type] = 0;
92 }
93 
94 static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info,
95 				     struct btrfs_qgroup *dest,
96 				     struct btrfs_qgroup *src)
97 {
98 	int i;
99 
100 	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
101 		qgroup_rsv_add(fs_info, dest, src->rsv.values[i], i);
102 }
103 
104 static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info,
105 					 struct btrfs_qgroup *dest,
106 					  struct btrfs_qgroup *src)
107 {
108 	int i;
109 
110 	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
111 		qgroup_rsv_release(fs_info, dest, src->rsv.values[i], i);
112 }
113 
114 static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
115 					   int mod)
116 {
117 	if (qg->old_refcnt < seq)
118 		qg->old_refcnt = seq;
119 	qg->old_refcnt += mod;
120 }
121 
122 static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
123 					   int mod)
124 {
125 	if (qg->new_refcnt < seq)
126 		qg->new_refcnt = seq;
127 	qg->new_refcnt += mod;
128 }
129 
130 static inline u64 btrfs_qgroup_get_old_refcnt(struct btrfs_qgroup *qg, u64 seq)
131 {
132 	if (qg->old_refcnt < seq)
133 		return 0;
134 	return qg->old_refcnt - seq;
135 }
136 
137 static inline u64 btrfs_qgroup_get_new_refcnt(struct btrfs_qgroup *qg, u64 seq)
138 {
139 	if (qg->new_refcnt < seq)
140 		return 0;
141 	return qg->new_refcnt - seq;
142 }
143 
144 /*
145  * glue structure to represent the relations between qgroups.
146  */
147 struct btrfs_qgroup_list {
148 	struct list_head next_group;
149 	struct list_head next_member;
150 	struct btrfs_qgroup *group;
151 	struct btrfs_qgroup *member;
152 };
153 
154 static inline u64 qgroup_to_aux(struct btrfs_qgroup *qg)
155 {
156 	return (u64)(uintptr_t)qg;
157 }
158 
159 static inline struct btrfs_qgroup* unode_aux_to_qgroup(struct ulist_node *n)
160 {
161 	return (struct btrfs_qgroup *)(uintptr_t)n->aux;
162 }
163 
164 static int
165 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
166 		   int init_flags);
167 static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);
168 
169 /* must be called with qgroup_ioctl_lock held */
170 static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
171 					   u64 qgroupid)
172 {
173 	struct rb_node *n = fs_info->qgroup_tree.rb_node;
174 	struct btrfs_qgroup *qgroup;
175 
176 	while (n) {
177 		qgroup = rb_entry(n, struct btrfs_qgroup, node);
178 		if (qgroup->qgroupid < qgroupid)
179 			n = n->rb_left;
180 		else if (qgroup->qgroupid > qgroupid)
181 			n = n->rb_right;
182 		else
183 			return qgroup;
184 	}
185 	return NULL;
186 }
187 
188 /* must be called with qgroup_lock held */
189 static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
190 					  u64 qgroupid)
191 {
192 	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
193 	struct rb_node *parent = NULL;
194 	struct btrfs_qgroup *qgroup;
195 
196 	while (*p) {
197 		parent = *p;
198 		qgroup = rb_entry(parent, struct btrfs_qgroup, node);
199 
200 		if (qgroup->qgroupid < qgroupid)
201 			p = &(*p)->rb_left;
202 		else if (qgroup->qgroupid > qgroupid)
203 			p = &(*p)->rb_right;
204 		else
205 			return qgroup;
206 	}
207 
208 	qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
209 	if (!qgroup)
210 		return ERR_PTR(-ENOMEM);
211 
212 	qgroup->qgroupid = qgroupid;
213 	INIT_LIST_HEAD(&qgroup->groups);
214 	INIT_LIST_HEAD(&qgroup->members);
215 	INIT_LIST_HEAD(&qgroup->dirty);
216 
217 	rb_link_node(&qgroup->node, parent, p);
218 	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);
219 
220 	return qgroup;
221 }
222 
223 static void __del_qgroup_rb(struct btrfs_fs_info *fs_info,
224 			    struct btrfs_qgroup *qgroup)
225 {
226 	struct btrfs_qgroup_list *list;
227 
228 	btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
229 	list_del(&qgroup->dirty);
230 	while (!list_empty(&qgroup->groups)) {
231 		list = list_first_entry(&qgroup->groups,
232 					struct btrfs_qgroup_list, next_group);
233 		list_del(&list->next_group);
234 		list_del(&list->next_member);
235 		kfree(list);
236 	}
237 
238 	while (!list_empty(&qgroup->members)) {
239 		list = list_first_entry(&qgroup->members,
240 					struct btrfs_qgroup_list, next_member);
241 		list_del(&list->next_group);
242 		list_del(&list->next_member);
243 		kfree(list);
244 	}
245 	kfree(qgroup);
246 }
247 
248 /* must be called with qgroup_lock held */
249 static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
250 {
251 	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);
252 
253 	if (!qgroup)
254 		return -ENOENT;
255 
256 	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
257 	__del_qgroup_rb(fs_info, qgroup);
258 	return 0;
259 }
260 
261 /* must be called with qgroup_lock held */
262 static int add_relation_rb(struct btrfs_fs_info *fs_info,
263 			   u64 memberid, u64 parentid)
264 {
265 	struct btrfs_qgroup *member;
266 	struct btrfs_qgroup *parent;
267 	struct btrfs_qgroup_list *list;
268 
269 	member = find_qgroup_rb(fs_info, memberid);
270 	parent = find_qgroup_rb(fs_info, parentid);
271 	if (!member || !parent)
272 		return -ENOENT;
273 
274 	list = kzalloc(sizeof(*list), GFP_ATOMIC);
275 	if (!list)
276 		return -ENOMEM;
277 
278 	list->group = parent;
279 	list->member = member;
280 	list_add_tail(&list->next_group, &member->groups);
281 	list_add_tail(&list->next_member, &parent->members);
282 
283 	return 0;
284 }
285 
286 /* must be called with qgroup_lock held */
287 static int del_relation_rb(struct btrfs_fs_info *fs_info,
288 			   u64 memberid, u64 parentid)
289 {
290 	struct btrfs_qgroup *member;
291 	struct btrfs_qgroup *parent;
292 	struct btrfs_qgroup_list *list;
293 
294 	member = find_qgroup_rb(fs_info, memberid);
295 	parent = find_qgroup_rb(fs_info, parentid);
296 	if (!member || !parent)
297 		return -ENOENT;
298 
299 	list_for_each_entry(list, &member->groups, next_group) {
300 		if (list->group == parent) {
301 			list_del(&list->next_group);
302 			list_del(&list->next_member);
303 			kfree(list);
304 			return 0;
305 		}
306 	}
307 	return -ENOENT;
308 }
309 
310 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
311 int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
312 			       u64 rfer, u64 excl)
313 {
314 	struct btrfs_qgroup *qgroup;
315 
316 	qgroup = find_qgroup_rb(fs_info, qgroupid);
317 	if (!qgroup)
318 		return -EINVAL;
319 	if (qgroup->rfer != rfer || qgroup->excl != excl)
320 		return -EINVAL;
321 	return 0;
322 }
323 #endif
324 
325 /*
326  * The full config is read in one go, only called from open_ctree()
327  * It doesn't use any locking, as at this point we're still single-threaded
328  */
329 int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
330 {
331 	struct btrfs_key key;
332 	struct btrfs_key found_key;
333 	struct btrfs_root *quota_root = fs_info->quota_root;
334 	struct btrfs_path *path = NULL;
335 	struct extent_buffer *l;
336 	int slot;
337 	int ret = 0;
338 	u64 flags = 0;
339 	u64 rescan_progress = 0;
340 
341 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
342 		return 0;
343 
344 	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
345 	if (!fs_info->qgroup_ulist) {
346 		ret = -ENOMEM;
347 		goto out;
348 	}
349 
350 	path = btrfs_alloc_path();
351 	if (!path) {
352 		ret = -ENOMEM;
353 		goto out;
354 	}
355 
356 	ret = btrfs_sysfs_add_qgroups(fs_info);
357 	if (ret < 0)
358 		goto out;
359 	/* default this to quota off, in case no status key is found */
360 	fs_info->qgroup_flags = 0;
361 
362 	/*
363 	 * pass 1: read status, all qgroup infos and limits
364 	 */
365 	key.objectid = 0;
366 	key.type = 0;
367 	key.offset = 0;
368 	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
369 	if (ret)
370 		goto out;
371 
372 	while (1) {
373 		struct btrfs_qgroup *qgroup;
374 
375 		slot = path->slots[0];
376 		l = path->nodes[0];
377 		btrfs_item_key_to_cpu(l, &found_key, slot);
378 
379 		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
380 			struct btrfs_qgroup_status_item *ptr;
381 
382 			ptr = btrfs_item_ptr(l, slot,
383 					     struct btrfs_qgroup_status_item);
384 
385 			if (btrfs_qgroup_status_version(l, ptr) !=
386 			    BTRFS_QGROUP_STATUS_VERSION) {
387 				btrfs_err(fs_info,
388 				 "old qgroup version, quota disabled");
389 				goto out;
390 			}
391 			if (btrfs_qgroup_status_generation(l, ptr) !=
392 			    fs_info->generation) {
393 				flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
394 				btrfs_err(fs_info,
395 					"qgroup generation mismatch, marked as inconsistent");
396 			}
397 			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
398 									  ptr);
399 			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
400 			goto next1;
401 		}
402 
403 		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
404 		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
405 			goto next1;
406 
407 		qgroup = find_qgroup_rb(fs_info, found_key.offset);
408 		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
409 		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
410 			btrfs_err(fs_info, "inconsistent qgroup config");
411 			flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
412 		}
413 		if (!qgroup) {
414 			qgroup = add_qgroup_rb(fs_info, found_key.offset);
415 			if (IS_ERR(qgroup)) {
416 				ret = PTR_ERR(qgroup);
417 				goto out;
418 			}
419 		}
420 		ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
421 		if (ret < 0)
422 			goto out;
423 
424 		switch (found_key.type) {
425 		case BTRFS_QGROUP_INFO_KEY: {
426 			struct btrfs_qgroup_info_item *ptr;
427 
428 			ptr = btrfs_item_ptr(l, slot,
429 					     struct btrfs_qgroup_info_item);
430 			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
431 			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
432 			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
433 			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
434 			/* generation currently unused */
435 			break;
436 		}
437 		case BTRFS_QGROUP_LIMIT_KEY: {
438 			struct btrfs_qgroup_limit_item *ptr;
439 
440 			ptr = btrfs_item_ptr(l, slot,
441 					     struct btrfs_qgroup_limit_item);
442 			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
443 			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
444 			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
445 			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
446 			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
447 			break;
448 		}
449 		}
450 next1:
451 		ret = btrfs_next_item(quota_root, path);
452 		if (ret < 0)
453 			goto out;
454 		if (ret)
455 			break;
456 	}
457 	btrfs_release_path(path);
458 
459 	/*
460 	 * pass 2: read all qgroup relations
461 	 */
462 	key.objectid = 0;
463 	key.type = BTRFS_QGROUP_RELATION_KEY;
464 	key.offset = 0;
465 	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
466 	if (ret)
467 		goto out;
468 	while (1) {
469 		slot = path->slots[0];
470 		l = path->nodes[0];
471 		btrfs_item_key_to_cpu(l, &found_key, slot);
472 
473 		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
474 			goto next2;
475 
476 		if (found_key.objectid > found_key.offset) {
477 			/* parent <- member, not needed to build config */
478 			/* FIXME should we omit the key completely? */
479 			goto next2;
480 		}
481 
482 		ret = add_relation_rb(fs_info, found_key.objectid,
483 				      found_key.offset);
484 		if (ret == -ENOENT) {
485 			btrfs_warn(fs_info,
486 				"orphan qgroup relation 0x%llx->0x%llx",
487 				found_key.objectid, found_key.offset);
488 			ret = 0;	/* ignore the error */
489 		}
490 		if (ret)
491 			goto out;
492 next2:
493 		ret = btrfs_next_item(quota_root, path);
494 		if (ret < 0)
495 			goto out;
496 		if (ret)
497 			break;
498 	}
499 out:
500 	fs_info->qgroup_flags |= flags;
501 	if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
502 		clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
503 	else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
504 		 ret >= 0)
505 		ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
506 	btrfs_free_path(path);
507 
508 	if (ret < 0) {
509 		ulist_free(fs_info->qgroup_ulist);
510 		fs_info->qgroup_ulist = NULL;
511 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
512 		btrfs_sysfs_del_qgroups(fs_info);
513 	}
514 
515 	return ret < 0 ? ret : 0;
516 }
517 
518 /*
519  * Called in close_ctree() when quota is still enabled.  This verifies we don't
520  * leak some reserved space.
521  *
522  * Return false if no reserved space is left.
523  * Return true if some reserved space is leaked.
524  */
525 bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info)
526 {
527 	struct rb_node *node;
528 	bool ret = false;
529 
530 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
531 		return ret;
532 	/*
533 	 * Since we're unmounting, there is no race and no need to grab qgroup
534 	 * lock.  And here we don't go post-order to provide a more user
535 	 * friendly sorted result.
536 	 */
537 	for (node = rb_first(&fs_info->qgroup_tree); node; node = rb_next(node)) {
538 		struct btrfs_qgroup *qgroup;
539 		int i;
540 
541 		qgroup = rb_entry(node, struct btrfs_qgroup, node);
542 		for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) {
543 			if (qgroup->rsv.values[i]) {
544 				ret = true;
545 				btrfs_warn(fs_info,
546 		"qgroup %hu/%llu has unreleased space, type %d rsv %llu",
547 				   btrfs_qgroup_level(qgroup->qgroupid),
548 				   btrfs_qgroup_subvolid(qgroup->qgroupid),
549 				   i, qgroup->rsv.values[i]);
550 			}
551 		}
552 	}
553 	return ret;
554 }
555 
556 /*
557  * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(),
558  * first two are in single-threaded paths.And for the third one, we have set
559  * quota_root to be null with qgroup_lock held before, so it is safe to clean
560  * up the in-memory structures without qgroup_lock held.
561  */
562 void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
563 {
564 	struct rb_node *n;
565 	struct btrfs_qgroup *qgroup;
566 
567 	while ((n = rb_first(&fs_info->qgroup_tree))) {
568 		qgroup = rb_entry(n, struct btrfs_qgroup, node);
569 		rb_erase(n, &fs_info->qgroup_tree);
570 		__del_qgroup_rb(fs_info, qgroup);
571 	}
572 	/*
573 	 * We call btrfs_free_qgroup_config() when unmounting
574 	 * filesystem and disabling quota, so we set qgroup_ulist
575 	 * to be null here to avoid double free.
576 	 */
577 	ulist_free(fs_info->qgroup_ulist);
578 	fs_info->qgroup_ulist = NULL;
579 	btrfs_sysfs_del_qgroups(fs_info);
580 }
581 
582 static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
583 				    u64 dst)
584 {
585 	int ret;
586 	struct btrfs_root *quota_root = trans->fs_info->quota_root;
587 	struct btrfs_path *path;
588 	struct btrfs_key key;
589 
590 	path = btrfs_alloc_path();
591 	if (!path)
592 		return -ENOMEM;
593 
594 	key.objectid = src;
595 	key.type = BTRFS_QGROUP_RELATION_KEY;
596 	key.offset = dst;
597 
598 	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);
599 
600 	btrfs_mark_buffer_dirty(path->nodes[0]);
601 
602 	btrfs_free_path(path);
603 	return ret;
604 }
605 
606 static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
607 				    u64 dst)
608 {
609 	int ret;
610 	struct btrfs_root *quota_root = trans->fs_info->quota_root;
611 	struct btrfs_path *path;
612 	struct btrfs_key key;
613 
614 	path = btrfs_alloc_path();
615 	if (!path)
616 		return -ENOMEM;
617 
618 	key.objectid = src;
619 	key.type = BTRFS_QGROUP_RELATION_KEY;
620 	key.offset = dst;
621 
622 	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
623 	if (ret < 0)
624 		goto out;
625 
626 	if (ret > 0) {
627 		ret = -ENOENT;
628 		goto out;
629 	}
630 
631 	ret = btrfs_del_item(trans, quota_root, path);
632 out:
633 	btrfs_free_path(path);
634 	return ret;
635 }
636 
637 static int add_qgroup_item(struct btrfs_trans_handle *trans,
638 			   struct btrfs_root *quota_root, u64 qgroupid)
639 {
640 	int ret;
641 	struct btrfs_path *path;
642 	struct btrfs_qgroup_info_item *qgroup_info;
643 	struct btrfs_qgroup_limit_item *qgroup_limit;
644 	struct extent_buffer *leaf;
645 	struct btrfs_key key;
646 
647 	if (btrfs_is_testing(quota_root->fs_info))
648 		return 0;
649 
650 	path = btrfs_alloc_path();
651 	if (!path)
652 		return -ENOMEM;
653 
654 	key.objectid = 0;
655 	key.type = BTRFS_QGROUP_INFO_KEY;
656 	key.offset = qgroupid;
657 
658 	/*
659 	 * Avoid a transaction abort by catching -EEXIST here. In that
660 	 * case, we proceed by re-initializing the existing structure
661 	 * on disk.
662 	 */
663 
664 	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
665 				      sizeof(*qgroup_info));
666 	if (ret && ret != -EEXIST)
667 		goto out;
668 
669 	leaf = path->nodes[0];
670 	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
671 				 struct btrfs_qgroup_info_item);
672 	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
673 	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
674 	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
675 	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
676 	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);
677 
678 	btrfs_mark_buffer_dirty(leaf);
679 
680 	btrfs_release_path(path);
681 
682 	key.type = BTRFS_QGROUP_LIMIT_KEY;
683 	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
684 				      sizeof(*qgroup_limit));
685 	if (ret && ret != -EEXIST)
686 		goto out;
687 
688 	leaf = path->nodes[0];
689 	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
690 				  struct btrfs_qgroup_limit_item);
691 	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
692 	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
693 	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
694 	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
695 	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);
696 
697 	btrfs_mark_buffer_dirty(leaf);
698 
699 	ret = 0;
700 out:
701 	btrfs_free_path(path);
702 	return ret;
703 }
704 
705 static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
706 {
707 	int ret;
708 	struct btrfs_root *quota_root = trans->fs_info->quota_root;
709 	struct btrfs_path *path;
710 	struct btrfs_key key;
711 
712 	path = btrfs_alloc_path();
713 	if (!path)
714 		return -ENOMEM;
715 
716 	key.objectid = 0;
717 	key.type = BTRFS_QGROUP_INFO_KEY;
718 	key.offset = qgroupid;
719 	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
720 	if (ret < 0)
721 		goto out;
722 
723 	if (ret > 0) {
724 		ret = -ENOENT;
725 		goto out;
726 	}
727 
728 	ret = btrfs_del_item(trans, quota_root, path);
729 	if (ret)
730 		goto out;
731 
732 	btrfs_release_path(path);
733 
734 	key.type = BTRFS_QGROUP_LIMIT_KEY;
735 	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
736 	if (ret < 0)
737 		goto out;
738 
739 	if (ret > 0) {
740 		ret = -ENOENT;
741 		goto out;
742 	}
743 
744 	ret = btrfs_del_item(trans, quota_root, path);
745 
746 out:
747 	btrfs_free_path(path);
748 	return ret;
749 }
750 
751 static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
752 				    struct btrfs_qgroup *qgroup)
753 {
754 	struct btrfs_root *quota_root = trans->fs_info->quota_root;
755 	struct btrfs_path *path;
756 	struct btrfs_key key;
757 	struct extent_buffer *l;
758 	struct btrfs_qgroup_limit_item *qgroup_limit;
759 	int ret;
760 	int slot;
761 
762 	key.objectid = 0;
763 	key.type = BTRFS_QGROUP_LIMIT_KEY;
764 	key.offset = qgroup->qgroupid;
765 
766 	path = btrfs_alloc_path();
767 	if (!path)
768 		return -ENOMEM;
769 
770 	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
771 	if (ret > 0)
772 		ret = -ENOENT;
773 
774 	if (ret)
775 		goto out;
776 
777 	l = path->nodes[0];
778 	slot = path->slots[0];
779 	qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
780 	btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
781 	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
782 	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
783 	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
784 	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);
785 
786 	btrfs_mark_buffer_dirty(l);
787 
788 out:
789 	btrfs_free_path(path);
790 	return ret;
791 }
792 
793 static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
794 				   struct btrfs_qgroup *qgroup)
795 {
796 	struct btrfs_fs_info *fs_info = trans->fs_info;
797 	struct btrfs_root *quota_root = fs_info->quota_root;
798 	struct btrfs_path *path;
799 	struct btrfs_key key;
800 	struct extent_buffer *l;
801 	struct btrfs_qgroup_info_item *qgroup_info;
802 	int ret;
803 	int slot;
804 
805 	if (btrfs_is_testing(fs_info))
806 		return 0;
807 
808 	key.objectid = 0;
809 	key.type = BTRFS_QGROUP_INFO_KEY;
810 	key.offset = qgroup->qgroupid;
811 
812 	path = btrfs_alloc_path();
813 	if (!path)
814 		return -ENOMEM;
815 
816 	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
817 	if (ret > 0)
818 		ret = -ENOENT;
819 
820 	if (ret)
821 		goto out;
822 
823 	l = path->nodes[0];
824 	slot = path->slots[0];
825 	qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
826 	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
827 	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
828 	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
829 	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
830 	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);
831 
832 	btrfs_mark_buffer_dirty(l);
833 
834 out:
835 	btrfs_free_path(path);
836 	return ret;
837 }
838 
839 static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
840 {
841 	struct btrfs_fs_info *fs_info = trans->fs_info;
842 	struct btrfs_root *quota_root = fs_info->quota_root;
843 	struct btrfs_path *path;
844 	struct btrfs_key key;
845 	struct extent_buffer *l;
846 	struct btrfs_qgroup_status_item *ptr;
847 	int ret;
848 	int slot;
849 
850 	key.objectid = 0;
851 	key.type = BTRFS_QGROUP_STATUS_KEY;
852 	key.offset = 0;
853 
854 	path = btrfs_alloc_path();
855 	if (!path)
856 		return -ENOMEM;
857 
858 	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
859 	if (ret > 0)
860 		ret = -ENOENT;
861 
862 	if (ret)
863 		goto out;
864 
865 	l = path->nodes[0];
866 	slot = path->slots[0];
867 	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
868 	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
869 	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
870 	btrfs_set_qgroup_status_rescan(l, ptr,
871 				fs_info->qgroup_rescan_progress.objectid);
872 
873 	btrfs_mark_buffer_dirty(l);
874 
875 out:
876 	btrfs_free_path(path);
877 	return ret;
878 }
879 
880 /*
881  * called with qgroup_lock held
882  */
883 static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
884 				  struct btrfs_root *root)
885 {
886 	struct btrfs_path *path;
887 	struct btrfs_key key;
888 	struct extent_buffer *leaf = NULL;
889 	int ret;
890 	int nr = 0;
891 
892 	path = btrfs_alloc_path();
893 	if (!path)
894 		return -ENOMEM;
895 
896 	path->leave_spinning = 1;
897 
898 	key.objectid = 0;
899 	key.offset = 0;
900 	key.type = 0;
901 
902 	while (1) {
903 		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
904 		if (ret < 0)
905 			goto out;
906 		leaf = path->nodes[0];
907 		nr = btrfs_header_nritems(leaf);
908 		if (!nr)
909 			break;
910 		/*
911 		 * delete the leaf one by one
912 		 * since the whole tree is going
913 		 * to be deleted.
914 		 */
915 		path->slots[0] = 0;
916 		ret = btrfs_del_items(trans, root, path, 0, nr);
917 		if (ret)
918 			goto out;
919 
920 		btrfs_release_path(path);
921 	}
922 	ret = 0;
923 out:
924 	btrfs_free_path(path);
925 	return ret;
926 }
927 
928 int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
929 {
930 	struct btrfs_root *quota_root;
931 	struct btrfs_root *tree_root = fs_info->tree_root;
932 	struct btrfs_path *path = NULL;
933 	struct btrfs_qgroup_status_item *ptr;
934 	struct extent_buffer *leaf;
935 	struct btrfs_key key;
936 	struct btrfs_key found_key;
937 	struct btrfs_qgroup *qgroup = NULL;
938 	struct btrfs_trans_handle *trans = NULL;
939 	int ret = 0;
940 	int slot;
941 
942 	mutex_lock(&fs_info->qgroup_ioctl_lock);
943 	if (fs_info->quota_root)
944 		goto out;
945 
946 	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
947 	if (!fs_info->qgroup_ulist) {
948 		ret = -ENOMEM;
949 		goto out;
950 	}
951 
952 	ret = btrfs_sysfs_add_qgroups(fs_info);
953 	if (ret < 0)
954 		goto out;
955 	/*
956 	 * 1 for quota root item
957 	 * 1 for BTRFS_QGROUP_STATUS item
958 	 *
959 	 * Yet we also need 2*n items for a QGROUP_INFO/QGROUP_LIMIT items
960 	 * per subvolume. However those are not currently reserved since it
961 	 * would be a lot of overkill.
962 	 */
963 	trans = btrfs_start_transaction(tree_root, 2);
964 	if (IS_ERR(trans)) {
965 		ret = PTR_ERR(trans);
966 		trans = NULL;
967 		goto out;
968 	}
969 
970 	/*
971 	 * initially create the quota tree
972 	 */
973 	quota_root = btrfs_create_tree(trans, BTRFS_QUOTA_TREE_OBJECTID);
974 	if (IS_ERR(quota_root)) {
975 		ret =  PTR_ERR(quota_root);
976 		btrfs_abort_transaction(trans, ret);
977 		goto out;
978 	}
979 
980 	path = btrfs_alloc_path();
981 	if (!path) {
982 		ret = -ENOMEM;
983 		btrfs_abort_transaction(trans, ret);
984 		goto out_free_root;
985 	}
986 
987 	key.objectid = 0;
988 	key.type = BTRFS_QGROUP_STATUS_KEY;
989 	key.offset = 0;
990 
991 	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
992 				      sizeof(*ptr));
993 	if (ret) {
994 		btrfs_abort_transaction(trans, ret);
995 		goto out_free_path;
996 	}
997 
998 	leaf = path->nodes[0];
999 	ptr = btrfs_item_ptr(leaf, path->slots[0],
1000 				 struct btrfs_qgroup_status_item);
1001 	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
1002 	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
1003 	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
1004 				BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1005 	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
1006 	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);
1007 
1008 	btrfs_mark_buffer_dirty(leaf);
1009 
1010 	key.objectid = 0;
1011 	key.type = BTRFS_ROOT_REF_KEY;
1012 	key.offset = 0;
1013 
1014 	btrfs_release_path(path);
1015 	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
1016 	if (ret > 0)
1017 		goto out_add_root;
1018 	if (ret < 0) {
1019 		btrfs_abort_transaction(trans, ret);
1020 		goto out_free_path;
1021 	}
1022 
1023 	while (1) {
1024 		slot = path->slots[0];
1025 		leaf = path->nodes[0];
1026 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
1027 
1028 		if (found_key.type == BTRFS_ROOT_REF_KEY) {
1029 
1030 			/* Release locks on tree_root before we access quota_root */
1031 			btrfs_release_path(path);
1032 
1033 			ret = add_qgroup_item(trans, quota_root,
1034 					      found_key.offset);
1035 			if (ret) {
1036 				btrfs_abort_transaction(trans, ret);
1037 				goto out_free_path;
1038 			}
1039 
1040 			qgroup = add_qgroup_rb(fs_info, found_key.offset);
1041 			if (IS_ERR(qgroup)) {
1042 				ret = PTR_ERR(qgroup);
1043 				btrfs_abort_transaction(trans, ret);
1044 				goto out_free_path;
1045 			}
1046 			ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
1047 			if (ret < 0) {
1048 				btrfs_abort_transaction(trans, ret);
1049 				goto out_free_path;
1050 			}
1051 			ret = btrfs_search_slot_for_read(tree_root, &found_key,
1052 							 path, 1, 0);
1053 			if (ret < 0) {
1054 				btrfs_abort_transaction(trans, ret);
1055 				goto out_free_path;
1056 			}
1057 			if (ret > 0) {
1058 				/*
1059 				 * Shouldn't happen, but in case it does we
1060 				 * don't need to do the btrfs_next_item, just
1061 				 * continue.
1062 				 */
1063 				continue;
1064 			}
1065 		}
1066 		ret = btrfs_next_item(tree_root, path);
1067 		if (ret < 0) {
1068 			btrfs_abort_transaction(trans, ret);
1069 			goto out_free_path;
1070 		}
1071 		if (ret)
1072 			break;
1073 	}
1074 
1075 out_add_root:
1076 	btrfs_release_path(path);
1077 	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
1078 	if (ret) {
1079 		btrfs_abort_transaction(trans, ret);
1080 		goto out_free_path;
1081 	}
1082 
1083 	qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
1084 	if (IS_ERR(qgroup)) {
1085 		ret = PTR_ERR(qgroup);
1086 		btrfs_abort_transaction(trans, ret);
1087 		goto out_free_path;
1088 	}
1089 	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
1090 	if (ret < 0) {
1091 		btrfs_abort_transaction(trans, ret);
1092 		goto out_free_path;
1093 	}
1094 
1095 	ret = btrfs_commit_transaction(trans);
1096 	trans = NULL;
1097 	if (ret)
1098 		goto out_free_path;
1099 
1100 	/*
1101 	 * Set quota enabled flag after committing the transaction, to avoid
1102 	 * deadlocks on fs_info->qgroup_ioctl_lock with concurrent snapshot
1103 	 * creation.
1104 	 */
1105 	spin_lock(&fs_info->qgroup_lock);
1106 	fs_info->quota_root = quota_root;
1107 	set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1108 	spin_unlock(&fs_info->qgroup_lock);
1109 
1110 	ret = qgroup_rescan_init(fs_info, 0, 1);
1111 	if (!ret) {
1112 	        qgroup_rescan_zero_tracking(fs_info);
1113 		fs_info->qgroup_rescan_running = true;
1114 	        btrfs_queue_work(fs_info->qgroup_rescan_workers,
1115 	                         &fs_info->qgroup_rescan_work);
1116 	}
1117 
1118 out_free_path:
1119 	btrfs_free_path(path);
1120 out_free_root:
1121 	if (ret)
1122 		btrfs_put_root(quota_root);
1123 out:
1124 	if (ret) {
1125 		ulist_free(fs_info->qgroup_ulist);
1126 		fs_info->qgroup_ulist = NULL;
1127 		if (trans)
1128 			btrfs_end_transaction(trans);
1129 		btrfs_sysfs_del_qgroups(fs_info);
1130 	}
1131 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1132 	return ret;
1133 }
1134 
1135 int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
1136 {
1137 	struct btrfs_root *quota_root;
1138 	struct btrfs_trans_handle *trans = NULL;
1139 	int ret = 0;
1140 
1141 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1142 	if (!fs_info->quota_root)
1143 		goto out;
1144 
1145 	/*
1146 	 * 1 For the root item
1147 	 *
1148 	 * We should also reserve enough items for the quota tree deletion in
1149 	 * btrfs_clean_quota_tree but this is not done.
1150 	 */
1151 	trans = btrfs_start_transaction(fs_info->tree_root, 1);
1152 	if (IS_ERR(trans)) {
1153 		ret = PTR_ERR(trans);
1154 		goto out;
1155 	}
1156 
1157 	clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1158 	btrfs_qgroup_wait_for_completion(fs_info, false);
1159 	spin_lock(&fs_info->qgroup_lock);
1160 	quota_root = fs_info->quota_root;
1161 	fs_info->quota_root = NULL;
1162 	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
1163 	spin_unlock(&fs_info->qgroup_lock);
1164 
1165 	btrfs_free_qgroup_config(fs_info);
1166 
1167 	ret = btrfs_clean_quota_tree(trans, quota_root);
1168 	if (ret) {
1169 		btrfs_abort_transaction(trans, ret);
1170 		goto end_trans;
1171 	}
1172 
1173 	ret = btrfs_del_root(trans, &quota_root->root_key);
1174 	if (ret) {
1175 		btrfs_abort_transaction(trans, ret);
1176 		goto end_trans;
1177 	}
1178 
1179 	list_del(&quota_root->dirty_list);
1180 
1181 	btrfs_tree_lock(quota_root->node);
1182 	btrfs_clean_tree_block(quota_root->node);
1183 	btrfs_tree_unlock(quota_root->node);
1184 	btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);
1185 
1186 	btrfs_put_root(quota_root);
1187 
1188 end_trans:
1189 	ret = btrfs_end_transaction(trans);
1190 out:
1191 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1192 	return ret;
1193 }
1194 
1195 static void qgroup_dirty(struct btrfs_fs_info *fs_info,
1196 			 struct btrfs_qgroup *qgroup)
1197 {
1198 	if (list_empty(&qgroup->dirty))
1199 		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
1200 }
1201 
1202 /*
1203  * The easy accounting, we're updating qgroup relationship whose child qgroup
1204  * only has exclusive extents.
1205  *
1206  * In this case, all exclusive extents will also be exclusive for parent, so
1207  * excl/rfer just get added/removed.
1208  *
1209  * So is qgroup reservation space, which should also be added/removed to
1210  * parent.
1211  * Or when child tries to release reservation space, parent will underflow its
1212  * reservation (for relationship adding case).
1213  *
1214  * Caller should hold fs_info->qgroup_lock.
1215  */
1216 static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
1217 				    struct ulist *tmp, u64 ref_root,
1218 				    struct btrfs_qgroup *src, int sign)
1219 {
1220 	struct btrfs_qgroup *qgroup;
1221 	struct btrfs_qgroup_list *glist;
1222 	struct ulist_node *unode;
1223 	struct ulist_iterator uiter;
1224 	u64 num_bytes = src->excl;
1225 	int ret = 0;
1226 
1227 	qgroup = find_qgroup_rb(fs_info, ref_root);
1228 	if (!qgroup)
1229 		goto out;
1230 
1231 	qgroup->rfer += sign * num_bytes;
1232 	qgroup->rfer_cmpr += sign * num_bytes;
1233 
1234 	WARN_ON(sign < 0 && qgroup->excl < num_bytes);
1235 	qgroup->excl += sign * num_bytes;
1236 	qgroup->excl_cmpr += sign * num_bytes;
1237 
1238 	if (sign > 0)
1239 		qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
1240 	else
1241 		qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
1242 
1243 	qgroup_dirty(fs_info, qgroup);
1244 
1245 	/* Get all of the parent groups that contain this qgroup */
1246 	list_for_each_entry(glist, &qgroup->groups, next_group) {
1247 		ret = ulist_add(tmp, glist->group->qgroupid,
1248 				qgroup_to_aux(glist->group), GFP_ATOMIC);
1249 		if (ret < 0)
1250 			goto out;
1251 	}
1252 
1253 	/* Iterate all of the parents and adjust their reference counts */
1254 	ULIST_ITER_INIT(&uiter);
1255 	while ((unode = ulist_next(tmp, &uiter))) {
1256 		qgroup = unode_aux_to_qgroup(unode);
1257 		qgroup->rfer += sign * num_bytes;
1258 		qgroup->rfer_cmpr += sign * num_bytes;
1259 		WARN_ON(sign < 0 && qgroup->excl < num_bytes);
1260 		qgroup->excl += sign * num_bytes;
1261 		if (sign > 0)
1262 			qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
1263 		else
1264 			qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
1265 		qgroup->excl_cmpr += sign * num_bytes;
1266 		qgroup_dirty(fs_info, qgroup);
1267 
1268 		/* Add any parents of the parents */
1269 		list_for_each_entry(glist, &qgroup->groups, next_group) {
1270 			ret = ulist_add(tmp, glist->group->qgroupid,
1271 					qgroup_to_aux(glist->group), GFP_ATOMIC);
1272 			if (ret < 0)
1273 				goto out;
1274 		}
1275 	}
1276 	ret = 0;
1277 out:
1278 	return ret;
1279 }
1280 
1281 
1282 /*
1283  * Quick path for updating qgroup with only excl refs.
1284  *
1285  * In that case, just update all parent will be enough.
1286  * Or we needs to do a full rescan.
1287  * Caller should also hold fs_info->qgroup_lock.
1288  *
1289  * Return 0 for quick update, return >0 for need to full rescan
1290  * and mark INCONSISTENT flag.
1291  * Return < 0 for other error.
1292  */
1293 static int quick_update_accounting(struct btrfs_fs_info *fs_info,
1294 				   struct ulist *tmp, u64 src, u64 dst,
1295 				   int sign)
1296 {
1297 	struct btrfs_qgroup *qgroup;
1298 	int ret = 1;
1299 	int err = 0;
1300 
1301 	qgroup = find_qgroup_rb(fs_info, src);
1302 	if (!qgroup)
1303 		goto out;
1304 	if (qgroup->excl == qgroup->rfer) {
1305 		ret = 0;
1306 		err = __qgroup_excl_accounting(fs_info, tmp, dst,
1307 					       qgroup, sign);
1308 		if (err < 0) {
1309 			ret = err;
1310 			goto out;
1311 		}
1312 	}
1313 out:
1314 	if (ret)
1315 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1316 	return ret;
1317 }
1318 
1319 int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
1320 			      u64 dst)
1321 {
1322 	struct btrfs_fs_info *fs_info = trans->fs_info;
1323 	struct btrfs_qgroup *parent;
1324 	struct btrfs_qgroup *member;
1325 	struct btrfs_qgroup_list *list;
1326 	struct ulist *tmp;
1327 	int ret = 0;
1328 
1329 	/* Check the level of src and dst first */
1330 	if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
1331 		return -EINVAL;
1332 
1333 	tmp = ulist_alloc(GFP_KERNEL);
1334 	if (!tmp)
1335 		return -ENOMEM;
1336 
1337 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1338 	if (!fs_info->quota_root) {
1339 		ret = -ENOTCONN;
1340 		goto out;
1341 	}
1342 	member = find_qgroup_rb(fs_info, src);
1343 	parent = find_qgroup_rb(fs_info, dst);
1344 	if (!member || !parent) {
1345 		ret = -EINVAL;
1346 		goto out;
1347 	}
1348 
1349 	/* check if such qgroup relation exist firstly */
1350 	list_for_each_entry(list, &member->groups, next_group) {
1351 		if (list->group == parent) {
1352 			ret = -EEXIST;
1353 			goto out;
1354 		}
1355 	}
1356 
1357 	ret = add_qgroup_relation_item(trans, src, dst);
1358 	if (ret)
1359 		goto out;
1360 
1361 	ret = add_qgroup_relation_item(trans, dst, src);
1362 	if (ret) {
1363 		del_qgroup_relation_item(trans, src, dst);
1364 		goto out;
1365 	}
1366 
1367 	spin_lock(&fs_info->qgroup_lock);
1368 	ret = add_relation_rb(fs_info, src, dst);
1369 	if (ret < 0) {
1370 		spin_unlock(&fs_info->qgroup_lock);
1371 		goto out;
1372 	}
1373 	ret = quick_update_accounting(fs_info, tmp, src, dst, 1);
1374 	spin_unlock(&fs_info->qgroup_lock);
1375 out:
1376 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1377 	ulist_free(tmp);
1378 	return ret;
1379 }
1380 
1381 static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
1382 				 u64 dst)
1383 {
1384 	struct btrfs_fs_info *fs_info = trans->fs_info;
1385 	struct btrfs_qgroup *parent;
1386 	struct btrfs_qgroup *member;
1387 	struct btrfs_qgroup_list *list;
1388 	struct ulist *tmp;
1389 	bool found = false;
1390 	int ret = 0;
1391 	int ret2;
1392 
1393 	tmp = ulist_alloc(GFP_KERNEL);
1394 	if (!tmp)
1395 		return -ENOMEM;
1396 
1397 	if (!fs_info->quota_root) {
1398 		ret = -ENOTCONN;
1399 		goto out;
1400 	}
1401 
1402 	member = find_qgroup_rb(fs_info, src);
1403 	parent = find_qgroup_rb(fs_info, dst);
1404 	/*
1405 	 * The parent/member pair doesn't exist, then try to delete the dead
1406 	 * relation items only.
1407 	 */
1408 	if (!member || !parent)
1409 		goto delete_item;
1410 
1411 	/* check if such qgroup relation exist firstly */
1412 	list_for_each_entry(list, &member->groups, next_group) {
1413 		if (list->group == parent) {
1414 			found = true;
1415 			break;
1416 		}
1417 	}
1418 
1419 delete_item:
1420 	ret = del_qgroup_relation_item(trans, src, dst);
1421 	if (ret < 0 && ret != -ENOENT)
1422 		goto out;
1423 	ret2 = del_qgroup_relation_item(trans, dst, src);
1424 	if (ret2 < 0 && ret2 != -ENOENT)
1425 		goto out;
1426 
1427 	/* At least one deletion succeeded, return 0 */
1428 	if (!ret || !ret2)
1429 		ret = 0;
1430 
1431 	if (found) {
1432 		spin_lock(&fs_info->qgroup_lock);
1433 		del_relation_rb(fs_info, src, dst);
1434 		ret = quick_update_accounting(fs_info, tmp, src, dst, -1);
1435 		spin_unlock(&fs_info->qgroup_lock);
1436 	}
1437 out:
1438 	ulist_free(tmp);
1439 	return ret;
1440 }
1441 
1442 int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
1443 			      u64 dst)
1444 {
1445 	struct btrfs_fs_info *fs_info = trans->fs_info;
1446 	int ret = 0;
1447 
1448 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1449 	ret = __del_qgroup_relation(trans, src, dst);
1450 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1451 
1452 	return ret;
1453 }
1454 
1455 int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
1456 {
1457 	struct btrfs_fs_info *fs_info = trans->fs_info;
1458 	struct btrfs_root *quota_root;
1459 	struct btrfs_qgroup *qgroup;
1460 	int ret = 0;
1461 
1462 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1463 	if (!fs_info->quota_root) {
1464 		ret = -ENOTCONN;
1465 		goto out;
1466 	}
1467 	quota_root = fs_info->quota_root;
1468 	qgroup = find_qgroup_rb(fs_info, qgroupid);
1469 	if (qgroup) {
1470 		ret = -EEXIST;
1471 		goto out;
1472 	}
1473 
1474 	ret = add_qgroup_item(trans, quota_root, qgroupid);
1475 	if (ret)
1476 		goto out;
1477 
1478 	spin_lock(&fs_info->qgroup_lock);
1479 	qgroup = add_qgroup_rb(fs_info, qgroupid);
1480 	spin_unlock(&fs_info->qgroup_lock);
1481 
1482 	if (IS_ERR(qgroup)) {
1483 		ret = PTR_ERR(qgroup);
1484 		goto out;
1485 	}
1486 	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
1487 out:
1488 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1489 	return ret;
1490 }
1491 
1492 int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
1493 {
1494 	struct btrfs_fs_info *fs_info = trans->fs_info;
1495 	struct btrfs_qgroup *qgroup;
1496 	struct btrfs_qgroup_list *list;
1497 	int ret = 0;
1498 
1499 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1500 	if (!fs_info->quota_root) {
1501 		ret = -ENOTCONN;
1502 		goto out;
1503 	}
1504 
1505 	qgroup = find_qgroup_rb(fs_info, qgroupid);
1506 	if (!qgroup) {
1507 		ret = -ENOENT;
1508 		goto out;
1509 	}
1510 
1511 	/* Check if there are no children of this qgroup */
1512 	if (!list_empty(&qgroup->members)) {
1513 		ret = -EBUSY;
1514 		goto out;
1515 	}
1516 
1517 	ret = del_qgroup_item(trans, qgroupid);
1518 	if (ret && ret != -ENOENT)
1519 		goto out;
1520 
1521 	while (!list_empty(&qgroup->groups)) {
1522 		list = list_first_entry(&qgroup->groups,
1523 					struct btrfs_qgroup_list, next_group);
1524 		ret = __del_qgroup_relation(trans, qgroupid,
1525 					    list->group->qgroupid);
1526 		if (ret)
1527 			goto out;
1528 	}
1529 
1530 	spin_lock(&fs_info->qgroup_lock);
1531 	del_qgroup_rb(fs_info, qgroupid);
1532 	spin_unlock(&fs_info->qgroup_lock);
1533 out:
1534 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1535 	return ret;
1536 }
1537 
1538 int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
1539 		       struct btrfs_qgroup_limit *limit)
1540 {
1541 	struct btrfs_fs_info *fs_info = trans->fs_info;
1542 	struct btrfs_qgroup *qgroup;
1543 	int ret = 0;
1544 	/* Sometimes we would want to clear the limit on this qgroup.
1545 	 * To meet this requirement, we treat the -1 as a special value
1546 	 * which tell kernel to clear the limit on this qgroup.
1547 	 */
1548 	const u64 CLEAR_VALUE = -1;
1549 
1550 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1551 	if (!fs_info->quota_root) {
1552 		ret = -ENOTCONN;
1553 		goto out;
1554 	}
1555 
1556 	qgroup = find_qgroup_rb(fs_info, qgroupid);
1557 	if (!qgroup) {
1558 		ret = -ENOENT;
1559 		goto out;
1560 	}
1561 
1562 	spin_lock(&fs_info->qgroup_lock);
1563 	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
1564 		if (limit->max_rfer == CLEAR_VALUE) {
1565 			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
1566 			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
1567 			qgroup->max_rfer = 0;
1568 		} else {
1569 			qgroup->max_rfer = limit->max_rfer;
1570 		}
1571 	}
1572 	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
1573 		if (limit->max_excl == CLEAR_VALUE) {
1574 			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
1575 			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
1576 			qgroup->max_excl = 0;
1577 		} else {
1578 			qgroup->max_excl = limit->max_excl;
1579 		}
1580 	}
1581 	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
1582 		if (limit->rsv_rfer == CLEAR_VALUE) {
1583 			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
1584 			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
1585 			qgroup->rsv_rfer = 0;
1586 		} else {
1587 			qgroup->rsv_rfer = limit->rsv_rfer;
1588 		}
1589 	}
1590 	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
1591 		if (limit->rsv_excl == CLEAR_VALUE) {
1592 			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
1593 			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
1594 			qgroup->rsv_excl = 0;
1595 		} else {
1596 			qgroup->rsv_excl = limit->rsv_excl;
1597 		}
1598 	}
1599 	qgroup->lim_flags |= limit->flags;
1600 
1601 	spin_unlock(&fs_info->qgroup_lock);
1602 
1603 	ret = update_qgroup_limit_item(trans, qgroup);
1604 	if (ret) {
1605 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1606 		btrfs_info(fs_info, "unable to update quota limit for %llu",
1607 		       qgroupid);
1608 	}
1609 
1610 out:
1611 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1612 	return ret;
1613 }
1614 
1615 int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
1616 				struct btrfs_delayed_ref_root *delayed_refs,
1617 				struct btrfs_qgroup_extent_record *record)
1618 {
1619 	struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
1620 	struct rb_node *parent_node = NULL;
1621 	struct btrfs_qgroup_extent_record *entry;
1622 	u64 bytenr = record->bytenr;
1623 
1624 	lockdep_assert_held(&delayed_refs->lock);
1625 	trace_btrfs_qgroup_trace_extent(fs_info, record);
1626 
1627 	while (*p) {
1628 		parent_node = *p;
1629 		entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
1630 				 node);
1631 		if (bytenr < entry->bytenr) {
1632 			p = &(*p)->rb_left;
1633 		} else if (bytenr > entry->bytenr) {
1634 			p = &(*p)->rb_right;
1635 		} else {
1636 			if (record->data_rsv && !entry->data_rsv) {
1637 				entry->data_rsv = record->data_rsv;
1638 				entry->data_rsv_refroot =
1639 					record->data_rsv_refroot;
1640 			}
1641 			return 1;
1642 		}
1643 	}
1644 
1645 	rb_link_node(&record->node, parent_node, p);
1646 	rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
1647 	return 0;
1648 }
1649 
1650 int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
1651 				   struct btrfs_qgroup_extent_record *qrecord)
1652 {
1653 	struct ulist *old_root;
1654 	u64 bytenr = qrecord->bytenr;
1655 	int ret;
1656 
1657 	ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &old_root, false);
1658 	if (ret < 0) {
1659 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1660 		btrfs_warn(fs_info,
1661 "error accounting new delayed refs extent (err code: %d), quota inconsistent",
1662 			ret);
1663 		return 0;
1664 	}
1665 
1666 	/*
1667 	 * Here we don't need to get the lock of
1668 	 * trans->transaction->delayed_refs, since inserted qrecord won't
1669 	 * be deleted, only qrecord->node may be modified (new qrecord insert)
1670 	 *
1671 	 * So modifying qrecord->old_roots is safe here
1672 	 */
1673 	qrecord->old_roots = old_root;
1674 	return 0;
1675 }
1676 
1677 int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
1678 			      u64 num_bytes, gfp_t gfp_flag)
1679 {
1680 	struct btrfs_fs_info *fs_info = trans->fs_info;
1681 	struct btrfs_qgroup_extent_record *record;
1682 	struct btrfs_delayed_ref_root *delayed_refs;
1683 	int ret;
1684 
1685 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)
1686 	    || bytenr == 0 || num_bytes == 0)
1687 		return 0;
1688 	record = kzalloc(sizeof(*record), gfp_flag);
1689 	if (!record)
1690 		return -ENOMEM;
1691 
1692 	delayed_refs = &trans->transaction->delayed_refs;
1693 	record->bytenr = bytenr;
1694 	record->num_bytes = num_bytes;
1695 	record->old_roots = NULL;
1696 
1697 	spin_lock(&delayed_refs->lock);
1698 	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
1699 	spin_unlock(&delayed_refs->lock);
1700 	if (ret > 0) {
1701 		kfree(record);
1702 		return 0;
1703 	}
1704 	return btrfs_qgroup_trace_extent_post(fs_info, record);
1705 }
1706 
1707 int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
1708 				  struct extent_buffer *eb)
1709 {
1710 	struct btrfs_fs_info *fs_info = trans->fs_info;
1711 	int nr = btrfs_header_nritems(eb);
1712 	int i, extent_type, ret;
1713 	struct btrfs_key key;
1714 	struct btrfs_file_extent_item *fi;
1715 	u64 bytenr, num_bytes;
1716 
1717 	/* We can be called directly from walk_up_proc() */
1718 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
1719 		return 0;
1720 
1721 	for (i = 0; i < nr; i++) {
1722 		btrfs_item_key_to_cpu(eb, &key, i);
1723 
1724 		if (key.type != BTRFS_EXTENT_DATA_KEY)
1725 			continue;
1726 
1727 		fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
1728 		/* filter out non qgroup-accountable extents  */
1729 		extent_type = btrfs_file_extent_type(eb, fi);
1730 
1731 		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
1732 			continue;
1733 
1734 		bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
1735 		if (!bytenr)
1736 			continue;
1737 
1738 		num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
1739 
1740 		ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes,
1741 						GFP_NOFS);
1742 		if (ret)
1743 			return ret;
1744 	}
1745 	cond_resched();
1746 	return 0;
1747 }
1748 
1749 /*
1750  * Walk up the tree from the bottom, freeing leaves and any interior
1751  * nodes which have had all slots visited. If a node (leaf or
1752  * interior) is freed, the node above it will have it's slot
1753  * incremented. The root node will never be freed.
1754  *
1755  * At the end of this function, we should have a path which has all
1756  * slots incremented to the next position for a search. If we need to
1757  * read a new node it will be NULL and the node above it will have the
1758  * correct slot selected for a later read.
1759  *
1760  * If we increment the root nodes slot counter past the number of
1761  * elements, 1 is returned to signal completion of the search.
1762  */
1763 static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
1764 {
1765 	int level = 0;
1766 	int nr, slot;
1767 	struct extent_buffer *eb;
1768 
1769 	if (root_level == 0)
1770 		return 1;
1771 
1772 	while (level <= root_level) {
1773 		eb = path->nodes[level];
1774 		nr = btrfs_header_nritems(eb);
1775 		path->slots[level]++;
1776 		slot = path->slots[level];
1777 		if (slot >= nr || level == 0) {
1778 			/*
1779 			 * Don't free the root -  we will detect this
1780 			 * condition after our loop and return a
1781 			 * positive value for caller to stop walking the tree.
1782 			 */
1783 			if (level != root_level) {
1784 				btrfs_tree_unlock_rw(eb, path->locks[level]);
1785 				path->locks[level] = 0;
1786 
1787 				free_extent_buffer(eb);
1788 				path->nodes[level] = NULL;
1789 				path->slots[level] = 0;
1790 			}
1791 		} else {
1792 			/*
1793 			 * We have a valid slot to walk back down
1794 			 * from. Stop here so caller can process these
1795 			 * new nodes.
1796 			 */
1797 			break;
1798 		}
1799 
1800 		level++;
1801 	}
1802 
1803 	eb = path->nodes[root_level];
1804 	if (path->slots[root_level] >= btrfs_header_nritems(eb))
1805 		return 1;
1806 
1807 	return 0;
1808 }
1809 
1810 /*
1811  * Helper function to trace a subtree tree block swap.
1812  *
1813  * The swap will happen in highest tree block, but there may be a lot of
1814  * tree blocks involved.
1815  *
1816  * For example:
1817  *  OO = Old tree blocks
1818  *  NN = New tree blocks allocated during balance
1819  *
1820  *           File tree (257)                  Reloc tree for 257
1821  * L2              OO                                NN
1822  *               /    \                            /    \
1823  * L1          OO      OO (a)                    OO      NN (a)
1824  *            / \     / \                       / \     / \
1825  * L0       OO   OO OO   OO                   OO   OO NN   NN
1826  *                  (b)  (c)                          (b)  (c)
1827  *
1828  * When calling qgroup_trace_extent_swap(), we will pass:
1829  * @src_eb = OO(a)
1830  * @dst_path = [ nodes[1] = NN(a), nodes[0] = NN(c) ]
1831  * @dst_level = 0
1832  * @root_level = 1
1833  *
1834  * In that case, qgroup_trace_extent_swap() will search from OO(a) to
1835  * reach OO(c), then mark both OO(c) and NN(c) as qgroup dirty.
1836  *
1837  * The main work of qgroup_trace_extent_swap() can be split into 3 parts:
1838  *
1839  * 1) Tree search from @src_eb
1840  *    It should acts as a simplified btrfs_search_slot().
1841  *    The key for search can be extracted from @dst_path->nodes[dst_level]
1842  *    (first key).
1843  *
1844  * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty
1845  *    NOTE: In above case, OO(a) and NN(a) won't be marked qgroup dirty.
1846  *    They should be marked during previous (@dst_level = 1) iteration.
1847  *
1848  * 3) Mark file extents in leaves dirty
1849  *    We don't have good way to pick out new file extents only.
1850  *    So we still follow the old method by scanning all file extents in
1851  *    the leave.
1852  *
1853  * This function can free us from keeping two paths, thus later we only need
1854  * to care about how to iterate all new tree blocks in reloc tree.
1855  */
1856 static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
1857 				    struct extent_buffer *src_eb,
1858 				    struct btrfs_path *dst_path,
1859 				    int dst_level, int root_level,
1860 				    bool trace_leaf)
1861 {
1862 	struct btrfs_key key;
1863 	struct btrfs_path *src_path;
1864 	struct btrfs_fs_info *fs_info = trans->fs_info;
1865 	u32 nodesize = fs_info->nodesize;
1866 	int cur_level = root_level;
1867 	int ret;
1868 
1869 	BUG_ON(dst_level > root_level);
1870 	/* Level mismatch */
1871 	if (btrfs_header_level(src_eb) != root_level)
1872 		return -EINVAL;
1873 
1874 	src_path = btrfs_alloc_path();
1875 	if (!src_path) {
1876 		ret = -ENOMEM;
1877 		goto out;
1878 	}
1879 
1880 	if (dst_level)
1881 		btrfs_node_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
1882 	else
1883 		btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
1884 
1885 	/* For src_path */
1886 	atomic_inc(&src_eb->refs);
1887 	src_path->nodes[root_level] = src_eb;
1888 	src_path->slots[root_level] = dst_path->slots[root_level];
1889 	src_path->locks[root_level] = 0;
1890 
1891 	/* A simplified version of btrfs_search_slot() */
1892 	while (cur_level >= dst_level) {
1893 		struct btrfs_key src_key;
1894 		struct btrfs_key dst_key;
1895 
1896 		if (src_path->nodes[cur_level] == NULL) {
1897 			struct btrfs_key first_key;
1898 			struct extent_buffer *eb;
1899 			int parent_slot;
1900 			u64 child_gen;
1901 			u64 child_bytenr;
1902 
1903 			eb = src_path->nodes[cur_level + 1];
1904 			parent_slot = src_path->slots[cur_level + 1];
1905 			child_bytenr = btrfs_node_blockptr(eb, parent_slot);
1906 			child_gen = btrfs_node_ptr_generation(eb, parent_slot);
1907 			btrfs_node_key_to_cpu(eb, &first_key, parent_slot);
1908 
1909 			eb = read_tree_block(fs_info, child_bytenr, child_gen,
1910 					     cur_level, &first_key);
1911 			if (IS_ERR(eb)) {
1912 				ret = PTR_ERR(eb);
1913 				goto out;
1914 			} else if (!extent_buffer_uptodate(eb)) {
1915 				free_extent_buffer(eb);
1916 				ret = -EIO;
1917 				goto out;
1918 			}
1919 
1920 			src_path->nodes[cur_level] = eb;
1921 
1922 			btrfs_tree_read_lock(eb);
1923 			btrfs_set_lock_blocking_read(eb);
1924 			src_path->locks[cur_level] = BTRFS_READ_LOCK_BLOCKING;
1925 		}
1926 
1927 		src_path->slots[cur_level] = dst_path->slots[cur_level];
1928 		if (cur_level) {
1929 			btrfs_node_key_to_cpu(dst_path->nodes[cur_level],
1930 					&dst_key, dst_path->slots[cur_level]);
1931 			btrfs_node_key_to_cpu(src_path->nodes[cur_level],
1932 					&src_key, src_path->slots[cur_level]);
1933 		} else {
1934 			btrfs_item_key_to_cpu(dst_path->nodes[cur_level],
1935 					&dst_key, dst_path->slots[cur_level]);
1936 			btrfs_item_key_to_cpu(src_path->nodes[cur_level],
1937 					&src_key, src_path->slots[cur_level]);
1938 		}
1939 		/* Content mismatch, something went wrong */
1940 		if (btrfs_comp_cpu_keys(&dst_key, &src_key)) {
1941 			ret = -ENOENT;
1942 			goto out;
1943 		}
1944 		cur_level--;
1945 	}
1946 
1947 	/*
1948 	 * Now both @dst_path and @src_path have been populated, record the tree
1949 	 * blocks for qgroup accounting.
1950 	 */
1951 	ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start,
1952 			nodesize, GFP_NOFS);
1953 	if (ret < 0)
1954 		goto out;
1955 	ret = btrfs_qgroup_trace_extent(trans,
1956 			dst_path->nodes[dst_level]->start,
1957 			nodesize, GFP_NOFS);
1958 	if (ret < 0)
1959 		goto out;
1960 
1961 	/* Record leaf file extents */
1962 	if (dst_level == 0 && trace_leaf) {
1963 		ret = btrfs_qgroup_trace_leaf_items(trans, src_path->nodes[0]);
1964 		if (ret < 0)
1965 			goto out;
1966 		ret = btrfs_qgroup_trace_leaf_items(trans, dst_path->nodes[0]);
1967 	}
1968 out:
1969 	btrfs_free_path(src_path);
1970 	return ret;
1971 }
1972 
1973 /*
1974  * Helper function to do recursive generation-aware depth-first search, to
1975  * locate all new tree blocks in a subtree of reloc tree.
1976  *
1977  * E.g. (OO = Old tree blocks, NN = New tree blocks, whose gen == last_snapshot)
1978  *         reloc tree
1979  * L2         NN (a)
1980  *          /    \
1981  * L1    OO        NN (b)
1982  *      /  \      /  \
1983  * L0  OO  OO    OO  NN
1984  *               (c) (d)
1985  * If we pass:
1986  * @dst_path = [ nodes[1] = NN(b), nodes[0] = NULL ],
1987  * @cur_level = 1
1988  * @root_level = 1
1989  *
1990  * We will iterate through tree blocks NN(b), NN(d) and info qgroup to trace
1991  * above tree blocks along with their counter parts in file tree.
1992  * While during search, old tree blocks OO(c) will be skipped as tree block swap
1993  * won't affect OO(c).
1994  */
1995 static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans,
1996 					   struct extent_buffer *src_eb,
1997 					   struct btrfs_path *dst_path,
1998 					   int cur_level, int root_level,
1999 					   u64 last_snapshot, bool trace_leaf)
2000 {
2001 	struct btrfs_fs_info *fs_info = trans->fs_info;
2002 	struct extent_buffer *eb;
2003 	bool need_cleanup = false;
2004 	int ret = 0;
2005 	int i;
2006 
2007 	/* Level sanity check */
2008 	if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 ||
2009 	    root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 ||
2010 	    root_level < cur_level) {
2011 		btrfs_err_rl(fs_info,
2012 			"%s: bad levels, cur_level=%d root_level=%d",
2013 			__func__, cur_level, root_level);
2014 		return -EUCLEAN;
2015 	}
2016 
2017 	/* Read the tree block if needed */
2018 	if (dst_path->nodes[cur_level] == NULL) {
2019 		struct btrfs_key first_key;
2020 		int parent_slot;
2021 		u64 child_gen;
2022 		u64 child_bytenr;
2023 
2024 		/*
2025 		 * dst_path->nodes[root_level] must be initialized before
2026 		 * calling this function.
2027 		 */
2028 		if (cur_level == root_level) {
2029 			btrfs_err_rl(fs_info,
2030 	"%s: dst_path->nodes[%d] not initialized, root_level=%d cur_level=%d",
2031 				__func__, root_level, root_level, cur_level);
2032 			return -EUCLEAN;
2033 		}
2034 
2035 		/*
2036 		 * We need to get child blockptr/gen from parent before we can
2037 		 * read it.
2038 		  */
2039 		eb = dst_path->nodes[cur_level + 1];
2040 		parent_slot = dst_path->slots[cur_level + 1];
2041 		child_bytenr = btrfs_node_blockptr(eb, parent_slot);
2042 		child_gen = btrfs_node_ptr_generation(eb, parent_slot);
2043 		btrfs_node_key_to_cpu(eb, &first_key, parent_slot);
2044 
2045 		/* This node is old, no need to trace */
2046 		if (child_gen < last_snapshot)
2047 			goto out;
2048 
2049 		eb = read_tree_block(fs_info, child_bytenr, child_gen,
2050 				     cur_level, &first_key);
2051 		if (IS_ERR(eb)) {
2052 			ret = PTR_ERR(eb);
2053 			goto out;
2054 		} else if (!extent_buffer_uptodate(eb)) {
2055 			free_extent_buffer(eb);
2056 			ret = -EIO;
2057 			goto out;
2058 		}
2059 
2060 		dst_path->nodes[cur_level] = eb;
2061 		dst_path->slots[cur_level] = 0;
2062 
2063 		btrfs_tree_read_lock(eb);
2064 		btrfs_set_lock_blocking_read(eb);
2065 		dst_path->locks[cur_level] = BTRFS_READ_LOCK_BLOCKING;
2066 		need_cleanup = true;
2067 	}
2068 
2069 	/* Now record this tree block and its counter part for qgroups */
2070 	ret = qgroup_trace_extent_swap(trans, src_eb, dst_path, cur_level,
2071 				       root_level, trace_leaf);
2072 	if (ret < 0)
2073 		goto cleanup;
2074 
2075 	eb = dst_path->nodes[cur_level];
2076 
2077 	if (cur_level > 0) {
2078 		/* Iterate all child tree blocks */
2079 		for (i = 0; i < btrfs_header_nritems(eb); i++) {
2080 			/* Skip old tree blocks as they won't be swapped */
2081 			if (btrfs_node_ptr_generation(eb, i) < last_snapshot)
2082 				continue;
2083 			dst_path->slots[cur_level] = i;
2084 
2085 			/* Recursive call (at most 7 times) */
2086 			ret = qgroup_trace_new_subtree_blocks(trans, src_eb,
2087 					dst_path, cur_level - 1, root_level,
2088 					last_snapshot, trace_leaf);
2089 			if (ret < 0)
2090 				goto cleanup;
2091 		}
2092 	}
2093 
2094 cleanup:
2095 	if (need_cleanup) {
2096 		/* Clean up */
2097 		btrfs_tree_unlock_rw(dst_path->nodes[cur_level],
2098 				     dst_path->locks[cur_level]);
2099 		free_extent_buffer(dst_path->nodes[cur_level]);
2100 		dst_path->nodes[cur_level] = NULL;
2101 		dst_path->slots[cur_level] = 0;
2102 		dst_path->locks[cur_level] = 0;
2103 	}
2104 out:
2105 	return ret;
2106 }
2107 
2108 static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
2109 				struct extent_buffer *src_eb,
2110 				struct extent_buffer *dst_eb,
2111 				u64 last_snapshot, bool trace_leaf)
2112 {
2113 	struct btrfs_fs_info *fs_info = trans->fs_info;
2114 	struct btrfs_path *dst_path = NULL;
2115 	int level;
2116 	int ret;
2117 
2118 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2119 		return 0;
2120 
2121 	/* Wrong parameter order */
2122 	if (btrfs_header_generation(src_eb) > btrfs_header_generation(dst_eb)) {
2123 		btrfs_err_rl(fs_info,
2124 		"%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__,
2125 			     btrfs_header_generation(src_eb),
2126 			     btrfs_header_generation(dst_eb));
2127 		return -EUCLEAN;
2128 	}
2129 
2130 	if (!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb)) {
2131 		ret = -EIO;
2132 		goto out;
2133 	}
2134 
2135 	level = btrfs_header_level(dst_eb);
2136 	dst_path = btrfs_alloc_path();
2137 	if (!dst_path) {
2138 		ret = -ENOMEM;
2139 		goto out;
2140 	}
2141 	/* For dst_path */
2142 	atomic_inc(&dst_eb->refs);
2143 	dst_path->nodes[level] = dst_eb;
2144 	dst_path->slots[level] = 0;
2145 	dst_path->locks[level] = 0;
2146 
2147 	/* Do the generation aware breadth-first search */
2148 	ret = qgroup_trace_new_subtree_blocks(trans, src_eb, dst_path, level,
2149 					      level, last_snapshot, trace_leaf);
2150 	if (ret < 0)
2151 		goto out;
2152 	ret = 0;
2153 
2154 out:
2155 	btrfs_free_path(dst_path);
2156 	if (ret < 0)
2157 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2158 	return ret;
2159 }
2160 
2161 int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
2162 			       struct extent_buffer *root_eb,
2163 			       u64 root_gen, int root_level)
2164 {
2165 	struct btrfs_fs_info *fs_info = trans->fs_info;
2166 	int ret = 0;
2167 	int level;
2168 	struct extent_buffer *eb = root_eb;
2169 	struct btrfs_path *path = NULL;
2170 
2171 	BUG_ON(root_level < 0 || root_level >= BTRFS_MAX_LEVEL);
2172 	BUG_ON(root_eb == NULL);
2173 
2174 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2175 		return 0;
2176 
2177 	if (!extent_buffer_uptodate(root_eb)) {
2178 		ret = btrfs_read_buffer(root_eb, root_gen, root_level, NULL);
2179 		if (ret)
2180 			goto out;
2181 	}
2182 
2183 	if (root_level == 0) {
2184 		ret = btrfs_qgroup_trace_leaf_items(trans, root_eb);
2185 		goto out;
2186 	}
2187 
2188 	path = btrfs_alloc_path();
2189 	if (!path)
2190 		return -ENOMEM;
2191 
2192 	/*
2193 	 * Walk down the tree.  Missing extent blocks are filled in as
2194 	 * we go. Metadata is accounted every time we read a new
2195 	 * extent block.
2196 	 *
2197 	 * When we reach a leaf, we account for file extent items in it,
2198 	 * walk back up the tree (adjusting slot pointers as we go)
2199 	 * and restart the search process.
2200 	 */
2201 	atomic_inc(&root_eb->refs);	/* For path */
2202 	path->nodes[root_level] = root_eb;
2203 	path->slots[root_level] = 0;
2204 	path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
2205 walk_down:
2206 	level = root_level;
2207 	while (level >= 0) {
2208 		if (path->nodes[level] == NULL) {
2209 			struct btrfs_key first_key;
2210 			int parent_slot;
2211 			u64 child_gen;
2212 			u64 child_bytenr;
2213 
2214 			/*
2215 			 * We need to get child blockptr/gen from parent before
2216 			 * we can read it.
2217 			  */
2218 			eb = path->nodes[level + 1];
2219 			parent_slot = path->slots[level + 1];
2220 			child_bytenr = btrfs_node_blockptr(eb, parent_slot);
2221 			child_gen = btrfs_node_ptr_generation(eb, parent_slot);
2222 			btrfs_node_key_to_cpu(eb, &first_key, parent_slot);
2223 
2224 			eb = read_tree_block(fs_info, child_bytenr, child_gen,
2225 					     level, &first_key);
2226 			if (IS_ERR(eb)) {
2227 				ret = PTR_ERR(eb);
2228 				goto out;
2229 			} else if (!extent_buffer_uptodate(eb)) {
2230 				free_extent_buffer(eb);
2231 				ret = -EIO;
2232 				goto out;
2233 			}
2234 
2235 			path->nodes[level] = eb;
2236 			path->slots[level] = 0;
2237 
2238 			btrfs_tree_read_lock(eb);
2239 			btrfs_set_lock_blocking_read(eb);
2240 			path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
2241 
2242 			ret = btrfs_qgroup_trace_extent(trans, child_bytenr,
2243 							fs_info->nodesize,
2244 							GFP_NOFS);
2245 			if (ret)
2246 				goto out;
2247 		}
2248 
2249 		if (level == 0) {
2250 			ret = btrfs_qgroup_trace_leaf_items(trans,
2251 							    path->nodes[level]);
2252 			if (ret)
2253 				goto out;
2254 
2255 			/* Nonzero return here means we completed our search */
2256 			ret = adjust_slots_upwards(path, root_level);
2257 			if (ret)
2258 				break;
2259 
2260 			/* Restart search with new slots */
2261 			goto walk_down;
2262 		}
2263 
2264 		level--;
2265 	}
2266 
2267 	ret = 0;
2268 out:
2269 	btrfs_free_path(path);
2270 
2271 	return ret;
2272 }
2273 
2274 #define UPDATE_NEW	0
2275 #define UPDATE_OLD	1
2276 /*
2277  * Walk all of the roots that points to the bytenr and adjust their refcnts.
2278  */
2279 static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
2280 				struct ulist *roots, struct ulist *tmp,
2281 				struct ulist *qgroups, u64 seq, int update_old)
2282 {
2283 	struct ulist_node *unode;
2284 	struct ulist_iterator uiter;
2285 	struct ulist_node *tmp_unode;
2286 	struct ulist_iterator tmp_uiter;
2287 	struct btrfs_qgroup *qg;
2288 	int ret = 0;
2289 
2290 	if (!roots)
2291 		return 0;
2292 	ULIST_ITER_INIT(&uiter);
2293 	while ((unode = ulist_next(roots, &uiter))) {
2294 		qg = find_qgroup_rb(fs_info, unode->val);
2295 		if (!qg)
2296 			continue;
2297 
2298 		ulist_reinit(tmp);
2299 		ret = ulist_add(qgroups, qg->qgroupid, qgroup_to_aux(qg),
2300 				GFP_ATOMIC);
2301 		if (ret < 0)
2302 			return ret;
2303 		ret = ulist_add(tmp, qg->qgroupid, qgroup_to_aux(qg), GFP_ATOMIC);
2304 		if (ret < 0)
2305 			return ret;
2306 		ULIST_ITER_INIT(&tmp_uiter);
2307 		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
2308 			struct btrfs_qgroup_list *glist;
2309 
2310 			qg = unode_aux_to_qgroup(tmp_unode);
2311 			if (update_old)
2312 				btrfs_qgroup_update_old_refcnt(qg, seq, 1);
2313 			else
2314 				btrfs_qgroup_update_new_refcnt(qg, seq, 1);
2315 			list_for_each_entry(glist, &qg->groups, next_group) {
2316 				ret = ulist_add(qgroups, glist->group->qgroupid,
2317 						qgroup_to_aux(glist->group),
2318 						GFP_ATOMIC);
2319 				if (ret < 0)
2320 					return ret;
2321 				ret = ulist_add(tmp, glist->group->qgroupid,
2322 						qgroup_to_aux(glist->group),
2323 						GFP_ATOMIC);
2324 				if (ret < 0)
2325 					return ret;
2326 			}
2327 		}
2328 	}
2329 	return 0;
2330 }
2331 
2332 /*
2333  * Update qgroup rfer/excl counters.
2334  * Rfer update is easy, codes can explain themselves.
2335  *
2336  * Excl update is tricky, the update is split into 2 parts.
2337  * Part 1: Possible exclusive <-> sharing detect:
2338  *	|	A	|	!A	|
2339  *  -------------------------------------
2340  *  B	|	*	|	-	|
2341  *  -------------------------------------
2342  *  !B	|	+	|	**	|
2343  *  -------------------------------------
2344  *
2345  * Conditions:
2346  * A:	cur_old_roots < nr_old_roots	(not exclusive before)
2347  * !A:	cur_old_roots == nr_old_roots	(possible exclusive before)
2348  * B:	cur_new_roots < nr_new_roots	(not exclusive now)
2349  * !B:	cur_new_roots == nr_new_roots	(possible exclusive now)
2350  *
2351  * Results:
2352  * +: Possible sharing -> exclusive	-: Possible exclusive -> sharing
2353  * *: Definitely not changed.		**: Possible unchanged.
2354  *
2355  * For !A and !B condition, the exception is cur_old/new_roots == 0 case.
2356  *
2357  * To make the logic clear, we first use condition A and B to split
2358  * combination into 4 results.
2359  *
2360  * Then, for result "+" and "-", check old/new_roots == 0 case, as in them
2361  * only on variant maybe 0.
2362  *
2363  * Lastly, check result **, since there are 2 variants maybe 0, split them
2364  * again(2x2).
2365  * But this time we don't need to consider other things, the codes and logic
2366  * is easy to understand now.
2367  */
2368 static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
2369 				  struct ulist *qgroups,
2370 				  u64 nr_old_roots,
2371 				  u64 nr_new_roots,
2372 				  u64 num_bytes, u64 seq)
2373 {
2374 	struct ulist_node *unode;
2375 	struct ulist_iterator uiter;
2376 	struct btrfs_qgroup *qg;
2377 	u64 cur_new_count, cur_old_count;
2378 
2379 	ULIST_ITER_INIT(&uiter);
2380 	while ((unode = ulist_next(qgroups, &uiter))) {
2381 		bool dirty = false;
2382 
2383 		qg = unode_aux_to_qgroup(unode);
2384 		cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
2385 		cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
2386 
2387 		trace_qgroup_update_counters(fs_info, qg, cur_old_count,
2388 					     cur_new_count);
2389 
2390 		/* Rfer update part */
2391 		if (cur_old_count == 0 && cur_new_count > 0) {
2392 			qg->rfer += num_bytes;
2393 			qg->rfer_cmpr += num_bytes;
2394 			dirty = true;
2395 		}
2396 		if (cur_old_count > 0 && cur_new_count == 0) {
2397 			qg->rfer -= num_bytes;
2398 			qg->rfer_cmpr -= num_bytes;
2399 			dirty = true;
2400 		}
2401 
2402 		/* Excl update part */
2403 		/* Exclusive/none -> shared case */
2404 		if (cur_old_count == nr_old_roots &&
2405 		    cur_new_count < nr_new_roots) {
2406 			/* Exclusive -> shared */
2407 			if (cur_old_count != 0) {
2408 				qg->excl -= num_bytes;
2409 				qg->excl_cmpr -= num_bytes;
2410 				dirty = true;
2411 			}
2412 		}
2413 
2414 		/* Shared -> exclusive/none case */
2415 		if (cur_old_count < nr_old_roots &&
2416 		    cur_new_count == nr_new_roots) {
2417 			/* Shared->exclusive */
2418 			if (cur_new_count != 0) {
2419 				qg->excl += num_bytes;
2420 				qg->excl_cmpr += num_bytes;
2421 				dirty = true;
2422 			}
2423 		}
2424 
2425 		/* Exclusive/none -> exclusive/none case */
2426 		if (cur_old_count == nr_old_roots &&
2427 		    cur_new_count == nr_new_roots) {
2428 			if (cur_old_count == 0) {
2429 				/* None -> exclusive/none */
2430 
2431 				if (cur_new_count != 0) {
2432 					/* None -> exclusive */
2433 					qg->excl += num_bytes;
2434 					qg->excl_cmpr += num_bytes;
2435 					dirty = true;
2436 				}
2437 				/* None -> none, nothing changed */
2438 			} else {
2439 				/* Exclusive -> exclusive/none */
2440 
2441 				if (cur_new_count == 0) {
2442 					/* Exclusive -> none */
2443 					qg->excl -= num_bytes;
2444 					qg->excl_cmpr -= num_bytes;
2445 					dirty = true;
2446 				}
2447 				/* Exclusive -> exclusive, nothing changed */
2448 			}
2449 		}
2450 
2451 		if (dirty)
2452 			qgroup_dirty(fs_info, qg);
2453 	}
2454 	return 0;
2455 }
2456 
2457 /*
2458  * Check if the @roots potentially is a list of fs tree roots
2459  *
2460  * Return 0 for definitely not a fs/subvol tree roots ulist
2461  * Return 1 for possible fs/subvol tree roots in the list (considering an empty
2462  *          one as well)
2463  */
2464 static int maybe_fs_roots(struct ulist *roots)
2465 {
2466 	struct ulist_node *unode;
2467 	struct ulist_iterator uiter;
2468 
2469 	/* Empty one, still possible for fs roots */
2470 	if (!roots || roots->nnodes == 0)
2471 		return 1;
2472 
2473 	ULIST_ITER_INIT(&uiter);
2474 	unode = ulist_next(roots, &uiter);
2475 	if (!unode)
2476 		return 1;
2477 
2478 	/*
2479 	 * If it contains fs tree roots, then it must belong to fs/subvol
2480 	 * trees.
2481 	 * If it contains a non-fs tree, it won't be shared with fs/subvol trees.
2482 	 */
2483 	return is_fstree(unode->val);
2484 }
2485 
2486 int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
2487 				u64 num_bytes, struct ulist *old_roots,
2488 				struct ulist *new_roots)
2489 {
2490 	struct btrfs_fs_info *fs_info = trans->fs_info;
2491 	struct ulist *qgroups = NULL;
2492 	struct ulist *tmp = NULL;
2493 	u64 seq;
2494 	u64 nr_new_roots = 0;
2495 	u64 nr_old_roots = 0;
2496 	int ret = 0;
2497 
2498 	/*
2499 	 * If quotas get disabled meanwhile, the resouces need to be freed and
2500 	 * we can't just exit here.
2501 	 */
2502 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2503 		goto out_free;
2504 
2505 	if (new_roots) {
2506 		if (!maybe_fs_roots(new_roots))
2507 			goto out_free;
2508 		nr_new_roots = new_roots->nnodes;
2509 	}
2510 	if (old_roots) {
2511 		if (!maybe_fs_roots(old_roots))
2512 			goto out_free;
2513 		nr_old_roots = old_roots->nnodes;
2514 	}
2515 
2516 	/* Quick exit, either not fs tree roots, or won't affect any qgroup */
2517 	if (nr_old_roots == 0 && nr_new_roots == 0)
2518 		goto out_free;
2519 
2520 	BUG_ON(!fs_info->quota_root);
2521 
2522 	trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr,
2523 					num_bytes, nr_old_roots, nr_new_roots);
2524 
2525 	qgroups = ulist_alloc(GFP_NOFS);
2526 	if (!qgroups) {
2527 		ret = -ENOMEM;
2528 		goto out_free;
2529 	}
2530 	tmp = ulist_alloc(GFP_NOFS);
2531 	if (!tmp) {
2532 		ret = -ENOMEM;
2533 		goto out_free;
2534 	}
2535 
2536 	mutex_lock(&fs_info->qgroup_rescan_lock);
2537 	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
2538 		if (fs_info->qgroup_rescan_progress.objectid <= bytenr) {
2539 			mutex_unlock(&fs_info->qgroup_rescan_lock);
2540 			ret = 0;
2541 			goto out_free;
2542 		}
2543 	}
2544 	mutex_unlock(&fs_info->qgroup_rescan_lock);
2545 
2546 	spin_lock(&fs_info->qgroup_lock);
2547 	seq = fs_info->qgroup_seq;
2548 
2549 	/* Update old refcnts using old_roots */
2550 	ret = qgroup_update_refcnt(fs_info, old_roots, tmp, qgroups, seq,
2551 				   UPDATE_OLD);
2552 	if (ret < 0)
2553 		goto out;
2554 
2555 	/* Update new refcnts using new_roots */
2556 	ret = qgroup_update_refcnt(fs_info, new_roots, tmp, qgroups, seq,
2557 				   UPDATE_NEW);
2558 	if (ret < 0)
2559 		goto out;
2560 
2561 	qgroup_update_counters(fs_info, qgroups, nr_old_roots, nr_new_roots,
2562 			       num_bytes, seq);
2563 
2564 	/*
2565 	 * Bump qgroup_seq to avoid seq overlap
2566 	 */
2567 	fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1;
2568 out:
2569 	spin_unlock(&fs_info->qgroup_lock);
2570 out_free:
2571 	ulist_free(tmp);
2572 	ulist_free(qgroups);
2573 	ulist_free(old_roots);
2574 	ulist_free(new_roots);
2575 	return ret;
2576 }
2577 
2578 int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
2579 {
2580 	struct btrfs_fs_info *fs_info = trans->fs_info;
2581 	struct btrfs_qgroup_extent_record *record;
2582 	struct btrfs_delayed_ref_root *delayed_refs;
2583 	struct ulist *new_roots = NULL;
2584 	struct rb_node *node;
2585 	u64 num_dirty_extents = 0;
2586 	u64 qgroup_to_skip;
2587 	int ret = 0;
2588 
2589 	delayed_refs = &trans->transaction->delayed_refs;
2590 	qgroup_to_skip = delayed_refs->qgroup_to_skip;
2591 	while ((node = rb_first(&delayed_refs->dirty_extent_root))) {
2592 		record = rb_entry(node, struct btrfs_qgroup_extent_record,
2593 				  node);
2594 
2595 		num_dirty_extents++;
2596 		trace_btrfs_qgroup_account_extents(fs_info, record);
2597 
2598 		if (!ret) {
2599 			/*
2600 			 * Old roots should be searched when inserting qgroup
2601 			 * extent record
2602 			 */
2603 			if (WARN_ON(!record->old_roots)) {
2604 				/* Search commit root to find old_roots */
2605 				ret = btrfs_find_all_roots(NULL, fs_info,
2606 						record->bytenr, 0,
2607 						&record->old_roots, false);
2608 				if (ret < 0)
2609 					goto cleanup;
2610 			}
2611 
2612 			/* Free the reserved data space */
2613 			btrfs_qgroup_free_refroot(fs_info,
2614 					record->data_rsv_refroot,
2615 					record->data_rsv,
2616 					BTRFS_QGROUP_RSV_DATA);
2617 			/*
2618 			 * Use SEQ_LAST as time_seq to do special search, which
2619 			 * doesn't lock tree or delayed_refs and search current
2620 			 * root. It's safe inside commit_transaction().
2621 			 */
2622 			ret = btrfs_find_all_roots(trans, fs_info,
2623 				record->bytenr, SEQ_LAST, &new_roots, false);
2624 			if (ret < 0)
2625 				goto cleanup;
2626 			if (qgroup_to_skip) {
2627 				ulist_del(new_roots, qgroup_to_skip, 0);
2628 				ulist_del(record->old_roots, qgroup_to_skip,
2629 					  0);
2630 			}
2631 			ret = btrfs_qgroup_account_extent(trans, record->bytenr,
2632 							  record->num_bytes,
2633 							  record->old_roots,
2634 							  new_roots);
2635 			record->old_roots = NULL;
2636 			new_roots = NULL;
2637 		}
2638 cleanup:
2639 		ulist_free(record->old_roots);
2640 		ulist_free(new_roots);
2641 		new_roots = NULL;
2642 		rb_erase(node, &delayed_refs->dirty_extent_root);
2643 		kfree(record);
2644 
2645 	}
2646 	trace_qgroup_num_dirty_extents(fs_info, trans->transid,
2647 				       num_dirty_extents);
2648 	return ret;
2649 }
2650 
2651 /*
2652  * called from commit_transaction. Writes all changed qgroups to disk.
2653  */
2654 int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
2655 {
2656 	struct btrfs_fs_info *fs_info = trans->fs_info;
2657 	int ret = 0;
2658 
2659 	if (!fs_info->quota_root)
2660 		return ret;
2661 
2662 	spin_lock(&fs_info->qgroup_lock);
2663 	while (!list_empty(&fs_info->dirty_qgroups)) {
2664 		struct btrfs_qgroup *qgroup;
2665 		qgroup = list_first_entry(&fs_info->dirty_qgroups,
2666 					  struct btrfs_qgroup, dirty);
2667 		list_del_init(&qgroup->dirty);
2668 		spin_unlock(&fs_info->qgroup_lock);
2669 		ret = update_qgroup_info_item(trans, qgroup);
2670 		if (ret)
2671 			fs_info->qgroup_flags |=
2672 					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2673 		ret = update_qgroup_limit_item(trans, qgroup);
2674 		if (ret)
2675 			fs_info->qgroup_flags |=
2676 					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2677 		spin_lock(&fs_info->qgroup_lock);
2678 	}
2679 	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2680 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
2681 	else
2682 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
2683 	spin_unlock(&fs_info->qgroup_lock);
2684 
2685 	ret = update_qgroup_status_item(trans);
2686 	if (ret)
2687 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2688 
2689 	return ret;
2690 }
2691 
2692 /*
2693  * Copy the accounting information between qgroups. This is necessary
2694  * when a snapshot or a subvolume is created. Throwing an error will
2695  * cause a transaction abort so we take extra care here to only error
2696  * when a readonly fs is a reasonable outcome.
2697  */
2698 int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
2699 			 u64 objectid, struct btrfs_qgroup_inherit *inherit)
2700 {
2701 	int ret = 0;
2702 	int i;
2703 	u64 *i_qgroups;
2704 	bool committing = false;
2705 	struct btrfs_fs_info *fs_info = trans->fs_info;
2706 	struct btrfs_root *quota_root;
2707 	struct btrfs_qgroup *srcgroup;
2708 	struct btrfs_qgroup *dstgroup;
2709 	bool need_rescan = false;
2710 	u32 level_size = 0;
2711 	u64 nums;
2712 
2713 	/*
2714 	 * There are only two callers of this function.
2715 	 *
2716 	 * One in create_subvol() in the ioctl context, which needs to hold
2717 	 * the qgroup_ioctl_lock.
2718 	 *
2719 	 * The other one in create_pending_snapshot() where no other qgroup
2720 	 * code can modify the fs as they all need to either start a new trans
2721 	 * or hold a trans handler, thus we don't need to hold
2722 	 * qgroup_ioctl_lock.
2723 	 * This would avoid long and complex lock chain and make lockdep happy.
2724 	 */
2725 	spin_lock(&fs_info->trans_lock);
2726 	if (trans->transaction->state == TRANS_STATE_COMMIT_DOING)
2727 		committing = true;
2728 	spin_unlock(&fs_info->trans_lock);
2729 
2730 	if (!committing)
2731 		mutex_lock(&fs_info->qgroup_ioctl_lock);
2732 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2733 		goto out;
2734 
2735 	quota_root = fs_info->quota_root;
2736 	if (!quota_root) {
2737 		ret = -EINVAL;
2738 		goto out;
2739 	}
2740 
2741 	if (inherit) {
2742 		i_qgroups = (u64 *)(inherit + 1);
2743 		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
2744 		       2 * inherit->num_excl_copies;
2745 		for (i = 0; i < nums; ++i) {
2746 			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
2747 
2748 			/*
2749 			 * Zero out invalid groups so we can ignore
2750 			 * them later.
2751 			 */
2752 			if (!srcgroup ||
2753 			    ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
2754 				*i_qgroups = 0ULL;
2755 
2756 			++i_qgroups;
2757 		}
2758 	}
2759 
2760 	/*
2761 	 * create a tracking group for the subvol itself
2762 	 */
2763 	ret = add_qgroup_item(trans, quota_root, objectid);
2764 	if (ret)
2765 		goto out;
2766 
2767 	/*
2768 	 * add qgroup to all inherited groups
2769 	 */
2770 	if (inherit) {
2771 		i_qgroups = (u64 *)(inherit + 1);
2772 		for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
2773 			if (*i_qgroups == 0)
2774 				continue;
2775 			ret = add_qgroup_relation_item(trans, objectid,
2776 						       *i_qgroups);
2777 			if (ret && ret != -EEXIST)
2778 				goto out;
2779 			ret = add_qgroup_relation_item(trans, *i_qgroups,
2780 						       objectid);
2781 			if (ret && ret != -EEXIST)
2782 				goto out;
2783 		}
2784 		ret = 0;
2785 	}
2786 
2787 
2788 	spin_lock(&fs_info->qgroup_lock);
2789 
2790 	dstgroup = add_qgroup_rb(fs_info, objectid);
2791 	if (IS_ERR(dstgroup)) {
2792 		ret = PTR_ERR(dstgroup);
2793 		goto unlock;
2794 	}
2795 
2796 	if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
2797 		dstgroup->lim_flags = inherit->lim.flags;
2798 		dstgroup->max_rfer = inherit->lim.max_rfer;
2799 		dstgroup->max_excl = inherit->lim.max_excl;
2800 		dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
2801 		dstgroup->rsv_excl = inherit->lim.rsv_excl;
2802 
2803 		ret = update_qgroup_limit_item(trans, dstgroup);
2804 		if (ret) {
2805 			fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2806 			btrfs_info(fs_info,
2807 				   "unable to update quota limit for %llu",
2808 				   dstgroup->qgroupid);
2809 			goto unlock;
2810 		}
2811 	}
2812 
2813 	if (srcid) {
2814 		srcgroup = find_qgroup_rb(fs_info, srcid);
2815 		if (!srcgroup)
2816 			goto unlock;
2817 
2818 		/*
2819 		 * We call inherit after we clone the root in order to make sure
2820 		 * our counts don't go crazy, so at this point the only
2821 		 * difference between the two roots should be the root node.
2822 		 */
2823 		level_size = fs_info->nodesize;
2824 		dstgroup->rfer = srcgroup->rfer;
2825 		dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
2826 		dstgroup->excl = level_size;
2827 		dstgroup->excl_cmpr = level_size;
2828 		srcgroup->excl = level_size;
2829 		srcgroup->excl_cmpr = level_size;
2830 
2831 		/* inherit the limit info */
2832 		dstgroup->lim_flags = srcgroup->lim_flags;
2833 		dstgroup->max_rfer = srcgroup->max_rfer;
2834 		dstgroup->max_excl = srcgroup->max_excl;
2835 		dstgroup->rsv_rfer = srcgroup->rsv_rfer;
2836 		dstgroup->rsv_excl = srcgroup->rsv_excl;
2837 
2838 		qgroup_dirty(fs_info, dstgroup);
2839 		qgroup_dirty(fs_info, srcgroup);
2840 	}
2841 
2842 	if (!inherit)
2843 		goto unlock;
2844 
2845 	i_qgroups = (u64 *)(inherit + 1);
2846 	for (i = 0; i < inherit->num_qgroups; ++i) {
2847 		if (*i_qgroups) {
2848 			ret = add_relation_rb(fs_info, objectid, *i_qgroups);
2849 			if (ret)
2850 				goto unlock;
2851 		}
2852 		++i_qgroups;
2853 
2854 		/*
2855 		 * If we're doing a snapshot, and adding the snapshot to a new
2856 		 * qgroup, the numbers are guaranteed to be incorrect.
2857 		 */
2858 		if (srcid)
2859 			need_rescan = true;
2860 	}
2861 
2862 	for (i = 0; i <  inherit->num_ref_copies; ++i, i_qgroups += 2) {
2863 		struct btrfs_qgroup *src;
2864 		struct btrfs_qgroup *dst;
2865 
2866 		if (!i_qgroups[0] || !i_qgroups[1])
2867 			continue;
2868 
2869 		src = find_qgroup_rb(fs_info, i_qgroups[0]);
2870 		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
2871 
2872 		if (!src || !dst) {
2873 			ret = -EINVAL;
2874 			goto unlock;
2875 		}
2876 
2877 		dst->rfer = src->rfer - level_size;
2878 		dst->rfer_cmpr = src->rfer_cmpr - level_size;
2879 
2880 		/* Manually tweaking numbers certainly needs a rescan */
2881 		need_rescan = true;
2882 	}
2883 	for (i = 0; i <  inherit->num_excl_copies; ++i, i_qgroups += 2) {
2884 		struct btrfs_qgroup *src;
2885 		struct btrfs_qgroup *dst;
2886 
2887 		if (!i_qgroups[0] || !i_qgroups[1])
2888 			continue;
2889 
2890 		src = find_qgroup_rb(fs_info, i_qgroups[0]);
2891 		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
2892 
2893 		if (!src || !dst) {
2894 			ret = -EINVAL;
2895 			goto unlock;
2896 		}
2897 
2898 		dst->excl = src->excl + level_size;
2899 		dst->excl_cmpr = src->excl_cmpr + level_size;
2900 		need_rescan = true;
2901 	}
2902 
2903 unlock:
2904 	spin_unlock(&fs_info->qgroup_lock);
2905 	if (!ret)
2906 		ret = btrfs_sysfs_add_one_qgroup(fs_info, dstgroup);
2907 out:
2908 	if (!committing)
2909 		mutex_unlock(&fs_info->qgroup_ioctl_lock);
2910 	if (need_rescan)
2911 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2912 	return ret;
2913 }
2914 
2915 static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes)
2916 {
2917 	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
2918 	    qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer)
2919 		return false;
2920 
2921 	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
2922 	    qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl)
2923 		return false;
2924 
2925 	return true;
2926 }
2927 
2928 static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
2929 			  enum btrfs_qgroup_rsv_type type)
2930 {
2931 	struct btrfs_qgroup *qgroup;
2932 	struct btrfs_fs_info *fs_info = root->fs_info;
2933 	u64 ref_root = root->root_key.objectid;
2934 	int ret = 0;
2935 	struct ulist_node *unode;
2936 	struct ulist_iterator uiter;
2937 
2938 	if (!is_fstree(ref_root))
2939 		return 0;
2940 
2941 	if (num_bytes == 0)
2942 		return 0;
2943 
2944 	if (test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags) &&
2945 	    capable(CAP_SYS_RESOURCE))
2946 		enforce = false;
2947 
2948 	spin_lock(&fs_info->qgroup_lock);
2949 	if (!fs_info->quota_root)
2950 		goto out;
2951 
2952 	qgroup = find_qgroup_rb(fs_info, ref_root);
2953 	if (!qgroup)
2954 		goto out;
2955 
2956 	/*
2957 	 * in a first step, we check all affected qgroups if any limits would
2958 	 * be exceeded
2959 	 */
2960 	ulist_reinit(fs_info->qgroup_ulist);
2961 	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
2962 			qgroup_to_aux(qgroup), GFP_ATOMIC);
2963 	if (ret < 0)
2964 		goto out;
2965 	ULIST_ITER_INIT(&uiter);
2966 	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
2967 		struct btrfs_qgroup *qg;
2968 		struct btrfs_qgroup_list *glist;
2969 
2970 		qg = unode_aux_to_qgroup(unode);
2971 
2972 		if (enforce && !qgroup_check_limits(qg, num_bytes)) {
2973 			ret = -EDQUOT;
2974 			goto out;
2975 		}
2976 
2977 		list_for_each_entry(glist, &qg->groups, next_group) {
2978 			ret = ulist_add(fs_info->qgroup_ulist,
2979 					glist->group->qgroupid,
2980 					qgroup_to_aux(glist->group), GFP_ATOMIC);
2981 			if (ret < 0)
2982 				goto out;
2983 		}
2984 	}
2985 	ret = 0;
2986 	/*
2987 	 * no limits exceeded, now record the reservation into all qgroups
2988 	 */
2989 	ULIST_ITER_INIT(&uiter);
2990 	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
2991 		struct btrfs_qgroup *qg;
2992 
2993 		qg = unode_aux_to_qgroup(unode);
2994 
2995 		qgroup_rsv_add(fs_info, qg, num_bytes, type);
2996 	}
2997 
2998 out:
2999 	spin_unlock(&fs_info->qgroup_lock);
3000 	return ret;
3001 }
3002 
3003 /*
3004  * Free @num_bytes of reserved space with @type for qgroup.  (Normally level 0
3005  * qgroup).
3006  *
3007  * Will handle all higher level qgroup too.
3008  *
3009  * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup.
3010  * This special case is only used for META_PERTRANS type.
3011  */
3012 void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
3013 			       u64 ref_root, u64 num_bytes,
3014 			       enum btrfs_qgroup_rsv_type type)
3015 {
3016 	struct btrfs_qgroup *qgroup;
3017 	struct ulist_node *unode;
3018 	struct ulist_iterator uiter;
3019 	int ret = 0;
3020 
3021 	if (!is_fstree(ref_root))
3022 		return;
3023 
3024 	if (num_bytes == 0)
3025 		return;
3026 
3027 	if (num_bytes == (u64)-1 && type != BTRFS_QGROUP_RSV_META_PERTRANS) {
3028 		WARN(1, "%s: Invalid type to free", __func__);
3029 		return;
3030 	}
3031 	spin_lock(&fs_info->qgroup_lock);
3032 
3033 	if (!fs_info->quota_root)
3034 		goto out;
3035 
3036 	qgroup = find_qgroup_rb(fs_info, ref_root);
3037 	if (!qgroup)
3038 		goto out;
3039 
3040 	if (num_bytes == (u64)-1)
3041 		/*
3042 		 * We're freeing all pertrans rsv, get reserved value from
3043 		 * level 0 qgroup as real num_bytes to free.
3044 		 */
3045 		num_bytes = qgroup->rsv.values[type];
3046 
3047 	ulist_reinit(fs_info->qgroup_ulist);
3048 	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
3049 			qgroup_to_aux(qgroup), GFP_ATOMIC);
3050 	if (ret < 0)
3051 		goto out;
3052 	ULIST_ITER_INIT(&uiter);
3053 	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
3054 		struct btrfs_qgroup *qg;
3055 		struct btrfs_qgroup_list *glist;
3056 
3057 		qg = unode_aux_to_qgroup(unode);
3058 
3059 		qgroup_rsv_release(fs_info, qg, num_bytes, type);
3060 
3061 		list_for_each_entry(glist, &qg->groups, next_group) {
3062 			ret = ulist_add(fs_info->qgroup_ulist,
3063 					glist->group->qgroupid,
3064 					qgroup_to_aux(glist->group), GFP_ATOMIC);
3065 			if (ret < 0)
3066 				goto out;
3067 		}
3068 	}
3069 
3070 out:
3071 	spin_unlock(&fs_info->qgroup_lock);
3072 }
3073 
3074 /*
3075  * Check if the leaf is the last leaf. Which means all node pointers
3076  * are at their last position.
3077  */
3078 static bool is_last_leaf(struct btrfs_path *path)
3079 {
3080 	int i;
3081 
3082 	for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
3083 		if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1)
3084 			return false;
3085 	}
3086 	return true;
3087 }
3088 
3089 /*
3090  * returns < 0 on error, 0 when more leafs are to be scanned.
3091  * returns 1 when done.
3092  */
3093 static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
3094 			      struct btrfs_path *path)
3095 {
3096 	struct btrfs_fs_info *fs_info = trans->fs_info;
3097 	struct btrfs_key found;
3098 	struct extent_buffer *scratch_leaf = NULL;
3099 	struct ulist *roots = NULL;
3100 	u64 num_bytes;
3101 	bool done;
3102 	int slot;
3103 	int ret;
3104 
3105 	mutex_lock(&fs_info->qgroup_rescan_lock);
3106 	ret = btrfs_search_slot_for_read(fs_info->extent_root,
3107 					 &fs_info->qgroup_rescan_progress,
3108 					 path, 1, 0);
3109 
3110 	btrfs_debug(fs_info,
3111 		"current progress key (%llu %u %llu), search_slot ret %d",
3112 		fs_info->qgroup_rescan_progress.objectid,
3113 		fs_info->qgroup_rescan_progress.type,
3114 		fs_info->qgroup_rescan_progress.offset, ret);
3115 
3116 	if (ret) {
3117 		/*
3118 		 * The rescan is about to end, we will not be scanning any
3119 		 * further blocks. We cannot unset the RESCAN flag here, because
3120 		 * we want to commit the transaction if everything went well.
3121 		 * To make the live accounting work in this phase, we set our
3122 		 * scan progress pointer such that every real extent objectid
3123 		 * will be smaller.
3124 		 */
3125 		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
3126 		btrfs_release_path(path);
3127 		mutex_unlock(&fs_info->qgroup_rescan_lock);
3128 		return ret;
3129 	}
3130 	done = is_last_leaf(path);
3131 
3132 	btrfs_item_key_to_cpu(path->nodes[0], &found,
3133 			      btrfs_header_nritems(path->nodes[0]) - 1);
3134 	fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;
3135 
3136 	scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]);
3137 	if (!scratch_leaf) {
3138 		ret = -ENOMEM;
3139 		mutex_unlock(&fs_info->qgroup_rescan_lock);
3140 		goto out;
3141 	}
3142 	slot = path->slots[0];
3143 	btrfs_release_path(path);
3144 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3145 
3146 	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
3147 		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
3148 		if (found.type != BTRFS_EXTENT_ITEM_KEY &&
3149 		    found.type != BTRFS_METADATA_ITEM_KEY)
3150 			continue;
3151 		if (found.type == BTRFS_METADATA_ITEM_KEY)
3152 			num_bytes = fs_info->nodesize;
3153 		else
3154 			num_bytes = found.offset;
3155 
3156 		ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
3157 					   &roots, false);
3158 		if (ret < 0)
3159 			goto out;
3160 		/* For rescan, just pass old_roots as NULL */
3161 		ret = btrfs_qgroup_account_extent(trans, found.objectid,
3162 						  num_bytes, NULL, roots);
3163 		if (ret < 0)
3164 			goto out;
3165 	}
3166 out:
3167 	if (scratch_leaf)
3168 		free_extent_buffer(scratch_leaf);
3169 
3170 	if (done && !ret) {
3171 		ret = 1;
3172 		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
3173 	}
3174 	return ret;
3175 }
3176 
3177 static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
3178 {
3179 	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
3180 						     qgroup_rescan_work);
3181 	struct btrfs_path *path;
3182 	struct btrfs_trans_handle *trans = NULL;
3183 	int err = -ENOMEM;
3184 	int ret = 0;
3185 
3186 	path = btrfs_alloc_path();
3187 	if (!path)
3188 		goto out;
3189 	/*
3190 	 * Rescan should only search for commit root, and any later difference
3191 	 * should be recorded by qgroup
3192 	 */
3193 	path->search_commit_root = 1;
3194 	path->skip_locking = 1;
3195 
3196 	err = 0;
3197 	while (!err && !btrfs_fs_closing(fs_info)) {
3198 		trans = btrfs_start_transaction(fs_info->fs_root, 0);
3199 		if (IS_ERR(trans)) {
3200 			err = PTR_ERR(trans);
3201 			break;
3202 		}
3203 		if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
3204 			err = -EINTR;
3205 		} else {
3206 			err = qgroup_rescan_leaf(trans, path);
3207 		}
3208 		if (err > 0)
3209 			btrfs_commit_transaction(trans);
3210 		else
3211 			btrfs_end_transaction(trans);
3212 	}
3213 
3214 out:
3215 	btrfs_free_path(path);
3216 
3217 	mutex_lock(&fs_info->qgroup_rescan_lock);
3218 	if (err > 0 &&
3219 	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
3220 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
3221 	} else if (err < 0) {
3222 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
3223 	}
3224 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3225 
3226 	/*
3227 	 * only update status, since the previous part has already updated the
3228 	 * qgroup info.
3229 	 */
3230 	trans = btrfs_start_transaction(fs_info->quota_root, 1);
3231 	if (IS_ERR(trans)) {
3232 		err = PTR_ERR(trans);
3233 		trans = NULL;
3234 		btrfs_err(fs_info,
3235 			  "fail to start transaction for status update: %d",
3236 			  err);
3237 	}
3238 
3239 	mutex_lock(&fs_info->qgroup_rescan_lock);
3240 	if (!btrfs_fs_closing(fs_info))
3241 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3242 	if (trans) {
3243 		ret = update_qgroup_status_item(trans);
3244 		if (ret < 0) {
3245 			err = ret;
3246 			btrfs_err(fs_info, "fail to update qgroup status: %d",
3247 				  err);
3248 		}
3249 	}
3250 	fs_info->qgroup_rescan_running = false;
3251 	complete_all(&fs_info->qgroup_rescan_completion);
3252 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3253 
3254 	if (!trans)
3255 		return;
3256 
3257 	btrfs_end_transaction(trans);
3258 
3259 	if (btrfs_fs_closing(fs_info)) {
3260 		btrfs_info(fs_info, "qgroup scan paused");
3261 	} else if (err >= 0) {
3262 		btrfs_info(fs_info, "qgroup scan completed%s",
3263 			err > 0 ? " (inconsistency flag cleared)" : "");
3264 	} else {
3265 		btrfs_err(fs_info, "qgroup scan failed with %d", err);
3266 	}
3267 }
3268 
3269 /*
3270  * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
3271  * memory required for the rescan context.
3272  */
3273 static int
3274 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
3275 		   int init_flags)
3276 {
3277 	int ret = 0;
3278 
3279 	if (!init_flags) {
3280 		/* we're resuming qgroup rescan at mount time */
3281 		if (!(fs_info->qgroup_flags &
3282 		      BTRFS_QGROUP_STATUS_FLAG_RESCAN)) {
3283 			btrfs_warn(fs_info,
3284 			"qgroup rescan init failed, qgroup rescan is not queued");
3285 			ret = -EINVAL;
3286 		} else if (!(fs_info->qgroup_flags &
3287 			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
3288 			btrfs_warn(fs_info,
3289 			"qgroup rescan init failed, qgroup is not enabled");
3290 			ret = -EINVAL;
3291 		}
3292 
3293 		if (ret)
3294 			return ret;
3295 	}
3296 
3297 	mutex_lock(&fs_info->qgroup_rescan_lock);
3298 
3299 	if (init_flags) {
3300 		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
3301 			btrfs_warn(fs_info,
3302 				   "qgroup rescan is already in progress");
3303 			ret = -EINPROGRESS;
3304 		} else if (!(fs_info->qgroup_flags &
3305 			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
3306 			btrfs_warn(fs_info,
3307 			"qgroup rescan init failed, qgroup is not enabled");
3308 			ret = -EINVAL;
3309 		}
3310 
3311 		if (ret) {
3312 			mutex_unlock(&fs_info->qgroup_rescan_lock);
3313 			return ret;
3314 		}
3315 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3316 	}
3317 
3318 	memset(&fs_info->qgroup_rescan_progress, 0,
3319 		sizeof(fs_info->qgroup_rescan_progress));
3320 	fs_info->qgroup_rescan_progress.objectid = progress_objectid;
3321 	init_completion(&fs_info->qgroup_rescan_completion);
3322 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3323 
3324 	btrfs_init_work(&fs_info->qgroup_rescan_work,
3325 			btrfs_qgroup_rescan_worker, NULL, NULL);
3326 	return 0;
3327 }
3328 
3329 static void
3330 qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
3331 {
3332 	struct rb_node *n;
3333 	struct btrfs_qgroup *qgroup;
3334 
3335 	spin_lock(&fs_info->qgroup_lock);
3336 	/* clear all current qgroup tracking information */
3337 	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
3338 		qgroup = rb_entry(n, struct btrfs_qgroup, node);
3339 		qgroup->rfer = 0;
3340 		qgroup->rfer_cmpr = 0;
3341 		qgroup->excl = 0;
3342 		qgroup->excl_cmpr = 0;
3343 		qgroup_dirty(fs_info, qgroup);
3344 	}
3345 	spin_unlock(&fs_info->qgroup_lock);
3346 }
3347 
3348 int
3349 btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
3350 {
3351 	int ret = 0;
3352 	struct btrfs_trans_handle *trans;
3353 
3354 	ret = qgroup_rescan_init(fs_info, 0, 1);
3355 	if (ret)
3356 		return ret;
3357 
3358 	/*
3359 	 * We have set the rescan_progress to 0, which means no more
3360 	 * delayed refs will be accounted by btrfs_qgroup_account_ref.
3361 	 * However, btrfs_qgroup_account_ref may be right after its call
3362 	 * to btrfs_find_all_roots, in which case it would still do the
3363 	 * accounting.
3364 	 * To solve this, we're committing the transaction, which will
3365 	 * ensure we run all delayed refs and only after that, we are
3366 	 * going to clear all tracking information for a clean start.
3367 	 */
3368 
3369 	trans = btrfs_join_transaction(fs_info->fs_root);
3370 	if (IS_ERR(trans)) {
3371 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3372 		return PTR_ERR(trans);
3373 	}
3374 	ret = btrfs_commit_transaction(trans);
3375 	if (ret) {
3376 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3377 		return ret;
3378 	}
3379 
3380 	qgroup_rescan_zero_tracking(fs_info);
3381 
3382 	mutex_lock(&fs_info->qgroup_rescan_lock);
3383 	fs_info->qgroup_rescan_running = true;
3384 	btrfs_queue_work(fs_info->qgroup_rescan_workers,
3385 			 &fs_info->qgroup_rescan_work);
3386 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3387 
3388 	return 0;
3389 }
3390 
3391 int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
3392 				     bool interruptible)
3393 {
3394 	int running;
3395 	int ret = 0;
3396 
3397 	mutex_lock(&fs_info->qgroup_rescan_lock);
3398 	running = fs_info->qgroup_rescan_running;
3399 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3400 
3401 	if (!running)
3402 		return 0;
3403 
3404 	if (interruptible)
3405 		ret = wait_for_completion_interruptible(
3406 					&fs_info->qgroup_rescan_completion);
3407 	else
3408 		wait_for_completion(&fs_info->qgroup_rescan_completion);
3409 
3410 	return ret;
3411 }
3412 
3413 /*
3414  * this is only called from open_ctree where we're still single threaded, thus
3415  * locking is omitted here.
3416  */
3417 void
3418 btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
3419 {
3420 	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
3421 		mutex_lock(&fs_info->qgroup_rescan_lock);
3422 		fs_info->qgroup_rescan_running = true;
3423 		btrfs_queue_work(fs_info->qgroup_rescan_workers,
3424 				 &fs_info->qgroup_rescan_work);
3425 		mutex_unlock(&fs_info->qgroup_rescan_lock);
3426 	}
3427 }
3428 
3429 #define rbtree_iterate_from_safe(node, next, start)				\
3430        for (node = start; node && ({ next = rb_next(node); 1;}); node = next)
3431 
3432 static int qgroup_unreserve_range(struct btrfs_inode *inode,
3433 				  struct extent_changeset *reserved, u64 start,
3434 				  u64 len)
3435 {
3436 	struct rb_node *node;
3437 	struct rb_node *next;
3438 	struct ulist_node *entry = NULL;
3439 	int ret = 0;
3440 
3441 	node = reserved->range_changed.root.rb_node;
3442 	while (node) {
3443 		entry = rb_entry(node, struct ulist_node, rb_node);
3444 		if (entry->val < start)
3445 			node = node->rb_right;
3446 		else if (entry)
3447 			node = node->rb_left;
3448 		else
3449 			break;
3450 	}
3451 
3452 	/* Empty changeset */
3453 	if (!entry)
3454 		return 0;
3455 
3456 	if (entry->val > start && rb_prev(&entry->rb_node))
3457 		entry = rb_entry(rb_prev(&entry->rb_node), struct ulist_node,
3458 				 rb_node);
3459 
3460 	rbtree_iterate_from_safe(node, next, &entry->rb_node) {
3461 		u64 entry_start;
3462 		u64 entry_end;
3463 		u64 entry_len;
3464 		int clear_ret;
3465 
3466 		entry = rb_entry(node, struct ulist_node, rb_node);
3467 		entry_start = entry->val;
3468 		entry_end = entry->aux;
3469 		entry_len = entry_end - entry_start + 1;
3470 
3471 		if (entry_start >= start + len)
3472 			break;
3473 		if (entry_start + entry_len <= start)
3474 			continue;
3475 		/*
3476 		 * Now the entry is in [start, start + len), revert the
3477 		 * EXTENT_QGROUP_RESERVED bit.
3478 		 */
3479 		clear_ret = clear_extent_bits(&inode->io_tree, entry_start,
3480 					      entry_end, EXTENT_QGROUP_RESERVED);
3481 		if (!ret && clear_ret < 0)
3482 			ret = clear_ret;
3483 
3484 		ulist_del(&reserved->range_changed, entry->val, entry->aux);
3485 		if (likely(reserved->bytes_changed >= entry_len)) {
3486 			reserved->bytes_changed -= entry_len;
3487 		} else {
3488 			WARN_ON(1);
3489 			reserved->bytes_changed = 0;
3490 		}
3491 	}
3492 
3493 	return ret;
3494 }
3495 
3496 /*
3497  * Try to free some space for qgroup.
3498  *
3499  * For qgroup, there are only 3 ways to free qgroup space:
3500  * - Flush nodatacow write
3501  *   Any nodatacow write will free its reserved data space at run_delalloc_range().
3502  *   In theory, we should only flush nodatacow inodes, but it's not yet
3503  *   possible, so we need to flush the whole root.
3504  *
3505  * - Wait for ordered extents
3506  *   When ordered extents are finished, their reserved metadata is finally
3507  *   converted to per_trans status, which can be freed by later commit
3508  *   transaction.
3509  *
3510  * - Commit transaction
3511  *   This would free the meta_per_trans space.
3512  *   In theory this shouldn't provide much space, but any more qgroup space
3513  *   is needed.
3514  */
3515 static int try_flush_qgroup(struct btrfs_root *root)
3516 {
3517 	struct btrfs_trans_handle *trans;
3518 	int ret;
3519 
3520 	/*
3521 	 * We don't want to run flush again and again, so if there is a running
3522 	 * one, we won't try to start a new flush, but exit directly.
3523 	 */
3524 	if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
3525 		wait_event(root->qgroup_flush_wait,
3526 			!test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
3527 		return 0;
3528 	}
3529 
3530 	ret = btrfs_start_delalloc_snapshot(root);
3531 	if (ret < 0)
3532 		goto out;
3533 	btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
3534 
3535 	trans = btrfs_join_transaction(root);
3536 	if (IS_ERR(trans)) {
3537 		ret = PTR_ERR(trans);
3538 		goto out;
3539 	}
3540 
3541 	ret = btrfs_commit_transaction(trans);
3542 out:
3543 	clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state);
3544 	wake_up(&root->qgroup_flush_wait);
3545 	return ret;
3546 }
3547 
3548 static int qgroup_reserve_data(struct btrfs_inode *inode,
3549 			struct extent_changeset **reserved_ret, u64 start,
3550 			u64 len)
3551 {
3552 	struct btrfs_root *root = inode->root;
3553 	struct extent_changeset *reserved;
3554 	bool new_reserved = false;
3555 	u64 orig_reserved;
3556 	u64 to_reserve;
3557 	int ret;
3558 
3559 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
3560 	    !is_fstree(root->root_key.objectid) || len == 0)
3561 		return 0;
3562 
3563 	/* @reserved parameter is mandatory for qgroup */
3564 	if (WARN_ON(!reserved_ret))
3565 		return -EINVAL;
3566 	if (!*reserved_ret) {
3567 		new_reserved = true;
3568 		*reserved_ret = extent_changeset_alloc();
3569 		if (!*reserved_ret)
3570 			return -ENOMEM;
3571 	}
3572 	reserved = *reserved_ret;
3573 	/* Record already reserved space */
3574 	orig_reserved = reserved->bytes_changed;
3575 	ret = set_record_extent_bits(&inode->io_tree, start,
3576 			start + len -1, EXTENT_QGROUP_RESERVED, reserved);
3577 
3578 	/* Newly reserved space */
3579 	to_reserve = reserved->bytes_changed - orig_reserved;
3580 	trace_btrfs_qgroup_reserve_data(&inode->vfs_inode, start, len,
3581 					to_reserve, QGROUP_RESERVE);
3582 	if (ret < 0)
3583 		goto out;
3584 	ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA);
3585 	if (ret < 0)
3586 		goto cleanup;
3587 
3588 	return ret;
3589 
3590 cleanup:
3591 	qgroup_unreserve_range(inode, reserved, start, len);
3592 out:
3593 	if (new_reserved) {
3594 		extent_changeset_release(reserved);
3595 		kfree(reserved);
3596 		*reserved_ret = NULL;
3597 	}
3598 	return ret;
3599 }
3600 
3601 /*
3602  * Reserve qgroup space for range [start, start + len).
3603  *
3604  * This function will either reserve space from related qgroups or do nothing
3605  * if the range is already reserved.
3606  *
3607  * Return 0 for successful reservation
3608  * Return <0 for error (including -EQUOT)
3609  *
3610  * NOTE: This function may sleep for memory allocation, dirty page flushing and
3611  *	 commit transaction. So caller should not hold any dirty page locked.
3612  */
3613 int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
3614 			struct extent_changeset **reserved_ret, u64 start,
3615 			u64 len)
3616 {
3617 	int ret;
3618 
3619 	ret = qgroup_reserve_data(inode, reserved_ret, start, len);
3620 	if (ret <= 0 && ret != -EDQUOT)
3621 		return ret;
3622 
3623 	ret = try_flush_qgroup(inode->root);
3624 	if (ret < 0)
3625 		return ret;
3626 	return qgroup_reserve_data(inode, reserved_ret, start, len);
3627 }
3628 
3629 /* Free ranges specified by @reserved, normally in error path */
3630 static int qgroup_free_reserved_data(struct btrfs_inode *inode,
3631 			struct extent_changeset *reserved, u64 start, u64 len)
3632 {
3633 	struct btrfs_root *root = inode->root;
3634 	struct ulist_node *unode;
3635 	struct ulist_iterator uiter;
3636 	struct extent_changeset changeset;
3637 	int freed = 0;
3638 	int ret;
3639 
3640 	extent_changeset_init(&changeset);
3641 	len = round_up(start + len, root->fs_info->sectorsize);
3642 	start = round_down(start, root->fs_info->sectorsize);
3643 
3644 	ULIST_ITER_INIT(&uiter);
3645 	while ((unode = ulist_next(&reserved->range_changed, &uiter))) {
3646 		u64 range_start = unode->val;
3647 		/* unode->aux is the inclusive end */
3648 		u64 range_len = unode->aux - range_start + 1;
3649 		u64 free_start;
3650 		u64 free_len;
3651 
3652 		extent_changeset_release(&changeset);
3653 
3654 		/* Only free range in range [start, start + len) */
3655 		if (range_start >= start + len ||
3656 		    range_start + range_len <= start)
3657 			continue;
3658 		free_start = max(range_start, start);
3659 		free_len = min(start + len, range_start + range_len) -
3660 			   free_start;
3661 		/*
3662 		 * TODO: To also modify reserved->ranges_reserved to reflect
3663 		 * the modification.
3664 		 *
3665 		 * However as long as we free qgroup reserved according to
3666 		 * EXTENT_QGROUP_RESERVED, we won't double free.
3667 		 * So not need to rush.
3668 		 */
3669 		ret = clear_record_extent_bits(&inode->io_tree, free_start,
3670 				free_start + free_len - 1,
3671 				EXTENT_QGROUP_RESERVED, &changeset);
3672 		if (ret < 0)
3673 			goto out;
3674 		freed += changeset.bytes_changed;
3675 	}
3676 	btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid, freed,
3677 				  BTRFS_QGROUP_RSV_DATA);
3678 	ret = freed;
3679 out:
3680 	extent_changeset_release(&changeset);
3681 	return ret;
3682 }
3683 
3684 static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
3685 			struct extent_changeset *reserved, u64 start, u64 len,
3686 			int free)
3687 {
3688 	struct extent_changeset changeset;
3689 	int trace_op = QGROUP_RELEASE;
3690 	int ret;
3691 
3692 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &inode->root->fs_info->flags))
3693 		return 0;
3694 
3695 	/* In release case, we shouldn't have @reserved */
3696 	WARN_ON(!free && reserved);
3697 	if (free && reserved)
3698 		return qgroup_free_reserved_data(inode, reserved, start, len);
3699 	extent_changeset_init(&changeset);
3700 	ret = clear_record_extent_bits(&inode->io_tree, start, start + len -1,
3701 				       EXTENT_QGROUP_RESERVED, &changeset);
3702 	if (ret < 0)
3703 		goto out;
3704 
3705 	if (free)
3706 		trace_op = QGROUP_FREE;
3707 	trace_btrfs_qgroup_release_data(&inode->vfs_inode, start, len,
3708 					changeset.bytes_changed, trace_op);
3709 	if (free)
3710 		btrfs_qgroup_free_refroot(inode->root->fs_info,
3711 				inode->root->root_key.objectid,
3712 				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
3713 	ret = changeset.bytes_changed;
3714 out:
3715 	extent_changeset_release(&changeset);
3716 	return ret;
3717 }
3718 
3719 /*
3720  * Free a reserved space range from io_tree and related qgroups
3721  *
3722  * Should be called when a range of pages get invalidated before reaching disk.
3723  * Or for error cleanup case.
3724  * if @reserved is given, only reserved range in [@start, @start + @len) will
3725  * be freed.
3726  *
3727  * For data written to disk, use btrfs_qgroup_release_data().
3728  *
3729  * NOTE: This function may sleep for memory allocation.
3730  */
3731 int btrfs_qgroup_free_data(struct btrfs_inode *inode,
3732 			struct extent_changeset *reserved, u64 start, u64 len)
3733 {
3734 	return __btrfs_qgroup_release_data(inode, reserved, start, len, 1);
3735 }
3736 
3737 /*
3738  * Release a reserved space range from io_tree only.
3739  *
3740  * Should be called when a range of pages get written to disk and corresponding
3741  * FILE_EXTENT is inserted into corresponding root.
3742  *
3743  * Since new qgroup accounting framework will only update qgroup numbers at
3744  * commit_transaction() time, its reserved space shouldn't be freed from
3745  * related qgroups.
3746  *
3747  * But we should release the range from io_tree, to allow further write to be
3748  * COWed.
3749  *
3750  * NOTE: This function may sleep for memory allocation.
3751  */
3752 int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len)
3753 {
3754 	return __btrfs_qgroup_release_data(inode, NULL, start, len, 0);
3755 }
3756 
3757 static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes,
3758 			      enum btrfs_qgroup_rsv_type type)
3759 {
3760 	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
3761 	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
3762 		return;
3763 	if (num_bytes == 0)
3764 		return;
3765 
3766 	spin_lock(&root->qgroup_meta_rsv_lock);
3767 	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
3768 		root->qgroup_meta_rsv_prealloc += num_bytes;
3769 	else
3770 		root->qgroup_meta_rsv_pertrans += num_bytes;
3771 	spin_unlock(&root->qgroup_meta_rsv_lock);
3772 }
3773 
3774 static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes,
3775 			     enum btrfs_qgroup_rsv_type type)
3776 {
3777 	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
3778 	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
3779 		return 0;
3780 	if (num_bytes == 0)
3781 		return 0;
3782 
3783 	spin_lock(&root->qgroup_meta_rsv_lock);
3784 	if (type == BTRFS_QGROUP_RSV_META_PREALLOC) {
3785 		num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc,
3786 				  num_bytes);
3787 		root->qgroup_meta_rsv_prealloc -= num_bytes;
3788 	} else {
3789 		num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans,
3790 				  num_bytes);
3791 		root->qgroup_meta_rsv_pertrans -= num_bytes;
3792 	}
3793 	spin_unlock(&root->qgroup_meta_rsv_lock);
3794 	return num_bytes;
3795 }
3796 
3797 static int qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
3798 				enum btrfs_qgroup_rsv_type type, bool enforce)
3799 {
3800 	struct btrfs_fs_info *fs_info = root->fs_info;
3801 	int ret;
3802 
3803 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
3804 	    !is_fstree(root->root_key.objectid) || num_bytes == 0)
3805 		return 0;
3806 
3807 	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
3808 	trace_qgroup_meta_reserve(root, (s64)num_bytes, type);
3809 	ret = qgroup_reserve(root, num_bytes, enforce, type);
3810 	if (ret < 0)
3811 		return ret;
3812 	/*
3813 	 * Record what we have reserved into root.
3814 	 *
3815 	 * To avoid quota disabled->enabled underflow.
3816 	 * In that case, we may try to free space we haven't reserved
3817 	 * (since quota was disabled), so record what we reserved into root.
3818 	 * And ensure later release won't underflow this number.
3819 	 */
3820 	add_root_meta_rsv(root, num_bytes, type);
3821 	return ret;
3822 }
3823 
3824 int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
3825 				enum btrfs_qgroup_rsv_type type, bool enforce)
3826 {
3827 	int ret;
3828 
3829 	ret = qgroup_reserve_meta(root, num_bytes, type, enforce);
3830 	if (ret <= 0 && ret != -EDQUOT)
3831 		return ret;
3832 
3833 	ret = try_flush_qgroup(root);
3834 	if (ret < 0)
3835 		return ret;
3836 	return qgroup_reserve_meta(root, num_bytes, type, enforce);
3837 }
3838 
3839 void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
3840 {
3841 	struct btrfs_fs_info *fs_info = root->fs_info;
3842 
3843 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
3844 	    !is_fstree(root->root_key.objectid))
3845 		return;
3846 
3847 	/* TODO: Update trace point to handle such free */
3848 	trace_qgroup_meta_free_all_pertrans(root);
3849 	/* Special value -1 means to free all reserved space */
3850 	btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid, (u64)-1,
3851 				  BTRFS_QGROUP_RSV_META_PERTRANS);
3852 }
3853 
3854 void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
3855 			      enum btrfs_qgroup_rsv_type type)
3856 {
3857 	struct btrfs_fs_info *fs_info = root->fs_info;
3858 
3859 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
3860 	    !is_fstree(root->root_key.objectid))
3861 		return;
3862 
3863 	/*
3864 	 * reservation for META_PREALLOC can happen before quota is enabled,
3865 	 * which can lead to underflow.
3866 	 * Here ensure we will only free what we really have reserved.
3867 	 */
3868 	num_bytes = sub_root_meta_rsv(root, num_bytes, type);
3869 	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
3870 	trace_qgroup_meta_reserve(root, -(s64)num_bytes, type);
3871 	btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid,
3872 				  num_bytes, type);
3873 }
3874 
3875 static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
3876 				int num_bytes)
3877 {
3878 	struct btrfs_qgroup *qgroup;
3879 	struct ulist_node *unode;
3880 	struct ulist_iterator uiter;
3881 	int ret = 0;
3882 
3883 	if (num_bytes == 0)
3884 		return;
3885 	if (!fs_info->quota_root)
3886 		return;
3887 
3888 	spin_lock(&fs_info->qgroup_lock);
3889 	qgroup = find_qgroup_rb(fs_info, ref_root);
3890 	if (!qgroup)
3891 		goto out;
3892 	ulist_reinit(fs_info->qgroup_ulist);
3893 	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
3894 		       qgroup_to_aux(qgroup), GFP_ATOMIC);
3895 	if (ret < 0)
3896 		goto out;
3897 	ULIST_ITER_INIT(&uiter);
3898 	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
3899 		struct btrfs_qgroup *qg;
3900 		struct btrfs_qgroup_list *glist;
3901 
3902 		qg = unode_aux_to_qgroup(unode);
3903 
3904 		qgroup_rsv_release(fs_info, qg, num_bytes,
3905 				BTRFS_QGROUP_RSV_META_PREALLOC);
3906 		qgroup_rsv_add(fs_info, qg, num_bytes,
3907 				BTRFS_QGROUP_RSV_META_PERTRANS);
3908 		list_for_each_entry(glist, &qg->groups, next_group) {
3909 			ret = ulist_add(fs_info->qgroup_ulist,
3910 					glist->group->qgroupid,
3911 					qgroup_to_aux(glist->group), GFP_ATOMIC);
3912 			if (ret < 0)
3913 				goto out;
3914 		}
3915 	}
3916 out:
3917 	spin_unlock(&fs_info->qgroup_lock);
3918 }
3919 
3920 void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
3921 {
3922 	struct btrfs_fs_info *fs_info = root->fs_info;
3923 
3924 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
3925 	    !is_fstree(root->root_key.objectid))
3926 		return;
3927 	/* Same as btrfs_qgroup_free_meta_prealloc() */
3928 	num_bytes = sub_root_meta_rsv(root, num_bytes,
3929 				      BTRFS_QGROUP_RSV_META_PREALLOC);
3930 	trace_qgroup_meta_convert(root, num_bytes);
3931 	qgroup_convert_meta(fs_info, root->root_key.objectid, num_bytes);
3932 }
3933 
3934 /*
3935  * Check qgroup reserved space leaking, normally at destroy inode
3936  * time
3937  */
3938 void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode)
3939 {
3940 	struct extent_changeset changeset;
3941 	struct ulist_node *unode;
3942 	struct ulist_iterator iter;
3943 	int ret;
3944 
3945 	extent_changeset_init(&changeset);
3946 	ret = clear_record_extent_bits(&inode->io_tree, 0, (u64)-1,
3947 			EXTENT_QGROUP_RESERVED, &changeset);
3948 
3949 	WARN_ON(ret < 0);
3950 	if (WARN_ON(changeset.bytes_changed)) {
3951 		ULIST_ITER_INIT(&iter);
3952 		while ((unode = ulist_next(&changeset.range_changed, &iter))) {
3953 			btrfs_warn(inode->root->fs_info,
3954 		"leaking qgroup reserved space, ino: %llu, start: %llu, end: %llu",
3955 				btrfs_ino(inode), unode->val, unode->aux);
3956 		}
3957 		btrfs_qgroup_free_refroot(inode->root->fs_info,
3958 				inode->root->root_key.objectid,
3959 				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
3960 
3961 	}
3962 	extent_changeset_release(&changeset);
3963 }
3964 
3965 void btrfs_qgroup_init_swapped_blocks(
3966 	struct btrfs_qgroup_swapped_blocks *swapped_blocks)
3967 {
3968 	int i;
3969 
3970 	spin_lock_init(&swapped_blocks->lock);
3971 	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
3972 		swapped_blocks->blocks[i] = RB_ROOT;
3973 	swapped_blocks->swapped = false;
3974 }
3975 
3976 /*
3977  * Delete all swapped blocks record of @root.
3978  * Every record here means we skipped a full subtree scan for qgroup.
3979  *
3980  * Gets called when committing one transaction.
3981  */
3982 void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root)
3983 {
3984 	struct btrfs_qgroup_swapped_blocks *swapped_blocks;
3985 	int i;
3986 
3987 	swapped_blocks = &root->swapped_blocks;
3988 
3989 	spin_lock(&swapped_blocks->lock);
3990 	if (!swapped_blocks->swapped)
3991 		goto out;
3992 	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
3993 		struct rb_root *cur_root = &swapped_blocks->blocks[i];
3994 		struct btrfs_qgroup_swapped_block *entry;
3995 		struct btrfs_qgroup_swapped_block *next;
3996 
3997 		rbtree_postorder_for_each_entry_safe(entry, next, cur_root,
3998 						     node)
3999 			kfree(entry);
4000 		swapped_blocks->blocks[i] = RB_ROOT;
4001 	}
4002 	swapped_blocks->swapped = false;
4003 out:
4004 	spin_unlock(&swapped_blocks->lock);
4005 }
4006 
4007 /*
4008  * Add subtree roots record into @subvol_root.
4009  *
4010  * @subvol_root:	tree root of the subvolume tree get swapped
4011  * @bg:			block group under balance
4012  * @subvol_parent/slot:	pointer to the subtree root in subvolume tree
4013  * @reloc_parent/slot:	pointer to the subtree root in reloc tree
4014  *			BOTH POINTERS ARE BEFORE TREE SWAP
4015  * @last_snapshot:	last snapshot generation of the subvolume tree
4016  */
4017 int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
4018 		struct btrfs_root *subvol_root,
4019 		struct btrfs_block_group *bg,
4020 		struct extent_buffer *subvol_parent, int subvol_slot,
4021 		struct extent_buffer *reloc_parent, int reloc_slot,
4022 		u64 last_snapshot)
4023 {
4024 	struct btrfs_fs_info *fs_info = subvol_root->fs_info;
4025 	struct btrfs_qgroup_swapped_blocks *blocks = &subvol_root->swapped_blocks;
4026 	struct btrfs_qgroup_swapped_block *block;
4027 	struct rb_node **cur;
4028 	struct rb_node *parent = NULL;
4029 	int level = btrfs_header_level(subvol_parent) - 1;
4030 	int ret = 0;
4031 
4032 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
4033 		return 0;
4034 
4035 	if (btrfs_node_ptr_generation(subvol_parent, subvol_slot) >
4036 	    btrfs_node_ptr_generation(reloc_parent, reloc_slot)) {
4037 		btrfs_err_rl(fs_info,
4038 		"%s: bad parameter order, subvol_gen=%llu reloc_gen=%llu",
4039 			__func__,
4040 			btrfs_node_ptr_generation(subvol_parent, subvol_slot),
4041 			btrfs_node_ptr_generation(reloc_parent, reloc_slot));
4042 		return -EUCLEAN;
4043 	}
4044 
4045 	block = kmalloc(sizeof(*block), GFP_NOFS);
4046 	if (!block) {
4047 		ret = -ENOMEM;
4048 		goto out;
4049 	}
4050 
4051 	/*
4052 	 * @reloc_parent/slot is still before swap, while @block is going to
4053 	 * record the bytenr after swap, so we do the swap here.
4054 	 */
4055 	block->subvol_bytenr = btrfs_node_blockptr(reloc_parent, reloc_slot);
4056 	block->subvol_generation = btrfs_node_ptr_generation(reloc_parent,
4057 							     reloc_slot);
4058 	block->reloc_bytenr = btrfs_node_blockptr(subvol_parent, subvol_slot);
4059 	block->reloc_generation = btrfs_node_ptr_generation(subvol_parent,
4060 							    subvol_slot);
4061 	block->last_snapshot = last_snapshot;
4062 	block->level = level;
4063 
4064 	/*
4065 	 * If we have bg == NULL, we're called from btrfs_recover_relocation(),
4066 	 * no one else can modify tree blocks thus we qgroup will not change
4067 	 * no matter the value of trace_leaf.
4068 	 */
4069 	if (bg && bg->flags & BTRFS_BLOCK_GROUP_DATA)
4070 		block->trace_leaf = true;
4071 	else
4072 		block->trace_leaf = false;
4073 	btrfs_node_key_to_cpu(reloc_parent, &block->first_key, reloc_slot);
4074 
4075 	/* Insert @block into @blocks */
4076 	spin_lock(&blocks->lock);
4077 	cur = &blocks->blocks[level].rb_node;
4078 	while (*cur) {
4079 		struct btrfs_qgroup_swapped_block *entry;
4080 
4081 		parent = *cur;
4082 		entry = rb_entry(parent, struct btrfs_qgroup_swapped_block,
4083 				 node);
4084 
4085 		if (entry->subvol_bytenr < block->subvol_bytenr) {
4086 			cur = &(*cur)->rb_left;
4087 		} else if (entry->subvol_bytenr > block->subvol_bytenr) {
4088 			cur = &(*cur)->rb_right;
4089 		} else {
4090 			if (entry->subvol_generation !=
4091 					block->subvol_generation ||
4092 			    entry->reloc_bytenr != block->reloc_bytenr ||
4093 			    entry->reloc_generation !=
4094 					block->reloc_generation) {
4095 				/*
4096 				 * Duplicated but mismatch entry found.
4097 				 * Shouldn't happen.
4098 				 *
4099 				 * Marking qgroup inconsistent should be enough
4100 				 * for end users.
4101 				 */
4102 				WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
4103 				ret = -EEXIST;
4104 			}
4105 			kfree(block);
4106 			goto out_unlock;
4107 		}
4108 	}
4109 	rb_link_node(&block->node, parent, cur);
4110 	rb_insert_color(&block->node, &blocks->blocks[level]);
4111 	blocks->swapped = true;
4112 out_unlock:
4113 	spin_unlock(&blocks->lock);
4114 out:
4115 	if (ret < 0)
4116 		fs_info->qgroup_flags |=
4117 			BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
4118 	return ret;
4119 }
4120 
4121 /*
4122  * Check if the tree block is a subtree root, and if so do the needed
4123  * delayed subtree trace for qgroup.
4124  *
4125  * This is called during btrfs_cow_block().
4126  */
4127 int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
4128 					 struct btrfs_root *root,
4129 					 struct extent_buffer *subvol_eb)
4130 {
4131 	struct btrfs_fs_info *fs_info = root->fs_info;
4132 	struct btrfs_qgroup_swapped_blocks *blocks = &root->swapped_blocks;
4133 	struct btrfs_qgroup_swapped_block *block;
4134 	struct extent_buffer *reloc_eb = NULL;
4135 	struct rb_node *node;
4136 	bool found = false;
4137 	bool swapped = false;
4138 	int level = btrfs_header_level(subvol_eb);
4139 	int ret = 0;
4140 	int i;
4141 
4142 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
4143 		return 0;
4144 	if (!is_fstree(root->root_key.objectid) || !root->reloc_root)
4145 		return 0;
4146 
4147 	spin_lock(&blocks->lock);
4148 	if (!blocks->swapped) {
4149 		spin_unlock(&blocks->lock);
4150 		return 0;
4151 	}
4152 	node = blocks->blocks[level].rb_node;
4153 
4154 	while (node) {
4155 		block = rb_entry(node, struct btrfs_qgroup_swapped_block, node);
4156 		if (block->subvol_bytenr < subvol_eb->start) {
4157 			node = node->rb_left;
4158 		} else if (block->subvol_bytenr > subvol_eb->start) {
4159 			node = node->rb_right;
4160 		} else {
4161 			found = true;
4162 			break;
4163 		}
4164 	}
4165 	if (!found) {
4166 		spin_unlock(&blocks->lock);
4167 		goto out;
4168 	}
4169 	/* Found one, remove it from @blocks first and update blocks->swapped */
4170 	rb_erase(&block->node, &blocks->blocks[level]);
4171 	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
4172 		if (RB_EMPTY_ROOT(&blocks->blocks[i])) {
4173 			swapped = true;
4174 			break;
4175 		}
4176 	}
4177 	blocks->swapped = swapped;
4178 	spin_unlock(&blocks->lock);
4179 
4180 	/* Read out reloc subtree root */
4181 	reloc_eb = read_tree_block(fs_info, block->reloc_bytenr,
4182 				   block->reloc_generation, block->level,
4183 				   &block->first_key);
4184 	if (IS_ERR(reloc_eb)) {
4185 		ret = PTR_ERR(reloc_eb);
4186 		reloc_eb = NULL;
4187 		goto free_out;
4188 	}
4189 	if (!extent_buffer_uptodate(reloc_eb)) {
4190 		ret = -EIO;
4191 		goto free_out;
4192 	}
4193 
4194 	ret = qgroup_trace_subtree_swap(trans, reloc_eb, subvol_eb,
4195 			block->last_snapshot, block->trace_leaf);
4196 free_out:
4197 	kfree(block);
4198 	free_extent_buffer(reloc_eb);
4199 out:
4200 	if (ret < 0) {
4201 		btrfs_err_rl(fs_info,
4202 			     "failed to account subtree at bytenr %llu: %d",
4203 			     subvol_eb->start, ret);
4204 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
4205 	}
4206 	return ret;
4207 }
4208 
4209 void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
4210 {
4211 	struct btrfs_qgroup_extent_record *entry;
4212 	struct btrfs_qgroup_extent_record *next;
4213 	struct rb_root *root;
4214 
4215 	root = &trans->delayed_refs.dirty_extent_root;
4216 	rbtree_postorder_for_each_entry_safe(entry, next, root, node) {
4217 		ulist_free(entry->old_roots);
4218 		kfree(entry);
4219 	}
4220 }
4221