// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>
#include <linux/sizes.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "backref.h"
#include "extent_io.h"
#include "qgroup.h"


/* TODO XXX FIXME
 *  - subvol delete -> delete when ref goes to 0? delete limits also?
 *  - reorganize keys
 *  - compressed
 *  - sync
 *  - copy also limits on subvol creation
 *  - limit
 *  - caches for ulists
 *  - performance benchmarks
 *  - check all ioctl parameters
 */

/*
 * Helpers to access qgroup reservation
 *
 * Callers should ensure the lock context and type are valid
 */

static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup)
{
	u64 ret = 0;
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		ret += qgroup->rsv.values[i];

	return ret;
}

#ifdef CONFIG_BTRFS_DEBUG
static const char *qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type)
{
	if (type == BTRFS_QGROUP_RSV_DATA)
		return "data";
	if (type == BTRFS_QGROUP_RSV_META_PERTRANS)
		return "meta_pertrans";
	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
		return "meta_prealloc";
	return NULL;
}
#endif

static void qgroup_rsv_add(struct btrfs_fs_info *fs_info,
			   struct btrfs_qgroup *qgroup, u64 num_bytes,
			   enum btrfs_qgroup_rsv_type type)
{
	trace_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);
	qgroup->rsv.values[type] += num_bytes;
}

static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,
			       struct btrfs_qgroup *qgroup, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type)
{
	trace_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type);
	if (qgroup->rsv.values[type] >= num_bytes) {
		qgroup->rsv.values[type] -= num_bytes;
		return;
	}
#ifdef CONFIG_BTRFS_DEBUG
	WARN_RATELIMIT(1,
		"qgroup %llu %s reserved space underflow, have %llu to free %llu",
		qgroup->qgroupid, qgroup_rsv_type_str(type),
		qgroup->rsv.values[type], num_bytes);
#endif
	qgroup->rsv.values[type] = 0;
}

static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info,
				     struct btrfs_qgroup *dest,
				     struct btrfs_qgroup *src)
{
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		qgroup_rsv_add(fs_info, dest, src->rsv.values[i], i);
}

static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info,
					 struct btrfs_qgroup *dest,
					 struct btrfs_qgroup *src)
{
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		qgroup_rsv_release(fs_info, dest, src->rsv.values[i], i);
}

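/*
 * The old/new refcnt helpers below implement reference counting across
 * qgroup accounting runs without ever resetting the counters: each run uses
 * a fresh, monotonically increasing sequence number @seq as its zero point,
 * so a counter left over from an earlier run (refcnt < seq) simply reads
 * back as zero.
 */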
static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->old_refcnt < seq)
		qg->old_refcnt = seq;
	qg->old_refcnt += mod;
}

static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->new_refcnt < seq)
		qg->new_refcnt = seq;
	qg->new_refcnt += mod;
}

static inline u64 btrfs_qgroup_get_old_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->old_refcnt < seq)
		return 0;
	return qg->old_refcnt - seq;
}

static inline u64 btrfs_qgroup_get_new_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->new_refcnt < seq)
		return 0;
	return qg->new_refcnt - seq;
}

/*
 * glue structure to represent the relations between qgroups.
 */
struct btrfs_qgroup_list {
	struct list_head next_group;
	struct list_head next_member;
	struct btrfs_qgroup *group;
	struct btrfs_qgroup *member;
};
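/*
 * Each btrfs_qgroup_list instance is one edge in the qgroup hierarchy: it is
 * linked into both member->groups (via next_group) and group->members (via
 * next_member), so a relation can be found and unlinked from either side.
 */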

static inline u64 qgroup_to_aux(struct btrfs_qgroup *qg)
{
	return (u64)(uintptr_t)qg;
}

static inline struct btrfs_qgroup *unode_aux_to_qgroup(struct ulist_node *n)
{
	return (struct btrfs_qgroup *)(uintptr_t)n->aux;
}

static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags);
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);

/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
					   u64 qgroupid)
{
	struct rb_node *n = fs_info->qgroup_tree.rb_node;
	struct btrfs_qgroup *qgroup;

	while (n) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		if (qgroup->qgroupid < qgroupid)
			n = n->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			n = n->rb_right;
		else
			return qgroup;
	}
	return NULL;
}

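/*
 * add_qgroup_rb() below finds or creates the in-memory qgroup for a given
 * qgroupid; it allocates with GFP_ATOMIC because its callers hold the
 * qgroup_lock spinlock, so the allocation must not sleep.
 */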
/* must be called with qgroup_lock held */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
					  u64 qgroupid)
{
	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_qgroup *qgroup;

	while (*p) {
		parent = *p;
		qgroup = rb_entry(parent, struct btrfs_qgroup, node);

		if (qgroup->qgroupid < qgroupid)
			p = &(*p)->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			p = &(*p)->rb_right;
		else
			return qgroup;
	}

	qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
	if (!qgroup)
		return ERR_PTR(-ENOMEM);

	qgroup->qgroupid = qgroupid;
	INIT_LIST_HEAD(&qgroup->groups);
	INIT_LIST_HEAD(&qgroup->members);
	INIT_LIST_HEAD(&qgroup->dirty);

	rb_link_node(&qgroup->node, parent, p);
	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);

	return qgroup;
}

static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
{
	struct btrfs_qgroup_list *list;

	list_del(&qgroup->dirty);
	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}

	while (!list_empty(&qgroup->members)) {
		list = list_first_entry(&qgroup->members,
					struct btrfs_qgroup_list, next_member);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}
	kfree(qgroup);
}

/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);

	if (!qgroup)
		return -ENOENT;

	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
	__del_qgroup_rb(qgroup);
	return 0;
}

/* must be called with qgroup_lock held */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list = kzalloc(sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	list->group = parent;
	list->member = member;
	list_add_tail(&list->next_group, &member->groups);
	list_add_tail(&list->next_member, &parent->members);

	return 0;
}

/* must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			list_del(&list->next_group);
			list_del(&list->next_member);
			kfree(list);
			return 0;
		}
	}
	return -ENOENT;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
			       u64 rfer, u64 excl)
{
	struct btrfs_qgroup *qgroup;

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup)
		return -EINVAL;
	if (qgroup->rfer != rfer || qgroup->excl != excl)
		return -EINVAL;
	return 0;
}
#endif

/*
 * The full config is read in one go; this is only called from open_ctree().
 * It doesn't use any locking, as at this point we're still single-threaded.
 */
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path = NULL;
	struct extent_buffer *l;
	int slot;
	int ret = 0;
	u64 flags = 0;
	u64 rescan_progress = 0;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/* default this to quota off, in case no status key is found */
	fs_info->qgroup_flags = 0;

	/*
	 * pass 1: read status, all qgroup infos and limits
	 */
	key.objectid = 0;
	key.type = 0;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
	if (ret)
		goto out;

	while (1) {
		struct btrfs_qgroup *qgroup;

		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
			struct btrfs_qgroup_status_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_status_item);

			if (btrfs_qgroup_status_version(l, ptr) !=
			    BTRFS_QGROUP_STATUS_VERSION) {
				btrfs_err(fs_info,
				 "old qgroup version, quota disabled");
				goto out;
			}
			if (btrfs_qgroup_status_generation(l, ptr) !=
			    fs_info->generation) {
				flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
				btrfs_err(fs_info,
					"qgroup generation mismatch, marked as inconsistent");
			}
			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
									  ptr);
			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
			goto next1;
		}

		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
			goto next1;

		qgroup = find_qgroup_rb(fs_info, found_key.offset);
		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
			btrfs_err(fs_info, "inconsistent qgroup config");
			flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		}
		if (!qgroup) {
			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				goto out;
			}
		}
		switch (found_key.type) {
		case BTRFS_QGROUP_INFO_KEY: {
			struct btrfs_qgroup_info_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_info_item);
			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
			/* generation currently unused */
			break;
		}
		case BTRFS_QGROUP_LIMIT_KEY: {
			struct btrfs_qgroup_limit_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_limit_item);
			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
			break;
		}
		}
next1:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
	btrfs_release_path(path);

	/*
	 * pass 2: read all qgroup relations
	 */
	key.objectid = 0;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
	if (ret)
		goto out;
	while (1) {
		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
			goto next2;

		if (found_key.objectid > found_key.offset) {
			/* parent <- member, not needed to build config */
			/* FIXME should we omit the key completely? */
			goto next2;
		}

		ret = add_relation_rb(fs_info, found_key.objectid,
				      found_key.offset);
		if (ret == -ENOENT) {
			btrfs_warn(fs_info,
				"orphan qgroup relation 0x%llx->0x%llx",
				found_key.objectid, found_key.offset);
			ret = 0;	/* ignore the error */
		}
		if (ret)
			goto out;
next2:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
out:
	fs_info->qgroup_flags |= flags;
	if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
		clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
		 ret >= 0)
		ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
	btrfs_free_path(path);

	if (ret < 0) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	}

	return ret < 0 ? ret : 0;
}

/*
 * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(),
 * the first two of which run in single-threaded paths. For the third, we have
 * already set quota_root to NULL with qgroup_lock held, so it is safe to clean
 * up the in-memory structures without holding qgroup_lock.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	while ((n = rb_first(&fs_info->qgroup_tree))) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		rb_erase(n, &fs_info->qgroup_tree);
		__del_qgroup_rb(qgroup);
	}
	/*
	 * We call btrfs_free_qgroup_config() when unmounting
	 * filesystem and disabling quota, so we set qgroup_ulist
	 * to be null here to avoid double free.
	 */
	ulist_free(fs_info->qgroup_ulist);
	fs_info->qgroup_ulist = NULL;
}

static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);

	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
out:
	btrfs_free_path(path);
	return ret;
}

static int add_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_qgroup_info_item *qgroup_info;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	if (btrfs_is_testing(quota_root->fs_info))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;

	/*
	 * Avoid a transaction abort by catching -EEXIST here. In that
	 * case, we proceed by re-initializing the existing structure
	 * on disk.
	 */

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_info));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
				 struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);

	btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_limit));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
				  struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);

	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
	if (ret)
		goto out;

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
				    struct btrfs_qgroup *qgroup)
{
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_LIMIT_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
				   struct btrfs_qgroup *qgroup)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_info_item *qgroup_info;
	int ret;
	int slot;

	if (btrfs_is_testing(fs_info))
		return 0;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_status_item *ptr;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
	btrfs_set_qgroup_status_rescan(l, ptr,
				fs_info->qgroup_rescan_progress.objectid);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Called from btrfs_quota_disable(), after quota_root has already been
 * detached from fs_info under qgroup_lock; the spinlock itself is not held
 * here, since the tree modifications below can sleep.
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf = NULL;
	int ret;
	int nr = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	key.objectid = 0;
	key.offset = 0;
	key.type = 0;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			goto out;
		leaf = path->nodes[0];
		nr = btrfs_header_nritems(leaf);
		if (!nr)
			break;
		/*
		 * delete the leaf one by one
		 * since the whole tree is going
		 * to be deleted.
		 */
		path->slots[0] = 0;
		ret = btrfs_del_items(trans, root, path, 0, nr);
		if (ret)
			goto out;

		btrfs_release_path(path);
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

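/*
 * Enable qgroups: create the quota tree with its initial status item,
 * pre-create one qgroup item per existing subvolume, and finally kick off
 * a rescan so the new qgroups start out with consistent counters.
 */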
int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path = NULL;
	struct btrfs_qgroup_status_item *ptr;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_qgroup *qgroup = NULL;
	struct btrfs_trans_handle *trans = NULL;
	int ret = 0;
	int slot;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (fs_info->quota_root)
		goto out;

	/*
	 * 1 for quota root item
	 * 1 for BTRFS_QGROUP_STATUS item
	 *
	 * We also need 2*n items for the QGROUP_INFO/QGROUP_LIMIT items
	 * of each subvolume. Those are not reserved here, since reserving
	 * them all up front would be excessive.
	 */
	trans = btrfs_start_transaction(tree_root, 2);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	/*
	 * initially create the quota tree
	 */
	quota_root = btrfs_create_tree(trans, fs_info,
				       BTRFS_QUOTA_TREE_OBJECTID);
	if (IS_ERR(quota_root)) {
		ret = PTR_ERR(quota_root);
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		btrfs_abort_transaction(trans, ret);
		goto out_free_root;
	}

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*ptr));
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	leaf = path->nodes[0];
	ptr = btrfs_item_ptr(leaf, path->slots[0],
				 struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
				BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);

	btrfs_mark_buffer_dirty(leaf);

	key.objectid = 0;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = 0;

	btrfs_release_path(path);
	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
	if (ret > 0)
		goto out_add_root;
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.type == BTRFS_ROOT_REF_KEY) {
			ret = add_qgroup_item(trans, quota_root,
					      found_key.offset);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}

			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
		}
		ret = btrfs_next_item(tree_root, path);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out_free_path;
		}
		if (ret)
			break;
	}

out_add_root:
	btrfs_release_path(path);
	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
	if (IS_ERR(qgroup)) {
		ret = PTR_ERR(qgroup);
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	ret = btrfs_commit_transaction(trans);
	trans = NULL;
	if (ret)
		goto out_free_path;

	/*
	 * Set quota enabled flag after committing the transaction, to avoid
	 * deadlocks on fs_info->qgroup_ioctl_lock with concurrent snapshot
	 * creation.
	 */
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_root = quota_root;
	set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	spin_unlock(&fs_info->qgroup_lock);

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (!ret) {
		qgroup_rescan_zero_tracking(fs_info);
		btrfs_queue_work(fs_info->qgroup_rescan_workers,
				 &fs_info->qgroup_rescan_work);
	}

out_free_path:
	btrfs_free_path(path);
out_free_root:
	if (ret) {
		free_extent_buffer(quota_root->node);
		free_extent_buffer(quota_root->commit_root);
		kfree(quota_root);
	}
out:
	if (ret) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		if (trans)
			btrfs_end_transaction(trans);
	}
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

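/*
 * Disable qgroups: detach the quota root under qgroup_lock, free the
 * in-memory config, then delete the quota tree contents and the tree root
 * itself.
 */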
int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_trans_handle *trans = NULL;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root)
		goto out;

	/*
	 * 1 For the root item
	 *
	 * We should also reserve enough items for the quota tree deletion in
	 * btrfs_clean_quota_tree but this is not done.
	 */
	trans = btrfs_start_transaction(fs_info->tree_root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	btrfs_qgroup_wait_for_completion(fs_info, false);
	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	fs_info->quota_root = NULL;
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);

	btrfs_free_qgroup_config(fs_info);

	ret = btrfs_clean_quota_tree(trans, quota_root);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto end_trans;
	}

	ret = btrfs_del_root(trans, &quota_root->root_key);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto end_trans;
	}

	list_del(&quota_root->dirty_list);

	btrfs_tree_lock(quota_root->node);
	clean_tree_block(fs_info, quota_root->node);
	btrfs_tree_unlock(quota_root->node);
	btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);

	free_extent_buffer(quota_root->node);
	free_extent_buffer(quota_root->commit_root);
	kfree(quota_root);

end_trans:
	ret = btrfs_end_transaction(trans);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

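/*
 * Mark a qgroup as needing its on-disk info item updated. The dirty list is
 * drained when the qgroup items are written back at transaction commit time
 * (see btrfs_run_qgroups()).
 */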
static void qgroup_dirty(struct btrfs_fs_info *fs_info,
			 struct btrfs_qgroup *qgroup)
{
	if (list_empty(&qgroup->dirty))
		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}

/*
 * The easy accounting: we're updating a qgroup relationship whose child
 * qgroup only has exclusive extents.
 *
 * In this case, all exclusive extents will also be exclusive for the parent,
 * so excl/rfer just get added/removed.
 *
 * The same applies to qgroup reservation space, which should also be added
 * to/removed from the parent. Otherwise, when the child releases reservation
 * space, the parent would underflow its reservation (in the relation-adding
 * case).
 *
 * Caller should hold fs_info->qgroup_lock.
 */
static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
				    struct ulist *tmp, u64 ref_root,
				    struct btrfs_qgroup *src, int sign)
{
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *glist;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	u64 num_bytes = src->excl;
	int ret = 0;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	qgroup->rfer += sign * num_bytes;
	qgroup->rfer_cmpr += sign * num_bytes;

	WARN_ON(sign < 0 && qgroup->excl < num_bytes);
	qgroup->excl += sign * num_bytes;
	qgroup->excl_cmpr += sign * num_bytes;

	if (sign > 0)
		qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
	else
		qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);

	qgroup_dirty(fs_info, qgroup);

	/* Get all of the parent groups that contain this qgroup */
	list_for_each_entry(glist, &qgroup->groups, next_group) {
		ret = ulist_add(tmp, glist->group->qgroupid,
				qgroup_to_aux(glist->group), GFP_ATOMIC);
		if (ret < 0)
			goto out;
	}

	/* Iterate all of the parents and adjust their reference counts */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(tmp, &uiter))) {
		qgroup = unode_aux_to_qgroup(unode);
		qgroup->rfer += sign * num_bytes;
		qgroup->rfer_cmpr += sign * num_bytes;
		WARN_ON(sign < 0 && qgroup->excl < num_bytes);
		qgroup->excl += sign * num_bytes;
		if (sign > 0)
			qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
		else
			qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
		qgroup->excl_cmpr += sign * num_bytes;
		qgroup_dirty(fs_info, qgroup);

		/* Add any parents of the parents */
		list_for_each_entry(glist, &qgroup->groups, next_group) {
			ret = ulist_add(tmp, glist->group->qgroupid,
					qgroup_to_aux(glist->group), GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;
out:
	return ret;
}

/*
 * Quick path for updating a qgroup with only exclusive refs.
 *
 * In that case, updating all parents is enough.
 * Otherwise we need to do a full rescan.
 * Caller should also hold fs_info->qgroup_lock.
 *
 * Return 0 for a quick update, >0 when a full rescan is needed
 * (the INCONSISTENT flag is set in that case).
 * Return <0 for other errors.
 */
static int quick_update_accounting(struct btrfs_fs_info *fs_info,
				   struct ulist *tmp, u64 src, u64 dst,
				   int sign)
{
	struct btrfs_qgroup *qgroup;
	int ret = 1;
	int err = 0;

	qgroup = find_qgroup_rb(fs_info, src);
	if (!qgroup)
		goto out;
	if (qgroup->excl == qgroup->rfer) {
		ret = 0;
		err = __qgroup_excl_accounting(fs_info, tmp, dst,
					       qgroup, sign);
		if (err < 0) {
			ret = err;
			goto out;
		}
	}
out:
	if (ret)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	return ret;
}

int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
			      u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	struct ulist *tmp;
	int ret = 0;

	/* Check the level of src and dst first */
	if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
		return -EINVAL;

	tmp = ulist_alloc(GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}
	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* Check if such a qgroup relation exists already */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			ret = -EEXIST;
			goto out;
		}
	}

	ret = add_qgroup_relation_item(trans, src, dst);
	if (ret)
		goto out;

	ret = add_qgroup_relation_item(trans, dst, src);
	if (ret) {
		del_qgroup_relation_item(trans, src, dst);
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	ret = add_relation_rb(fs_info, src, dst);
	if (ret < 0) {
		spin_unlock(&fs_info->qgroup_lock);
		goto out;
	}
	ret = quick_update_accounting(fs_info, tmp, src, dst, 1);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	ulist_free(tmp);
	return ret;
}

static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
				 u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	struct ulist *tmp;
	int ret = 0;
	int err;

	tmp = ulist_alloc(GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* Check if such a qgroup relation exists already */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent)
			goto exist;
	}
	ret = -ENOENT;
	goto out;
exist:
	ret = del_qgroup_relation_item(trans, src, dst);
	err = del_qgroup_relation_item(trans, dst, src);
	if (err && !ret)
		ret = err;

	spin_lock(&fs_info->qgroup_lock);
	del_relation_rb(fs_info, src, dst);
	ret = quick_update_accounting(fs_info, tmp, src, dst, -1);
	spin_unlock(&fs_info->qgroup_lock);
out:
	ulist_free(tmp);
	return ret;
}

int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
			      u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	ret = __del_qgroup_relation(trans, src, dst);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	return ret;
}

int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}
	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (qgroup) {
		ret = -EEXIST;
		goto out;
	}

	ret = add_qgroup_item(trans, quota_root, qgroupid);
	if (ret)
		goto out;

	spin_lock(&fs_info->qgroup_lock);
	qgroup = add_qgroup_rb(fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);

	if (IS_ERR(qgroup))
		ret = PTR_ERR(qgroup);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *list;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	}

	/* Check if there are no children of this qgroup */
	if (!list_empty(&qgroup->members)) {
		ret = -EBUSY;
		goto out;
	}

	ret = del_qgroup_item(trans, qgroupid);
	if (ret && ret != -ENOENT)
		goto out;

	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		ret = __del_qgroup_relation(trans, qgroupid,
					    list->group->qgroupid);
		if (ret)
			goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	del_qgroup_rb(fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
		       struct btrfs_qgroup_limit *limit)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;
	/* Sometimes we want to clear the limit on this qgroup.
	 * To support this, we treat -1 as a special value that
	 * tells the kernel to clear the limit on this qgroup.
	 */
	const u64 CLEAR_VALUE = -1;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
		if (limit->max_rfer == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
			qgroup->max_rfer = 0;
		} else {
			qgroup->max_rfer = limit->max_rfer;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
		if (limit->max_excl == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
			qgroup->max_excl = 0;
		} else {
			qgroup->max_excl = limit->max_excl;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
		if (limit->rsv_rfer == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
			qgroup->rsv_rfer = 0;
		} else {
			qgroup->rsv_rfer = limit->rsv_rfer;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
		if (limit->rsv_excl == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
			qgroup->rsv_excl = 0;
		} else {
			qgroup->rsv_excl = limit->rsv_excl;
		}
	}
	qgroup->lim_flags |= limit->flags;

	spin_unlock(&fs_info->qgroup_lock);

	ret = update_qgroup_limit_item(trans, qgroup);
	if (ret) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		btrfs_info(fs_info, "unable to update quota limit for %llu",
		       qgroupid);
	}

out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

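/*
 * Insert @record into the delayed-refs dirty extent rbtree, keyed by bytenr.
 * Returns 1 (without inserting) if a record for the same bytenr already
 * exists, 0 on success; the caller must hold delayed_refs->lock.
 */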
int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
				struct btrfs_delayed_ref_root *delayed_refs,
				struct btrfs_qgroup_extent_record *record)
{
	struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_qgroup_extent_record *entry;
	u64 bytenr = record->bytenr;

	lockdep_assert_held(&delayed_refs->lock);
	trace_btrfs_qgroup_trace_extent(fs_info, record);

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
				 node);
		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return 1;
	}

	rb_link_node(&record->node, parent_node, p);
	rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
	return 0;
}

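/*
 * Post-process a just-inserted extent record: look up the set of roots
 * currently referencing qrecord->bytenr and store it as the record's
 * old_roots, the "before" snapshot used later by the accounting code. A
 * failed lookup only marks the quota state inconsistent rather than failing
 * the whole operation.
 */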
int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
				   struct btrfs_qgroup_extent_record *qrecord)
{
	struct ulist *old_root;
	u64 bytenr = qrecord->bytenr;
	int ret;

	ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &old_root, false);
	if (ret < 0) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		btrfs_warn(fs_info,
"error accounting new delayed refs extent (err code: %d), quota inconsistent",
			ret);
		return 0;
	}

	/*
	 * Here we don't need to take the lock of
	 * trans->transaction->delayed_refs, since the inserted qrecord won't
	 * be deleted; only qrecord->node may be modified (by a new qrecord
	 * insert).
	 *
	 * So modifying qrecord->old_roots is safe here.
	 */
	qrecord->old_roots = old_root;
	return 0;
}

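/*
 * Allocate and queue a dirty extent record for (@bytenr, @num_bytes) in the
 * current transaction, then capture its old_roots. This is a no-op when
 * quotas are disabled or the range is empty.
 */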
int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
			      u64 num_bytes, gfp_t gfp_flag)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup_extent_record *record;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)
	    || bytenr == 0 || num_bytes == 0)
		return 0;
	record = kmalloc(sizeof(*record), gfp_flag);
	if (!record)
		return -ENOMEM;

	delayed_refs = &trans->transaction->delayed_refs;
	record->bytenr = bytenr;
	record->num_bytes = num_bytes;
	record->old_roots = NULL;

	spin_lock(&delayed_refs->lock);
	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
	spin_unlock(&delayed_refs->lock);
	if (ret > 0) {
		kfree(record);
		return 0;
	}
	return btrfs_qgroup_trace_extent_post(fs_info, record);
}

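/*
 * Queue dirty extent records for every regular (non-inline, non-hole) file
 * extent referenced by leaf @eb, so the data extents it points to get
 * accounted as well.
 */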
int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
				  struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int nr = btrfs_header_nritems(eb);
	int i, extent_type, ret;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	u64 bytenr, num_bytes;

	/* We can be called directly from walk_up_proc() */
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	for (i = 0; i < nr; i++) {
		btrfs_item_key_to_cpu(eb, &key, i);

		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;

		fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
		/* filter out non qgroup-accountable extents */
		extent_type = btrfs_file_extent_type(eb, fi);

		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;

		bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
		if (!bytenr)
			continue;

		num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);

		ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes,
						GFP_NOFS);
		if (ret)
			return ret;
	}
	cond_resched();
	return 0;
}

/*
 * Walk up the tree from the bottom, freeing leaves and any interior
 * nodes which have had all slots visited. If a node (leaf or
 * interior) is freed, the node above it will have its slot
 * incremented. The root node will never be freed.
 *
 * At the end of this function, we should have a path which has all
 * slots incremented to the next position for a search. If we need to
 * read a new node it will be NULL and the node above it will have the
 * correct slot selected for a later read.
 *
 * If we increment the root node's slot counter past the number of
 * elements, 1 is returned to signal completion of the search.
 */
static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
{
	int level = 0;
	int nr, slot;
	struct extent_buffer *eb;

	if (root_level == 0)
		return 1;

	while (level <= root_level) {
		eb = path->nodes[level];
		nr = btrfs_header_nritems(eb);
		path->slots[level]++;
		slot = path->slots[level];
		if (slot >= nr || level == 0) {
			/*
			 * Don't free the root - we will detect this
			 * condition after our loop and return a
			 * positive value for caller to stop walking the tree.
			 */
			if (level != root_level) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				path->locks[level] = 0;

				free_extent_buffer(eb);
				path->nodes[level] = NULL;
				path->slots[level] = 0;
			}
		} else {
			/*
			 * We have a valid slot to walk back down
			 * from. Stop here so caller can process these
			 * new nodes.
			 */
			break;
		}

		level++;
	}

	eb = path->nodes[root_level];
	if (path->slots[root_level] >= btrfs_header_nritems(eb))
		return 1;

	return 0;
}

/*
 * Helper function to trace a subtree tree block swap.
 *
 * The swap happens at the highest tree block, but there may be a lot of
 * tree blocks involved.
 *
 * For example:
 *  OO = Old tree blocks
 *  NN = New tree blocks allocated during balance
 *
 *           File tree (257)                  Reloc tree for 257
 * L2              OO                                NN
 *               /    \                            /    \
 * L1          OO      OO (a)                    OO      NN (a)
 *            / \     / \                       / \     / \
 * L0       OO   OO OO   OO                   OO   OO NN   NN
 *                  (b)  (c)                          (b)  (c)
 *
 * When calling qgroup_trace_extent_swap(), we will pass:
 * @src_eb = OO(a)
 * @dst_path = [ nodes[1] = NN(a), nodes[0] = NN(c) ]
 * @dst_level = 0
 * @root_level = 1
 *
 * In that case, qgroup_trace_extent_swap() will search from OO(a) to
 * reach OO(c), then mark both OO(c) and NN(c) as qgroup dirty.
 *
 * The main work of qgroup_trace_extent_swap() can be split into 3 parts:
 *
 * 1) Tree search from @src_eb
 *    It acts as a simplified btrfs_search_slot().
 *    The key for the search can be extracted from @dst_path->nodes[dst_level]
 *    (first key).
 *
 * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty
 *    NOTE: In the above case, OO(a) and NN(a) won't be marked qgroup dirty.
 *    They should be marked during the previous (@dst_level = 1) iteration.
 *
 * 3) Mark file extents in leaves dirty
 *    We don't have a good way to pick out only the new file extents, so we
 *    still follow the old method of scanning all file extents in the leaf.
 *
 * This function frees us from keeping two paths, so later we only need to
 * care about how to iterate all the new tree blocks in the reloc tree.
 */
static int qgroup_trace_extent_swap(struct btrfs_trans_handle *trans,
				    struct extent_buffer *src_eb,
				    struct btrfs_path *dst_path,
				    int dst_level, int root_level,
				    bool trace_leaf)
{
	struct btrfs_key key;
	struct btrfs_path *src_path;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	u32 nodesize = fs_info->nodesize;
	int cur_level = root_level;
	int ret;

	BUG_ON(dst_level > root_level);
	/* Level mismatch */
	if (btrfs_header_level(src_eb) != root_level)
		return -EINVAL;

	src_path = btrfs_alloc_path();
	if (!src_path) {
		ret = -ENOMEM;
		goto out;
	}

	if (dst_level)
		btrfs_node_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
	else
		btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0);

	/* For src_path */
	extent_buffer_get(src_eb);
	src_path->nodes[root_level] = src_eb;
	src_path->slots[root_level] = dst_path->slots[root_level];
	src_path->locks[root_level] = 0;

	/* A simplified version of btrfs_search_slot() */
	while (cur_level >= dst_level) {
		struct btrfs_key src_key;
		struct btrfs_key dst_key;

		if (src_path->nodes[cur_level] == NULL) {
			struct btrfs_key first_key;
			struct extent_buffer *eb;
			int parent_slot;
			u64 child_gen;
			u64 child_bytenr;

			eb = src_path->nodes[cur_level + 1];
			parent_slot = src_path->slots[cur_level + 1];
			child_bytenr = btrfs_node_blockptr(eb, parent_slot);
			child_gen = btrfs_node_ptr_generation(eb, parent_slot);
			btrfs_node_key_to_cpu(eb, &first_key, parent_slot);

			eb = read_tree_block(fs_info, child_bytenr, child_gen,
					     cur_level, &first_key);
			if (IS_ERR(eb)) {
				ret = PTR_ERR(eb);
				goto out;
			} else if (!extent_buffer_uptodate(eb)) {
				free_extent_buffer(eb);
				ret = -EIO;
				goto out;
			}

			src_path->nodes[cur_level] = eb;

			btrfs_tree_read_lock(eb);
			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
			src_path->locks[cur_level] = BTRFS_READ_LOCK_BLOCKING;
		}

		src_path->slots[cur_level] = dst_path->slots[cur_level];
		if (cur_level) {
			btrfs_node_key_to_cpu(dst_path->nodes[cur_level],
					&dst_key, dst_path->slots[cur_level]);
			btrfs_node_key_to_cpu(src_path->nodes[cur_level],
					&src_key, src_path->slots[cur_level]);
		} else {
			btrfs_item_key_to_cpu(dst_path->nodes[cur_level],
					&dst_key, dst_path->slots[cur_level]);
			btrfs_item_key_to_cpu(src_path->nodes[cur_level],
					&src_key, src_path->slots[cur_level]);
		}
		/* Content mismatch, something went wrong */
		if (btrfs_comp_cpu_keys(&dst_key, &src_key)) {
			ret = -ENOENT;
			goto out;
		}
		cur_level--;
	}

	/*
	 * Now both @dst_path and @src_path have been populated, record the tree
	 * blocks for qgroup accounting.
	 */
	ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start,
			nodesize, GFP_NOFS);
	if (ret < 0)
		goto out;
	ret = btrfs_qgroup_trace_extent(trans,
			dst_path->nodes[dst_level]->start,
			nodesize, GFP_NOFS);
	if (ret < 0)
		goto out;

	/* Record leaf file extents */
	if (dst_level == 0 && trace_leaf) {
		ret = btrfs_qgroup_trace_leaf_items(trans, src_path->nodes[0]);
		if (ret < 0)
			goto out;
		ret = btrfs_qgroup_trace_leaf_items(trans, dst_path->nodes[0]);
	}
out:
	btrfs_free_path(src_path);
	return ret;
}

/*
 * Helper function to do a recursive generation-aware depth-first search, to
 * locate all new tree blocks in a subtree of a reloc tree.
 *
 * E.g. (OO = Old tree blocks, NN = New tree blocks, whose gen == last_snapshot)
 *         reloc tree
 * L2         NN (a)
 *          /    \
 * L1    OO        NN (b)
 *      /  \      /  \
 * L0  OO  OO    OO  NN
 *               (c) (d)
 * If we pass:
 * @dst_path = [ nodes[1] = NN(b), nodes[0] = NULL ],
 * @cur_level = 1
 * @root_level = 1
 *
 * We will iterate through tree blocks NN(b) and NN(d) and inform qgroup to
 * trace those tree blocks along with their counterparts in the file tree.
 * During the search, old tree blocks like OO(c) are skipped, as the tree
 * block swap won't affect them.
 */
1907 static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans,
1908 					   struct extent_buffer *src_eb,
1909 					   struct btrfs_path *dst_path,
1910 					   int cur_level, int root_level,
1911 					   u64 last_snapshot, bool trace_leaf)
1912 {
1913 	struct btrfs_fs_info *fs_info = trans->fs_info;
1914 	struct extent_buffer *eb;
1915 	bool need_cleanup = false;
1916 	int ret = 0;
1917 	int i;
1918 
1919 	/* Level sanity check */
1920 	if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL ||
1921 	    root_level < 0 || root_level >= BTRFS_MAX_LEVEL ||
1922 	    root_level < cur_level) {
1923 		btrfs_err_rl(fs_info,
1924 			"%s: bad levels, cur_level=%d root_level=%d",
1925 			__func__, cur_level, root_level);
1926 		return -EUCLEAN;
1927 	}
1928 
1929 	/* Read the tree block if needed */
1930 	if (dst_path->nodes[cur_level] == NULL) {
1931 		struct btrfs_key first_key;
1932 		int parent_slot;
1933 		u64 child_gen;
1934 		u64 child_bytenr;
1935 
1936 		/*
1937 		 * dst_path->nodes[root_level] must be initialized before
1938 		 * calling this function.
1939 		 */
1940 		if (cur_level == root_level) {
1941 			btrfs_err_rl(fs_info,
1942 	"%s: dst_path->nodes[%d] not initialized, root_level=%d cur_level=%d",
1943 				__func__, root_level, root_level, cur_level);
1944 			return -EUCLEAN;
1945 		}
1946 
1947 		/*
1948 		 * We need to get child blockptr/gen from parent before we can
1949 		 * read it.
1950 		 */
1951 		eb = dst_path->nodes[cur_level + 1];
1952 		parent_slot = dst_path->slots[cur_level + 1];
1953 		child_bytenr = btrfs_node_blockptr(eb, parent_slot);
1954 		child_gen = btrfs_node_ptr_generation(eb, parent_slot);
1955 		btrfs_node_key_to_cpu(eb, &first_key, parent_slot);
1956 
1957 		/* This node is old, no need to trace */
1958 		if (child_gen < last_snapshot)
1959 			goto out;
1960 
1961 		eb = read_tree_block(fs_info, child_bytenr, child_gen,
1962 				     cur_level, &first_key);
1963 		if (IS_ERR(eb)) {
1964 			ret = PTR_ERR(eb);
1965 			goto out;
1966 		} else if (!extent_buffer_uptodate(eb)) {
1967 			free_extent_buffer(eb);
1968 			ret = -EIO;
1969 			goto out;
1970 		}
1971 
1972 		dst_path->nodes[cur_level] = eb;
1973 		dst_path->slots[cur_level] = 0;
1974 
1975 		btrfs_tree_read_lock(eb);
1976 		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1977 		dst_path->locks[cur_level] = BTRFS_READ_LOCK_BLOCKING;
1978 		need_cleanup = true;
1979 	}
1980 
1981 	/* Now record this tree block and its counter part for qgroups */
1982 	ret = qgroup_trace_extent_swap(trans, src_eb, dst_path, cur_level,
1983 				       root_level, trace_leaf);
1984 	if (ret < 0)
1985 		goto cleanup;
1986 
1987 	eb = dst_path->nodes[cur_level];
1988 
1989 	if (cur_level > 0) {
1990 		/* Iterate all child tree blocks */
1991 		for (i = 0; i < btrfs_header_nritems(eb); i++) {
1992 			/* Skip old tree blocks as they won't be swapped */
1993 			if (btrfs_node_ptr_generation(eb, i) < last_snapshot)
1994 				continue;
1995 			dst_path->slots[cur_level] = i;
1996 
1997 			/* Recursive call (at most 7 times) */
1998 			ret = qgroup_trace_new_subtree_blocks(trans, src_eb,
1999 					dst_path, cur_level - 1, root_level,
2000 					last_snapshot, trace_leaf);
2001 			if (ret < 0)
2002 				goto cleanup;
2003 		}
2004 	}
2005 
2006 cleanup:
2007 	if (need_cleanup) {
2008 		/* Clean up */
2009 		btrfs_tree_unlock_rw(dst_path->nodes[cur_level],
2010 				     dst_path->locks[cur_level]);
2011 		free_extent_buffer(dst_path->nodes[cur_level]);
2012 		dst_path->nodes[cur_level] = NULL;
2013 		dst_path->slots[cur_level] = 0;
2014 		dst_path->locks[cur_level] = 0;
2015 	}
2016 out:
2017 	return ret;
2018 }
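
/*
 * A sketch of one call into the function above, reusing the example tree
 * from its comment (hypothetical call, error handling ignored):
 *
 *	qgroup_trace_new_subtree_blocks(trans, src_eb, dst_path, 1, 1, ...)
 *	  qgroup_trace_extent_swap()	traces NN(b) and its counterpart
 *	  child OO(c): gen < last_snapshot, skipped
 *	  child NN(d): recurse with cur_level == 0
 *	    qgroup_trace_extent_swap()	traces NN(d) and its counterpart
 *
 * Since cur_level decreases by one per recursion and levels are bounded by
 * BTRFS_MAX_LEVEL, the recursion depth is at most 7, as noted at the
 * recursive call site.
 */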
2019 
2020 static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
2021 				struct extent_buffer *src_eb,
2022 				struct extent_buffer *dst_eb,
2023 				u64 last_snapshot, bool trace_leaf)
2024 {
2025 	struct btrfs_fs_info *fs_info = trans->fs_info;
2026 	struct btrfs_path *dst_path = NULL;
2027 	int level;
2028 	int ret;
2029 
2030 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2031 		return 0;
2032 
2033 	/* Wrong parameter order */
2034 	if (btrfs_header_generation(src_eb) > btrfs_header_generation(dst_eb)) {
2035 		btrfs_err_rl(fs_info,
2036 		"%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__,
2037 			     btrfs_header_generation(src_eb),
2038 			     btrfs_header_generation(dst_eb));
2039 		return -EUCLEAN;
2040 	}
2041 
2042 	if (!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb)) {
2043 		ret = -EIO;
2044 		goto out;
2045 	}
2046 
2047 	level = btrfs_header_level(dst_eb);
2048 	dst_path = btrfs_alloc_path();
2049 	if (!dst_path) {
2050 		ret = -ENOMEM;
2051 		goto out;
2052 	}
2053 	/* For dst_path */
2054 	extent_buffer_get(dst_eb);
2055 	dst_path->nodes[level] = dst_eb;
2056 	dst_path->slots[level] = 0;
2057 	dst_path->locks[level] = 0;
2058 
2059 	/* Do the generation-aware depth-first search */
2060 	ret = qgroup_trace_new_subtree_blocks(trans, src_eb, dst_path, level,
2061 					      level, last_snapshot, trace_leaf);
2062 	if (ret < 0)
2063 		goto out;
2064 	ret = 0;
2065 
2066 out:
2067 	btrfs_free_path(dst_path);
2068 	if (ret < 0)
2069 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2070 	return ret;
2071 }
2072 
2073 /*
2074  * Inform qgroup to trace subtree swap used in balance.
2075  *
2076  * Unlike btrfs_qgroup_trace_subtree(), this function will only trace
2077  * new tree blocks whose generation is equal to (or larger than) @last_snapshot.
2078  *
2079  * Will go down the tree block pointed to by @dst_eb (located via @dst_parent
2080  * and @dst_slot), find tree blocks whose generation is at or above
2081  * @last_snapshot, then go down @src_eb (via @src_parent and @src_slot) to
2082  * find each counterpart block, mark both tree blocks as qgroup dirty, and
2083  * skip all tree blocks whose generation is smaller than last_snapshot.
2084  *
2085  * Compared to the original btrfs_qgroup_trace_subtree(), this skips tons of
2086  * tree blocks, which otherwise could make balance very slow on a large tree.
2087  *
2088  * @src_parent, @src_slot: pointer to src (file tree) eb.
2089  * @dst_parent, @dst_slot: pointer to dst (reloc tree) eb.
2090  */
2091 int btrfs_qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
2092 				struct btrfs_block_group_cache *bg_cache,
2093 				struct extent_buffer *src_parent, int src_slot,
2094 				struct extent_buffer *dst_parent, int dst_slot,
2095 				u64 last_snapshot)
2096 {
2097 	struct btrfs_fs_info *fs_info = trans->fs_info;
2098 	struct btrfs_key first_key;
2099 	struct extent_buffer *src_eb = NULL;
2100 	struct extent_buffer *dst_eb = NULL;
2101 	bool trace_leaf = false;
2102 	u64 child_gen;
2103 	u64 child_bytenr;
2104 	int ret;
2105 
2106 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2107 		return 0;
2108 
2109 	/* Check parameter order */
2110 	if (btrfs_node_ptr_generation(src_parent, src_slot) >
2111 	    btrfs_node_ptr_generation(dst_parent, dst_slot)) {
2112 		btrfs_err_rl(fs_info,
2113 		"%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__,
2114 			btrfs_node_ptr_generation(src_parent, src_slot),
2115 			btrfs_node_ptr_generation(dst_parent, dst_slot));
2116 		return -EUCLEAN;
2117 	}
2118 
2119 	/*
2120 	 * Only trace leaves if we're relocating data block groups, as this
2121 	 * avoids tons of data extent tracing for meta/sys bg relocation.
2122 	 */
2123 	if (bg_cache->flags & BTRFS_BLOCK_GROUP_DATA)
2124 		trace_leaf = true;
2125 	/* Read out real @src_eb, pointed to by @src_parent and @src_slot */
2126 	child_bytenr = btrfs_node_blockptr(src_parent, src_slot);
2127 	child_gen = btrfs_node_ptr_generation(src_parent, src_slot);
2128 	btrfs_node_key_to_cpu(src_parent, &first_key, src_slot);
2129 
2130 	src_eb = read_tree_block(fs_info, child_bytenr, child_gen,
2131 			btrfs_header_level(src_parent) - 1, &first_key);
2132 	if (IS_ERR(src_eb)) {
2133 		ret = PTR_ERR(src_eb);
2134 		goto out;
2135 	}
2136 
2137 	/* Read out real @dst_eb, pointed to by @dst_parent and @dst_slot */
2138 	child_bytenr = btrfs_node_blockptr(dst_parent, dst_slot);
2139 	child_gen = btrfs_node_ptr_generation(dst_parent, dst_slot);
2140 	btrfs_node_key_to_cpu(dst_parent, &first_key, dst_slot);
2141 
2142 	dst_eb = read_tree_block(fs_info, child_bytenr, child_gen,
2143 			btrfs_header_level(dst_parent) - 1, &first_key);
2144 	if (IS_ERR(dst_eb)) {
2145 		ret = PTR_ERR(dst_eb);
2146 		goto out;
2147 	}
2148 
2149 	if (!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb)) {
2150 		ret = -EINVAL;
2151 		goto out;
2152 	}
2153 
2154 	/* Do the generation-aware depth-first search */
2155 	ret = qgroup_trace_subtree_swap(trans, src_eb, dst_eb, last_snapshot,
2156 					trace_leaf);
2157 	if (ret < 0)
2158 		goto out;
2159 	ret = 0;
2160 
2161 out:
2162 	free_extent_buffer(src_eb);
2163 	free_extent_buffer(dst_eb);
2164 	return ret;
2165 }
2166 
2167 int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
2168 			       struct extent_buffer *root_eb,
2169 			       u64 root_gen, int root_level)
2170 {
2171 	struct btrfs_fs_info *fs_info = trans->fs_info;
2172 	int ret = 0;
2173 	int level;
2174 	struct extent_buffer *eb = root_eb;
2175 	struct btrfs_path *path = NULL;
2176 
2177 	BUG_ON(root_level < 0 || root_level >= BTRFS_MAX_LEVEL);
2178 	BUG_ON(root_eb == NULL);
2179 
2180 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2181 		return 0;
2182 
2183 	if (!extent_buffer_uptodate(root_eb)) {
2184 		ret = btrfs_read_buffer(root_eb, root_gen, root_level, NULL);
2185 		if (ret)
2186 			goto out;
2187 	}
2188 
2189 	if (root_level == 0) {
2190 		ret = btrfs_qgroup_trace_leaf_items(trans, root_eb);
2191 		goto out;
2192 	}
2193 
2194 	path = btrfs_alloc_path();
2195 	if (!path)
2196 		return -ENOMEM;
2197 
2198 	/*
2199 	 * Walk down the tree.  Missing extent blocks are filled in as
2200 	 * we go. Metadata is accounted every time we read a new
2201 	 * extent block.
2202 	 *
2203 	 * When we reach a leaf, we account for file extent items in it,
2204 	 * walk back up the tree (adjusting slot pointers as we go)
2205 	 * and restart the search process.
2206 	 */
2207 	extent_buffer_get(root_eb); /* For path */
2208 	path->nodes[root_level] = root_eb;
2209 	path->slots[root_level] = 0;
2210 	path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
2211 walk_down:
2212 	level = root_level;
2213 	while (level >= 0) {
2214 		if (path->nodes[level] == NULL) {
2215 			struct btrfs_key first_key;
2216 			int parent_slot;
2217 			u64 child_gen;
2218 			u64 child_bytenr;
2219 
2220 			/*
2221 			 * We need to get child blockptr/gen from parent before
2222 			 * we can read it.
2223 			 */
2224 			eb = path->nodes[level + 1];
2225 			parent_slot = path->slots[level + 1];
2226 			child_bytenr = btrfs_node_blockptr(eb, parent_slot);
2227 			child_gen = btrfs_node_ptr_generation(eb, parent_slot);
2228 			btrfs_node_key_to_cpu(eb, &first_key, parent_slot);
2229 
2230 			eb = read_tree_block(fs_info, child_bytenr, child_gen,
2231 					     level, &first_key);
2232 			if (IS_ERR(eb)) {
2233 				ret = PTR_ERR(eb);
2234 				goto out;
2235 			} else if (!extent_buffer_uptodate(eb)) {
2236 				free_extent_buffer(eb);
2237 				ret = -EIO;
2238 				goto out;
2239 			}
2240 
2241 			path->nodes[level] = eb;
2242 			path->slots[level] = 0;
2243 
2244 			btrfs_tree_read_lock(eb);
2245 			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
2246 			path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
2247 
2248 			ret = btrfs_qgroup_trace_extent(trans, child_bytenr,
2249 							fs_info->nodesize,
2250 							GFP_NOFS);
2251 			if (ret)
2252 				goto out;
2253 		}
2254 
2255 		if (level == 0) {
2256 			ret = btrfs_qgroup_trace_leaf_items(trans,
2257 							    path->nodes[level]);
2258 			if (ret)
2259 				goto out;
2260 
2261 			/* Nonzero return here means we completed our search */
2262 			ret = adjust_slots_upwards(path, root_level);
2263 			if (ret)
2264 				break;
2265 
2266 			/* Restart search with new slots */
2267 			goto walk_down;
2268 		}
2269 
2270 		level--;
2271 	}
2272 
2273 	ret = 0;
2274 out:
2275 	btrfs_free_path(path);
2276 
2277 	return ret;
2278 }
2279 
2280 #define UPDATE_NEW	0
2281 #define UPDATE_OLD	1
2282 /*
2283  * Walk all of the roots that point to the bytenr and adjust their refcnts.
2284  */
2285 static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
2286 				struct ulist *roots, struct ulist *tmp,
2287 				struct ulist *qgroups, u64 seq, int update_old)
2288 {
2289 	struct ulist_node *unode;
2290 	struct ulist_iterator uiter;
2291 	struct ulist_node *tmp_unode;
2292 	struct ulist_iterator tmp_uiter;
2293 	struct btrfs_qgroup *qg;
2294 	int ret = 0;
2295 
2296 	if (!roots)
2297 		return 0;
2298 	ULIST_ITER_INIT(&uiter);
2299 	while ((unode = ulist_next(roots, &uiter))) {
2300 		qg = find_qgroup_rb(fs_info, unode->val);
2301 		if (!qg)
2302 			continue;
2303 
2304 		ulist_reinit(tmp);
2305 		ret = ulist_add(qgroups, qg->qgroupid, qgroup_to_aux(qg),
2306 				GFP_ATOMIC);
2307 		if (ret < 0)
2308 			return ret;
2309 		ret = ulist_add(tmp, qg->qgroupid, qgroup_to_aux(qg), GFP_ATOMIC);
2310 		if (ret < 0)
2311 			return ret;
2312 		ULIST_ITER_INIT(&tmp_uiter);
2313 		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
2314 			struct btrfs_qgroup_list *glist;
2315 
2316 			qg = unode_aux_to_qgroup(tmp_unode);
2317 			if (update_old)
2318 				btrfs_qgroup_update_old_refcnt(qg, seq, 1);
2319 			else
2320 				btrfs_qgroup_update_new_refcnt(qg, seq, 1);
2321 			list_for_each_entry(glist, &qg->groups, next_group) {
2322 				ret = ulist_add(qgroups, glist->group->qgroupid,
2323 						qgroup_to_aux(glist->group),
2324 						GFP_ATOMIC);
2325 				if (ret < 0)
2326 					return ret;
2327 				ret = ulist_add(tmp, glist->group->qgroupid,
2328 						qgroup_to_aux(glist->group),
2329 						GFP_ATOMIC);
2330 				if (ret < 0)
2331 					return ret;
2332 			}
2333 		}
2334 	}
2335 	return 0;
2336 }
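
/*
 * E.g. (a sketch with hypothetical qgroup IDs): if @roots == { 257 } and
 * qgroup 0/257 is a member of 1/100, which itself is a member of 2/200,
 * then one call bumps the old or new refcnt (per @update_old) by 1 for
 * each of 0/257, 1/100 and 2/200, and collects all three into @qgroups
 * for the later qgroup_update_counters() pass.
 */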
2337 
2338 /*
2339  * Update qgroup rfer/excl counters.
2340  * The rfer update is easy, the code is self-explanatory.
2341  *
2342  * The excl update is tricky, the update is split into 2 parts.
2343  * Part 1: Possible exclusive <-> shared detection:
2344  *	|	A	|	!A	|
2345  *  -------------------------------------
2346  *  B	|	*	|	-	|
2347  *  -------------------------------------
2348  *  !B	|	+	|	**	|
2349  *  -------------------------------------
2350  *
2351  * Conditions:
2352  * A:	cur_old_roots < nr_old_roots	(not exclusive before)
2353  * !A:	cur_old_roots == nr_old_roots	(possible exclusive before)
2354  * B:	cur_new_roots < nr_new_roots	(not exclusive now)
2355  * !B:	cur_new_roots == nr_new_roots	(possible exclusive now)
2356  *
2357  * Results:
2358  * +: Possible sharing -> exclusive	-: Possible exclusive -> sharing
2359  * *: Definitely not changed.		**: Possible unchanged.
2360  *
2361  * For the !A and !B conditions, the exception is the cur_old/new_roots == 0
2362  * case.
2363  *
2364  * To make the logic clear, we first use conditions A and B to split the
2365  * combinations into 4 results.
2366  *
2367  * Then, for results "+" and "-", check the old/new_roots == 0 case, as
2368  * there only one variable may be 0.
2369  *
2370  * Lastly, check result **; since two variables may be 0 there, split it
2371  * again (2x2).  But this time we don't need to consider other things; the
2372  * code and logic are easy to understand now.
2373  */
2374 static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
2375 				  struct ulist *qgroups,
2376 				  u64 nr_old_roots,
2377 				  u64 nr_new_roots,
2378 				  u64 num_bytes, u64 seq)
2379 {
2380 	struct ulist_node *unode;
2381 	struct ulist_iterator uiter;
2382 	struct btrfs_qgroup *qg;
2383 	u64 cur_new_count, cur_old_count;
2384 
2385 	ULIST_ITER_INIT(&uiter);
2386 	while ((unode = ulist_next(qgroups, &uiter))) {
2387 		bool dirty = false;
2388 
2389 		qg = unode_aux_to_qgroup(unode);
2390 		cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
2391 		cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
2392 
2393 		trace_qgroup_update_counters(fs_info, qg, cur_old_count,
2394 					     cur_new_count);
2395 
2396 		/* Rfer update part */
2397 		if (cur_old_count == 0 && cur_new_count > 0) {
2398 			qg->rfer += num_bytes;
2399 			qg->rfer_cmpr += num_bytes;
2400 			dirty = true;
2401 		}
2402 		if (cur_old_count > 0 && cur_new_count == 0) {
2403 			qg->rfer -= num_bytes;
2404 			qg->rfer_cmpr -= num_bytes;
2405 			dirty = true;
2406 		}
2407 
2408 		/* Excl update part */
2409 		/* Exclusive/none -> shared case */
2410 		if (cur_old_count == nr_old_roots &&
2411 		    cur_new_count < nr_new_roots) {
2412 			/* Exclusive -> shared */
2413 			if (cur_old_count != 0) {
2414 				qg->excl -= num_bytes;
2415 				qg->excl_cmpr -= num_bytes;
2416 				dirty = true;
2417 			}
2418 		}
2419 
2420 		/* Shared -> exclusive/none case */
2421 		if (cur_old_count < nr_old_roots &&
2422 		    cur_new_count == nr_new_roots) {
2423 			/* Shared->exclusive */
2424 			if (cur_new_count != 0) {
2425 				qg->excl += num_bytes;
2426 				qg->excl_cmpr += num_bytes;
2427 				dirty = true;
2428 			}
2429 		}
2430 
2431 		/* Exclusive/none -> exclusive/none case */
2432 		if (cur_old_count == nr_old_roots &&
2433 		    cur_new_count == nr_new_roots) {
2434 			if (cur_old_count == 0) {
2435 				/* None -> exclusive/none */
2436 
2437 				if (cur_new_count != 0) {
2438 					/* None -> exclusive */
2439 					qg->excl += num_bytes;
2440 					qg->excl_cmpr += num_bytes;
2441 					dirty = true;
2442 				}
2443 				/* None -> none, nothing changed */
2444 			} else {
2445 				/* Exclusive -> exclusive/none */
2446 
2447 				if (cur_new_count == 0) {
2448 					/* Exclusive -> none */
2449 					qg->excl -= num_bytes;
2450 					qg->excl_cmpr -= num_bytes;
2451 					dirty = true;
2452 				}
2453 				/* Exclusive -> exclusive, nothing changed */
2454 			}
2455 		}
2456 
2457 		if (dirty)
2458 			qgroup_dirty(fs_info, qg);
2459 	}
2460 	return 0;
2461 }
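
/*
 * A worked example with hypothetical numbers: a 16K extent was referenced
 * only by subvolume A (nr_old_roots == 1) and is now shared by A and B
 * (nr_new_roots == 2).
 *
 * For A: cur_old_count == 1 == nr_old_roots and cur_new_count == 1 <
 * nr_new_roots, so rfer is unchanged (still referenced) while excl drops
 * by 16K (exclusive -> shared).
 * For B: cur_old_count == 0 and cur_new_count == 1, so rfer grows by 16K
 * while excl is untouched, since cur_new_count < nr_new_roots means B
 * never becomes the exclusive owner.
 */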
2462 
2463 /*
2464  * Check if the @roots ulist is potentially a list of fs tree roots
2465  *
2466  * Return 0 if it's definitely not a list of fs/subvol tree roots
2467  * Return 1 if it may contain fs/subvol tree roots (considering an empty
2468  *          list as well)
2469  */
2470 static int maybe_fs_roots(struct ulist *roots)
2471 {
2472 	struct ulist_node *unode;
2473 	struct ulist_iterator uiter;
2474 
2475 	/* Empty one, still possible for fs roots */
2476 	if (!roots || roots->nnodes == 0)
2477 		return 1;
2478 
2479 	ULIST_ITER_INIT(&uiter);
2480 	unode = ulist_next(roots, &uiter);
2481 	if (!unode)
2482 		return 1;
2483 
2484 	/*
2485 	 * If it contains fs tree roots, then it must belong to fs/subvol
2486 	 * trees.
2487 	 * If it contains a non-fs tree, it won't be shared with fs/subvol trees.
2488 	 */
2489 	return is_fstree(unode->val);
2490 }
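
/*
 * E.g. a ulist whose first entry is root 257 (a subvolume) returns 1,
 * while one starting with a non-fs root such as the extent tree root
 * returns 0; an empty ulist returns 1 as it could still be for fs roots.
 */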
2491 
2492 int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
2493 				u64 num_bytes, struct ulist *old_roots,
2494 				struct ulist *new_roots)
2495 {
2496 	struct btrfs_fs_info *fs_info = trans->fs_info;
2497 	struct ulist *qgroups = NULL;
2498 	struct ulist *tmp = NULL;
2499 	u64 seq;
2500 	u64 nr_new_roots = 0;
2501 	u64 nr_old_roots = 0;
2502 	int ret = 0;
2503 
2504 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2505 		return 0;
2506 
2507 	if (new_roots) {
2508 		if (!maybe_fs_roots(new_roots))
2509 			goto out_free;
2510 		nr_new_roots = new_roots->nnodes;
2511 	}
2512 	if (old_roots) {
2513 		if (!maybe_fs_roots(old_roots))
2514 			goto out_free;
2515 		nr_old_roots = old_roots->nnodes;
2516 	}
2517 
2518 	/* Quick exit, either not fs tree roots, or won't affect any qgroup */
2519 	if (nr_old_roots == 0 && nr_new_roots == 0)
2520 		goto out_free;
2521 
2522 	BUG_ON(!fs_info->quota_root);
2523 
2524 	trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr,
2525 					num_bytes, nr_old_roots, nr_new_roots);
2526 
2527 	qgroups = ulist_alloc(GFP_NOFS);
2528 	if (!qgroups) {
2529 		ret = -ENOMEM;
2530 		goto out_free;
2531 	}
2532 	tmp = ulist_alloc(GFP_NOFS);
2533 	if (!tmp) {
2534 		ret = -ENOMEM;
2535 		goto out_free;
2536 	}
2537 
2538 	mutex_lock(&fs_info->qgroup_rescan_lock);
2539 	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
2540 		if (fs_info->qgroup_rescan_progress.objectid <= bytenr) {
2541 			mutex_unlock(&fs_info->qgroup_rescan_lock);
2542 			ret = 0;
2543 			goto out_free;
2544 		}
2545 	}
2546 	mutex_unlock(&fs_info->qgroup_rescan_lock);
2547 
2548 	spin_lock(&fs_info->qgroup_lock);
2549 	seq = fs_info->qgroup_seq;
2550 
2551 	/* Update old refcnts using old_roots */
2552 	ret = qgroup_update_refcnt(fs_info, old_roots, tmp, qgroups, seq,
2553 				   UPDATE_OLD);
2554 	if (ret < 0)
2555 		goto out;
2556 
2557 	/* Update new refcnts using new_roots */
2558 	ret = qgroup_update_refcnt(fs_info, new_roots, tmp, qgroups, seq,
2559 				   UPDATE_NEW);
2560 	if (ret < 0)
2561 		goto out;
2562 
2563 	qgroup_update_counters(fs_info, qgroups, nr_old_roots, nr_new_roots,
2564 			       num_bytes, seq);
2565 
2566 	/*
2567 	 * Bump qgroup_seq to avoid seq overlap
2568 	 */
2569 	fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1;
2570 out:
2571 	spin_unlock(&fs_info->qgroup_lock);
2572 out_free:
2573 	ulist_free(tmp);
2574 	ulist_free(qgroups);
2575 	ulist_free(old_roots);
2576 	ulist_free(new_roots);
2577 	return ret;
2578 }
2579 
2580 int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
2581 {
2582 	struct btrfs_fs_info *fs_info = trans->fs_info;
2583 	struct btrfs_qgroup_extent_record *record;
2584 	struct btrfs_delayed_ref_root *delayed_refs;
2585 	struct ulist *new_roots = NULL;
2586 	struct rb_node *node;
2587 	u64 num_dirty_extents = 0;
2588 	u64 qgroup_to_skip;
2589 	int ret = 0;
2590 
2591 	delayed_refs = &trans->transaction->delayed_refs;
2592 	qgroup_to_skip = delayed_refs->qgroup_to_skip;
2593 	while ((node = rb_first(&delayed_refs->dirty_extent_root))) {
2594 		record = rb_entry(node, struct btrfs_qgroup_extent_record,
2595 				  node);
2596 
2597 		num_dirty_extents++;
2598 		trace_btrfs_qgroup_account_extents(fs_info, record);
2599 
2600 		if (!ret) {
2601 			/*
2602 			 * Old roots should have been searched when inserting
2603 			 * the qgroup extent record
2604 			 */
2605 			if (WARN_ON(!record->old_roots)) {
2606 				/* Search commit root to find old_roots */
2607 				ret = btrfs_find_all_roots(NULL, fs_info,
2608 						record->bytenr, 0,
2609 						&record->old_roots, false);
2610 				if (ret < 0)
2611 					goto cleanup;
2612 			}
2613 
2614 			/*
2615 			 * Use SEQ_LAST as time_seq to do a special search that
2616 			 * doesn't lock the tree or delayed_refs and searches
2617 			 * the current root. It's safe inside commit_transaction().
2618 			 */
2619 			ret = btrfs_find_all_roots(trans, fs_info,
2620 				record->bytenr, SEQ_LAST, &new_roots, false);
2621 			if (ret < 0)
2622 				goto cleanup;
2623 			if (qgroup_to_skip) {
2624 				ulist_del(new_roots, qgroup_to_skip, 0);
2625 				ulist_del(record->old_roots, qgroup_to_skip,
2626 					  0);
2627 			}
2628 			ret = btrfs_qgroup_account_extent(trans, record->bytenr,
2629 							  record->num_bytes,
2630 							  record->old_roots,
2631 							  new_roots);
2632 			record->old_roots = NULL;
2633 			new_roots = NULL;
2634 		}
2635 cleanup:
2636 		ulist_free(record->old_roots);
2637 		ulist_free(new_roots);
2638 		new_roots = NULL;
2639 		rb_erase(node, &delayed_refs->dirty_extent_root);
2640 		kfree(record);
2641 
2642 	}
2643 	trace_qgroup_num_dirty_extents(fs_info, trans->transid,
2644 				       num_dirty_extents);
2645 	return ret;
2646 }
2647 
2648 /*
2649  * Called from commit_transaction(). Writes all changed qgroups to disk.
2650  */
2651 int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
2652 {
2653 	struct btrfs_fs_info *fs_info = trans->fs_info;
2654 	struct btrfs_root *quota_root = fs_info->quota_root;
2655 	int ret = 0;
2656 
2657 	if (!quota_root)
2658 		return ret;
2659 
2660 	spin_lock(&fs_info->qgroup_lock);
2661 	while (!list_empty(&fs_info->dirty_qgroups)) {
2662 		struct btrfs_qgroup *qgroup;
2663 		qgroup = list_first_entry(&fs_info->dirty_qgroups,
2664 					  struct btrfs_qgroup, dirty);
2665 		list_del_init(&qgroup->dirty);
2666 		spin_unlock(&fs_info->qgroup_lock);
2667 		ret = update_qgroup_info_item(trans, qgroup);
2668 		if (ret)
2669 			fs_info->qgroup_flags |=
2670 					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2671 		ret = update_qgroup_limit_item(trans, qgroup);
2672 		if (ret)
2673 			fs_info->qgroup_flags |=
2674 					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2675 		spin_lock(&fs_info->qgroup_lock);
2676 	}
2677 	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2678 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
2679 	else
2680 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
2681 	spin_unlock(&fs_info->qgroup_lock);
2682 
2683 	ret = update_qgroup_status_item(trans);
2684 	if (ret)
2685 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2686 
2687 	return ret;
2688 }
2689 
2690 /*
2691  * Copy the accounting information between qgroups. This is necessary
2692  * when a snapshot or a subvolume is created. Throwing an error will
2693  * cause a transaction abort so we take extra care here to only error
2694  * when a readonly fs is a reasonable outcome.
2695  */
2696 int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
2697 			 u64 objectid, struct btrfs_qgroup_inherit *inherit)
2698 {
2699 	int ret = 0;
2700 	int i;
2701 	u64 *i_qgroups;
2702 	struct btrfs_fs_info *fs_info = trans->fs_info;
2703 	struct btrfs_root *quota_root;
2704 	struct btrfs_qgroup *srcgroup;
2705 	struct btrfs_qgroup *dstgroup;
2706 	u32 level_size = 0;
2707 	u64 nums;
2708 
2709 	mutex_lock(&fs_info->qgroup_ioctl_lock);
2710 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2711 		goto out;
2712 
2713 	quota_root = fs_info->quota_root;
2714 	if (!quota_root) {
2715 		ret = -EINVAL;
2716 		goto out;
2717 	}
2718 
2719 	if (inherit) {
2720 		i_qgroups = (u64 *)(inherit + 1);
2721 		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
2722 		       2 * inherit->num_excl_copies;
2723 		for (i = 0; i < nums; ++i) {
2724 			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
2725 
2726 			/*
2727 			 * Zero out invalid groups so we can ignore
2728 			 * them later.
2729 			 */
2730 			if (!srcgroup ||
2731 			    ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
2732 				*i_qgroups = 0ULL;
2733 
2734 			++i_qgroups;
2735 		}
2736 	}
2737 
2738 	/*
2739 	 * create a tracking group for the subvol itself
2740 	 */
2741 	ret = add_qgroup_item(trans, quota_root, objectid);
2742 	if (ret)
2743 		goto out;
2744 
2745 	/*
2746 	 * add qgroup to all inherited groups
2747 	 */
2748 	if (inherit) {
2749 		i_qgroups = (u64 *)(inherit + 1);
2750 		for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
2751 			if (*i_qgroups == 0)
2752 				continue;
2753 			ret = add_qgroup_relation_item(trans, objectid,
2754 						       *i_qgroups);
2755 			if (ret && ret != -EEXIST)
2756 				goto out;
2757 			ret = add_qgroup_relation_item(trans, *i_qgroups,
2758 						       objectid);
2759 			if (ret && ret != -EEXIST)
2760 				goto out;
2761 		}
2762 		ret = 0;
2763 	}
2764 
2765 
2766 	spin_lock(&fs_info->qgroup_lock);
2767 
2768 	dstgroup = add_qgroup_rb(fs_info, objectid);
2769 	if (IS_ERR(dstgroup)) {
2770 		ret = PTR_ERR(dstgroup);
2771 		goto unlock;
2772 	}
2773 
2774 	if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
2775 		dstgroup->lim_flags = inherit->lim.flags;
2776 		dstgroup->max_rfer = inherit->lim.max_rfer;
2777 		dstgroup->max_excl = inherit->lim.max_excl;
2778 		dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
2779 		dstgroup->rsv_excl = inherit->lim.rsv_excl;
2780 
2781 		ret = update_qgroup_limit_item(trans, dstgroup);
2782 		if (ret) {
2783 			fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2784 			btrfs_info(fs_info,
2785 				   "unable to update quota limit for %llu",
2786 				   dstgroup->qgroupid);
2787 			goto unlock;
2788 		}
2789 	}
2790 
2791 	if (srcid) {
2792 		srcgroup = find_qgroup_rb(fs_info, srcid);
2793 		if (!srcgroup)
2794 			goto unlock;
2795 
2796 		/*
2797 		 * We call inherit after we clone the root in order to make sure
2798 		 * our counts don't go crazy, so at this point the only
2799 		 * difference between the two roots should be the root node.
2800 		 */
2801 		level_size = fs_info->nodesize;
2802 		dstgroup->rfer = srcgroup->rfer;
2803 		dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
2804 		dstgroup->excl = level_size;
2805 		dstgroup->excl_cmpr = level_size;
2806 		srcgroup->excl = level_size;
2807 		srcgroup->excl_cmpr = level_size;
2808 
2809 		/* inherit the limit info */
2810 		dstgroup->lim_flags = srcgroup->lim_flags;
2811 		dstgroup->max_rfer = srcgroup->max_rfer;
2812 		dstgroup->max_excl = srcgroup->max_excl;
2813 		dstgroup->rsv_rfer = srcgroup->rsv_rfer;
2814 		dstgroup->rsv_excl = srcgroup->rsv_excl;
2815 
2816 		qgroup_dirty(fs_info, dstgroup);
2817 		qgroup_dirty(fs_info, srcgroup);
2818 	}
2819 
2820 	if (!inherit)
2821 		goto unlock;
2822 
2823 	i_qgroups = (u64 *)(inherit + 1);
2824 	for (i = 0; i < inherit->num_qgroups; ++i) {
2825 		if (*i_qgroups) {
2826 			ret = add_relation_rb(fs_info, objectid, *i_qgroups);
2827 			if (ret)
2828 				goto unlock;
2829 		}
2830 		++i_qgroups;
2831 	}
2832 
2833 	for (i = 0; i <  inherit->num_ref_copies; ++i, i_qgroups += 2) {
2834 		struct btrfs_qgroup *src;
2835 		struct btrfs_qgroup *dst;
2836 
2837 		if (!i_qgroups[0] || !i_qgroups[1])
2838 			continue;
2839 
2840 		src = find_qgroup_rb(fs_info, i_qgroups[0]);
2841 		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
2842 
2843 		if (!src || !dst) {
2844 			ret = -EINVAL;
2845 			goto unlock;
2846 		}
2847 
2848 		dst->rfer = src->rfer - level_size;
2849 		dst->rfer_cmpr = src->rfer_cmpr - level_size;
2850 	}
2851 	for (i = 0; i <  inherit->num_excl_copies; ++i, i_qgroups += 2) {
2852 		struct btrfs_qgroup *src;
2853 		struct btrfs_qgroup *dst;
2854 
2855 		if (!i_qgroups[0] || !i_qgroups[1])
2856 			continue;
2857 
2858 		src = find_qgroup_rb(fs_info, i_qgroups[0]);
2859 		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
2860 
2861 		if (!src || !dst) {
2862 			ret = -EINVAL;
2863 			goto unlock;
2864 		}
2865 
2866 		dst->excl = src->excl + level_size;
2867 		dst->excl_cmpr = src->excl_cmpr + level_size;
2868 	}
2869 
2870 unlock:
2871 	spin_unlock(&fs_info->qgroup_lock);
2872 out:
2873 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
2874 	return ret;
2875 }
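
/*
 * Layout of the u64 array that follows struct btrfs_qgroup_inherit, as
 * consumed by btrfs_qgroup_inherit() above:
 *
 *	num_qgroups entries:		 qgroup IDs the new qgroup joins
 *	num_ref_copies (src, dst) pairs: IDs for rfer copying
 *	num_excl_copies (src, dst) pairs: IDs for excl copying
 */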
2876 
2877 /*
2878  * Two thresholds to commit a transaction in advance.
2879  *
2880  * For RATIO, the threshold will be 1/RATIO of the remaining limit
2881  * (excluding data and prealloc meta).
2882  * For SIZE, the threshold will be the given size in bytes.
2883  */
2884 #define QGROUP_PERTRANS_RATIO		32
2885 #define QGROUP_PERTRANS_SIZE		SZ_32M
2886 static bool qgroup_check_limits(struct btrfs_fs_info *fs_info,
2887 				const struct btrfs_qgroup *qg, u64 num_bytes)
2888 {
2889 	u64 limit;
2890 	u64 threshold;
2891 
2892 	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
2893 	    qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer)
2894 		return false;
2895 
2896 	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
2897 	    qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl)
2898 		return false;
2899 
2900 	/*
2901 	 * Even if we passed the check, it's better to check if the
2902 	 * meta_pertrans reservation is pushing us near the limit.
2903 	 * If there is too much pertrans reservation or it's near the limit,
2904 	 * let's try to commit the transaction to free some, using transaction_kthread
2905 	 */
2906 	if ((qg->lim_flags & (BTRFS_QGROUP_LIMIT_MAX_RFER |
2907 			      BTRFS_QGROUP_LIMIT_MAX_EXCL))) {
2908 		if (qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL)
2909 			limit = qg->max_excl;
2910 		else
2911 			limit = qg->max_rfer;
2912 		threshold = (limit - qg->rsv.values[BTRFS_QGROUP_RSV_DATA] -
2913 			    qg->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC]) /
2914 			    QGROUP_PERTRANS_RATIO;
2915 		threshold = min_t(u64, threshold, QGROUP_PERTRANS_SIZE);
2916 
2917 		/*
2918 		 * Use transaction_kthread to commit the transaction, so we no
2919 		 * longer need to worry about nested transactions or lock context.
2920 		 */
2921 		if (qg->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS] > threshold)
2922 			btrfs_commit_transaction_locksafe(fs_info);
2923 	}
2924 
2925 	return true;
2926 }
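
/*
 * A worked threshold example with hypothetical numbers: with
 * max_excl == 1G, 256M of data reservations and 64M of meta_prealloc
 * reservations, the remaining room is 704M, so
 * threshold == min(704M / 32, 32M) == 22M.  Once meta_pertrans
 * reservations exceed 22M, an early commit is kicked off via
 * transaction_kthread.
 */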
2927 
2928 static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
2929 			  enum btrfs_qgroup_rsv_type type)
2930 {
2931 	struct btrfs_root *quota_root;
2932 	struct btrfs_qgroup *qgroup;
2933 	struct btrfs_fs_info *fs_info = root->fs_info;
2934 	u64 ref_root = root->root_key.objectid;
2935 	int ret = 0;
2936 	struct ulist_node *unode;
2937 	struct ulist_iterator uiter;
2938 
2939 	if (!is_fstree(ref_root))
2940 		return 0;
2941 
2942 	if (num_bytes == 0)
2943 		return 0;
2944 
2945 	if (test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags) &&
2946 	    capable(CAP_SYS_RESOURCE))
2947 		enforce = false;
2948 
2949 	spin_lock(&fs_info->qgroup_lock);
2950 	quota_root = fs_info->quota_root;
2951 	if (!quota_root)
2952 		goto out;
2953 
2954 	qgroup = find_qgroup_rb(fs_info, ref_root);
2955 	if (!qgroup)
2956 		goto out;
2957 
2958 	/*
2959 	 * In the first step, we check all affected qgroups to see if any
2960 	 * limits would be exceeded
2961 	 */
2962 	ulist_reinit(fs_info->qgroup_ulist);
2963 	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
2964 			qgroup_to_aux(qgroup), GFP_ATOMIC);
2965 	if (ret < 0)
2966 		goto out;
2967 	ULIST_ITER_INIT(&uiter);
2968 	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
2969 		struct btrfs_qgroup *qg;
2970 		struct btrfs_qgroup_list *glist;
2971 
2972 		qg = unode_aux_to_qgroup(unode);
2973 
2974 		if (enforce && !qgroup_check_limits(fs_info, qg, num_bytes)) {
2975 			ret = -EDQUOT;
2976 			goto out;
2977 		}
2978 
2979 		list_for_each_entry(glist, &qg->groups, next_group) {
2980 			ret = ulist_add(fs_info->qgroup_ulist,
2981 					glist->group->qgroupid,
2982 					qgroup_to_aux(glist->group), GFP_ATOMIC);
2983 			if (ret < 0)
2984 				goto out;
2985 		}
2986 	}
2987 	ret = 0;
2988 	/*
2989 	 * no limits exceeded, now record the reservation into all qgroups
2990 	 */
2991 	ULIST_ITER_INIT(&uiter);
2992 	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
2993 		struct btrfs_qgroup *qg;
2994 
2995 		qg = unode_aux_to_qgroup(unode);
2996 
2997 		trace_qgroup_update_reserve(fs_info, qg, num_bytes, type);
2998 		qgroup_rsv_add(fs_info, qg, num_bytes, type);
2999 	}
3000 
3001 out:
3002 	spin_unlock(&fs_info->qgroup_lock);
3003 	return ret;
3004 }
3005 
3006 /*
3007  * Free @num_bytes of reserved space with @type for qgroup.  (Normally level 0
3008  * qgroup).
3009  *
3010  * Will handle all higher level qgroups too.
3011  *
3012  * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup.
3013  * This special case is only used for META_PERTRANS type.
3014  */
3015 void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
3016 			       u64 ref_root, u64 num_bytes,
3017 			       enum btrfs_qgroup_rsv_type type)
3018 {
3019 	struct btrfs_root *quota_root;
3020 	struct btrfs_qgroup *qgroup;
3021 	struct ulist_node *unode;
3022 	struct ulist_iterator uiter;
3023 	int ret = 0;
3024 
3025 	if (!is_fstree(ref_root))
3026 		return;
3027 
3028 	if (num_bytes == 0)
3029 		return;
3030 
3031 	if (num_bytes == (u64)-1 && type != BTRFS_QGROUP_RSV_META_PERTRANS) {
3032 		WARN(1, "%s: Invalid type to free", __func__);
3033 		return;
3034 	}
3035 	spin_lock(&fs_info->qgroup_lock);
3036 
3037 	quota_root = fs_info->quota_root;
3038 	if (!quota_root)
3039 		goto out;
3040 
3041 	qgroup = find_qgroup_rb(fs_info, ref_root);
3042 	if (!qgroup)
3043 		goto out;
3044 
3045 	if (num_bytes == (u64)-1)
3046 		/*
3047 		 * We're freeing all pertrans rsv, get reserved value from
3048 		 * level 0 qgroup as real num_bytes to free.
3049 		 */
3050 		num_bytes = qgroup->rsv.values[type];
3051 
3052 	ulist_reinit(fs_info->qgroup_ulist);
3053 	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
3054 			qgroup_to_aux(qgroup), GFP_ATOMIC);
3055 	if (ret < 0)
3056 		goto out;
3057 	ULIST_ITER_INIT(&uiter);
3058 	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
3059 		struct btrfs_qgroup *qg;
3060 		struct btrfs_qgroup_list *glist;
3061 
3062 		qg = unode_aux_to_qgroup(unode);
3063 
3064 		trace_qgroup_update_reserve(fs_info, qg, -(s64)num_bytes, type);
3065 		qgroup_rsv_release(fs_info, qg, num_bytes, type);
3066 
3067 		list_for_each_entry(glist, &qg->groups, next_group) {
3068 			ret = ulist_add(fs_info->qgroup_ulist,
3069 					glist->group->qgroupid,
3070 					qgroup_to_aux(glist->group), GFP_ATOMIC);
3071 			if (ret < 0)
3072 				goto out;
3073 		}
3074 	}
3075 
3076 out:
3077 	spin_unlock(&fs_info->qgroup_lock);
3078 }
3079 
3080 /*
3081  * Check if the leaf is the last leaf, which means all node pointers
3082  * are at their last position.
3083  */
3084 static bool is_last_leaf(struct btrfs_path *path)
3085 {
3086 	int i;
3087 
3088 	for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
3089 		if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1)
3090 			return false;
3091 	}
3092 	return true;
3093 }
3094 
3095 /*
3096  * Returns < 0 on error, 0 when more leaves are to be scanned.
3097  * Returns 1 when done.
3098  */
3099 static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
3100 			      struct btrfs_path *path)
3101 {
3102 	struct btrfs_fs_info *fs_info = trans->fs_info;
3103 	struct btrfs_key found;
3104 	struct extent_buffer *scratch_leaf = NULL;
3105 	struct ulist *roots = NULL;
3106 	u64 num_bytes;
3107 	bool done;
3108 	int slot;
3109 	int ret;
3110 
3111 	mutex_lock(&fs_info->qgroup_rescan_lock);
3112 	ret = btrfs_search_slot_for_read(fs_info->extent_root,
3113 					 &fs_info->qgroup_rescan_progress,
3114 					 path, 1, 0);
3115 
3116 	btrfs_debug(fs_info,
3117 		"current progress key (%llu %u %llu), search_slot ret %d",
3118 		fs_info->qgroup_rescan_progress.objectid,
3119 		fs_info->qgroup_rescan_progress.type,
3120 		fs_info->qgroup_rescan_progress.offset, ret);
3121 
3122 	if (ret) {
3123 		/*
3124 		 * The rescan is about to end, we will not be scanning any
3125 		 * further blocks. We cannot unset the RESCAN flag here, because
3126 		 * we want to commit the transaction if everything went well.
3127 		 * To make the live accounting work in this phase, we set our
3128 		 * scan progress pointer such that every real extent objectid
3129 		 * will be smaller.
3130 		 */
3131 		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
3132 		btrfs_release_path(path);
3133 		mutex_unlock(&fs_info->qgroup_rescan_lock);
3134 		return ret;
3135 	}
3136 	done = is_last_leaf(path);
3137 
3138 	btrfs_item_key_to_cpu(path->nodes[0], &found,
3139 			      btrfs_header_nritems(path->nodes[0]) - 1);
3140 	fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;
3141 
3142 	scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]);
3143 	if (!scratch_leaf) {
3144 		ret = -ENOMEM;
3145 		mutex_unlock(&fs_info->qgroup_rescan_lock);
3146 		goto out;
3147 	}
3148 	slot = path->slots[0];
3149 	btrfs_release_path(path);
3150 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3151 
3152 	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
3153 		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
3154 		if (found.type != BTRFS_EXTENT_ITEM_KEY &&
3155 		    found.type != BTRFS_METADATA_ITEM_KEY)
3156 			continue;
3157 		if (found.type == BTRFS_METADATA_ITEM_KEY)
3158 			num_bytes = fs_info->nodesize;
3159 		else
3160 			num_bytes = found.offset;
3161 
3162 		ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
3163 					   &roots, false);
3164 		if (ret < 0)
3165 			goto out;
3166 		/* For rescan, just pass old_roots as NULL */
3167 		ret = btrfs_qgroup_account_extent(trans, found.objectid,
3168 						  num_bytes, NULL, roots);
3169 		if (ret < 0)
3170 			goto out;
3171 	}
3172 out:
3173 	if (scratch_leaf)
3174 		free_extent_buffer(scratch_leaf);
3175 
3176 	if (done && !ret) {
3177 		ret = 1;
3178 		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
3179 	}
3180 	return ret;
3181 }
3182 
3183 static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
3184 {
3185 	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
3186 						     qgroup_rescan_work);
3187 	struct btrfs_path *path;
3188 	struct btrfs_trans_handle *trans = NULL;
3189 	int err = -ENOMEM;
3190 	int ret = 0;
3191 
3192 	path = btrfs_alloc_path();
3193 	if (!path)
3194 		goto out;
3195 	/*
3196 	 * Rescan should only search the commit root, and any later difference
3197 	 * should be recorded by qgroup
3198 	 */
3199 	path->search_commit_root = 1;
3200 	path->skip_locking = 1;
3201 
3202 	err = 0;
3203 	while (!err && !btrfs_fs_closing(fs_info)) {
3204 		trans = btrfs_start_transaction(fs_info->fs_root, 0);
3205 		if (IS_ERR(trans)) {
3206 			err = PTR_ERR(trans);
3207 			break;
3208 		}
3209 		if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
3210 			err = -EINTR;
3211 		} else {
3212 			err = qgroup_rescan_leaf(trans, path);
3213 		}
3214 		if (err > 0)
3215 			btrfs_commit_transaction(trans);
3216 		else
3217 			btrfs_end_transaction(trans);
3218 	}
3219 
3220 out:
3221 	btrfs_free_path(path);
3222 
3223 	mutex_lock(&fs_info->qgroup_rescan_lock);
3224 	if (!btrfs_fs_closing(fs_info))
3225 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3226 
3227 	if (err > 0 &&
3228 	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
3229 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
3230 	} else if (err < 0) {
3231 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
3232 	}
3233 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3234 
3235 	/*
3236 	 * Only update the status item, since the previous part has already
3237 	 * updated the qgroup info.
3238 	 */
3239 	trans = btrfs_start_transaction(fs_info->quota_root, 1);
3240 	if (IS_ERR(trans)) {
3241 		err = PTR_ERR(trans);
3242 		btrfs_err(fs_info,
3243 			  "fail to start transaction for status update: %d",
3244 			  err);
3245 		goto done;
3246 	}
3247 	ret = update_qgroup_status_item(trans);
3248 	if (ret < 0) {
3249 		err = ret;
3250 		btrfs_err(fs_info, "fail to update qgroup status: %d", err);
3251 	}
3252 	btrfs_end_transaction(trans);
3253 
3254 	if (btrfs_fs_closing(fs_info)) {
3255 		btrfs_info(fs_info, "qgroup scan paused");
3256 	} else if (err >= 0) {
3257 		btrfs_info(fs_info, "qgroup scan completed%s",
3258 			err > 0 ? " (inconsistency flag cleared)" : "");
3259 	} else {
3260 		btrfs_err(fs_info, "qgroup scan failed with %d", err);
3261 	}
3262 
3263 done:
3264 	mutex_lock(&fs_info->qgroup_rescan_lock);
3265 	fs_info->qgroup_rescan_running = false;
3266 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3267 	complete_all(&fs_info->qgroup_rescan_completion);
3268 }
3269 
3270 /*
3271  * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
3272  * memory required for the rescan context.
3273  */
3274 static int
3275 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
3276 		   int init_flags)
3277 {
3278 	int ret = 0;
3279 
3280 	if (!init_flags) {
3281 		/* we're resuming qgroup rescan at mount time */
3282 		if (!(fs_info->qgroup_flags &
3283 		      BTRFS_QGROUP_STATUS_FLAG_RESCAN)) {
3284 			btrfs_warn(fs_info,
3285 			"qgroup rescan init failed, qgroup is not enabled");
3286 			"qgroup rescan init failed, qgroup rescan is not queued");
3287 		} else if (!(fs_info->qgroup_flags &
3288 			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
3289 			btrfs_warn(fs_info,
3290 			"qgroup rescan init failed, qgroup rescan is not queued");
3291 			"qgroup rescan init failed, qgroup is not enabled");
3292 		}
3293 
3294 		if (ret)
3295 			return ret;
3296 	}
3297 
3298 	mutex_lock(&fs_info->qgroup_rescan_lock);
3299 	spin_lock(&fs_info->qgroup_lock);
3300 
3301 	if (init_flags) {
3302 		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
3303 			btrfs_warn(fs_info,
3304 				   "qgroup rescan is already in progress");
3305 			ret = -EINPROGRESS;
3306 		} else if (!(fs_info->qgroup_flags &
3307 			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
3308 			btrfs_warn(fs_info,
3309 			"qgroup rescan init failed, qgroup is not enabled");
3310 			ret = -EINVAL;
3311 		}
3312 
3313 		if (ret) {
3314 			spin_unlock(&fs_info->qgroup_lock);
3315 			mutex_unlock(&fs_info->qgroup_rescan_lock);
3316 			return ret;
3317 		}
3318 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3319 	}
3320 
3321 	memset(&fs_info->qgroup_rescan_progress, 0,
3322 		sizeof(fs_info->qgroup_rescan_progress));
3323 	fs_info->qgroup_rescan_progress.objectid = progress_objectid;
3324 	init_completion(&fs_info->qgroup_rescan_completion);
3325 	fs_info->qgroup_rescan_running = true;
3326 
3327 	spin_unlock(&fs_info->qgroup_lock);
3328 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3329 
3330 	memset(&fs_info->qgroup_rescan_work, 0,
3331 	       sizeof(fs_info->qgroup_rescan_work));
3332 	btrfs_init_work(&fs_info->qgroup_rescan_work,
3333 			btrfs_qgroup_rescan_helper,
3334 			btrfs_qgroup_rescan_worker, NULL, NULL);
3335 	return 0;
3336 }
3337 
3338 static void
3339 qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
3340 {
3341 	struct rb_node *n;
3342 	struct btrfs_qgroup *qgroup;
3343 
3344 	spin_lock(&fs_info->qgroup_lock);
3345 	/* clear all current qgroup tracking information */
3346 	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
3347 		qgroup = rb_entry(n, struct btrfs_qgroup, node);
3348 		qgroup->rfer = 0;
3349 		qgroup->rfer_cmpr = 0;
3350 		qgroup->excl = 0;
3351 		qgroup->excl_cmpr = 0;
3352 		qgroup_dirty(fs_info, qgroup);
3353 	}
3354 	spin_unlock(&fs_info->qgroup_lock);
3355 }
3356 
3357 int
3358 btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
3359 {
3360 	int ret = 0;
3361 	struct btrfs_trans_handle *trans;
3362 
3363 	ret = qgroup_rescan_init(fs_info, 0, 1);
3364 	if (ret)
3365 		return ret;
3366 
3367 	/*
3368 	 * We have set the rescan_progress to 0, which means no more
3369 	 * delayed refs will be accounted by btrfs_qgroup_account_ref.
3370 	 * However, btrfs_qgroup_account_ref may already be past its call
3371 	 * to btrfs_find_all_roots, in which case it would still do the
3372 	 * accounting.
3373 	 * To solve this, we're committing the transaction, which will
3374 	 * ensure we run all delayed refs and only after that, we are
3375 	 * going to clear all tracking information for a clean start.
3376 	 */
3377 
3378 	trans = btrfs_join_transaction(fs_info->fs_root);
3379 	if (IS_ERR(trans)) {
3380 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3381 		return PTR_ERR(trans);
3382 	}
3383 	ret = btrfs_commit_transaction(trans);
3384 	if (ret) {
3385 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3386 		return ret;
3387 	}
3388 
3389 	qgroup_rescan_zero_tracking(fs_info);
3390 
3391 	btrfs_queue_work(fs_info->qgroup_rescan_workers,
3392 			 &fs_info->qgroup_rescan_work);
3393 
3394 	return 0;
3395 }
3396 
3397 int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
3398 				     bool interruptible)
3399 {
3400 	int running;
3401 	int ret = 0;
3402 
3403 	mutex_lock(&fs_info->qgroup_rescan_lock);
3404 	spin_lock(&fs_info->qgroup_lock);
3405 	running = fs_info->qgroup_rescan_running;
3406 	spin_unlock(&fs_info->qgroup_lock);
3407 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3408 
3409 	if (!running)
3410 		return 0;
3411 
3412 	if (interruptible)
3413 		ret = wait_for_completion_interruptible(
3414 					&fs_info->qgroup_rescan_completion);
3415 	else
3416 		wait_for_completion(&fs_info->qgroup_rescan_completion);
3417 
3418 	return ret;
3419 }
3420 
3421 /*
3422  * this is only called from open_ctree where we're still single threaded, thus
3423  * locking is omitted here.
3424  */
3425 void
3426 btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
3427 {
3428 	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
3429 		btrfs_queue_work(fs_info->qgroup_rescan_workers,
3430 				 &fs_info->qgroup_rescan_work);
3431 }
3432 
3433 /*
3434  * Reserve qgroup space for range [start, start + len).
3435  *
3436  * This function will either reserve space from related qgroups or do
3437  * nothing if the range is already reserved.
3438  *
3439  * Return 0 for successful reserve
3440  * Return <0 for error (including -EDQUOT)
3441  *
3442  * NOTE: this function may sleep for memory allocation.
3443  *       If btrfs_qgroup_reserve_data() is called multiple times with the
3444  *       same @reserved, the caller must ensure that when an error happens
3445  *       it's OK to free *ALL* reserved space.
3446  */
3447 int btrfs_qgroup_reserve_data(struct inode *inode,
3448 			struct extent_changeset **reserved_ret, u64 start,
3449 			u64 len)
3450 {
3451 	struct btrfs_root *root = BTRFS_I(inode)->root;
3452 	struct ulist_node *unode;
3453 	struct ulist_iterator uiter;
3454 	struct extent_changeset *reserved;
3455 	u64 orig_reserved;
3456 	u64 to_reserve;
3457 	int ret;
3458 
3459 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
3460 	    !is_fstree(root->root_key.objectid) || len == 0)
3461 		return 0;
3462 
3463 	/* @reserved parameter is mandatory for qgroup */
3464 	if (WARN_ON(!reserved_ret))
3465 		return -EINVAL;
3466 	if (!*reserved_ret) {
3467 		*reserved_ret = extent_changeset_alloc();
3468 		if (!*reserved_ret)
3469 			return -ENOMEM;
3470 	}
3471 	reserved = *reserved_ret;
3472 	/* Record already reserved space */
3473 	orig_reserved = reserved->bytes_changed;
3474 	ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
3475 			start + len -1, EXTENT_QGROUP_RESERVED, reserved);
3476 
3477 	/* Newly reserved space */
3478 	to_reserve = reserved->bytes_changed - orig_reserved;
3479 	trace_btrfs_qgroup_reserve_data(inode, start, len,
3480 					to_reserve, QGROUP_RESERVE);
3481 	if (ret < 0)
3482 		goto cleanup;
3483 	ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA);
3484 	if (ret < 0)
3485 		goto cleanup;
3486 
3487 	return ret;
3488 
3489 cleanup:
3490 	/* cleanup *ALL* already reserved ranges */
3491 	ULIST_ITER_INIT(&uiter);
3492 	while ((unode = ulist_next(&reserved->range_changed, &uiter)))
3493 		clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val,
3494 				 unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL);
3495 	extent_changeset_release(reserved);
3496 	return ret;
3497 }
3498 
3499 /* Free ranges specified by @reserved, normally in error path */
3500 static int qgroup_free_reserved_data(struct inode *inode,
3501 			struct extent_changeset *reserved, u64 start, u64 len)
3502 {
3503 	struct btrfs_root *root = BTRFS_I(inode)->root;
3504 	struct ulist_node *unode;
3505 	struct ulist_iterator uiter;
3506 	struct extent_changeset changeset;
3507 	int freed = 0;
3508 	int ret;
3509 
3510 	extent_changeset_init(&changeset);
3511 	len = round_up(start + len, root->fs_info->sectorsize);
3512 	start = round_down(start, root->fs_info->sectorsize);
3513 
3514 	ULIST_ITER_INIT(&uiter);
3515 	while ((unode = ulist_next(&reserved->range_changed, &uiter))) {
3516 		u64 range_start = unode->val;
3517 		/* unode->aux is the inclusive end */
3518 		u64 range_len = unode->aux - range_start + 1;
3519 		u64 free_start;
3520 		u64 free_len;
3521 
3522 		extent_changeset_release(&changeset);
3523 
3524 		/* Only free range in range [start, start + len) */
3525 		if (range_start >= start + len ||
3526 		    range_start + range_len <= start)
3527 			continue;
3528 		free_start = max(range_start, start);
3529 		free_len = min(start + len, range_start + range_len) -
3530 			   free_start;
3531 		/*
3532 		 * TODO: Also modify reserved->ranges_reserved to reflect
3533 		 * the modification.
3534 		 *
3535 		 * However, as long as we free qgroup reserved space according
3536 		 * to EXTENT_QGROUP_RESERVED, we won't double free.
3537 		 * So there's no need to rush.
3538 		 */
3539 		ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree,
3540 				free_start, free_start + free_len - 1,
3541 				EXTENT_QGROUP_RESERVED, &changeset);
3542 		if (ret < 0)
3543 			goto out;
3544 		freed += changeset.bytes_changed;
3545 	}
3546 	btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid, freed,
3547 				  BTRFS_QGROUP_RSV_DATA);
3548 	ret = freed;
3549 out:
3550 	extent_changeset_release(&changeset);
3551 	return ret;
3552 }
3553 
3554 static int __btrfs_qgroup_release_data(struct inode *inode,
3555 			struct extent_changeset *reserved, u64 start, u64 len,
3556 			int free)
3557 {
3558 	struct extent_changeset changeset;
3559 	int trace_op = QGROUP_RELEASE;
3560 	int ret;
3561 
3562 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED,
3563 		      &BTRFS_I(inode)->root->fs_info->flags))
3564 		return 0;
3565 
3566 	/* In release case, we shouldn't have @reserved */
3567 	WARN_ON(!free && reserved);
3568 	if (free && reserved)
3569 		return qgroup_free_reserved_data(inode, reserved, start, len);
3570 	extent_changeset_init(&changeset);
3571 	ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
3572 			start + len -1, EXTENT_QGROUP_RESERVED, &changeset);
3573 	if (ret < 0)
3574 		goto out;
3575 
3576 	if (free)
3577 		trace_op = QGROUP_FREE;
3578 	trace_btrfs_qgroup_release_data(inode, start, len,
3579 					changeset.bytes_changed, trace_op);
3580 	if (free)
3581 		btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info,
3582 				BTRFS_I(inode)->root->root_key.objectid,
3583 				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
3584 	ret = changeset.bytes_changed;
3585 out:
3586 	extent_changeset_release(&changeset);
3587 	return ret;
3588 }
3589 
3590 /*
3591  * Free a reserved space range from io_tree and related qgroups
3592  *
3593  * Should be called when a range of pages gets invalidated before reaching
3594  * disk, or for the error cleanup case.
3595  * If @reserved is given, only the reserved range in [@start, @start + @len)
3596  * will be freed.
3597  *
3598  * For data written to disk, use btrfs_qgroup_release_data().
3599  *
3600  * NOTE: This function may sleep for memory allocation.
3601  */
3602 int btrfs_qgroup_free_data(struct inode *inode,
3603 			struct extent_changeset *reserved, u64 start, u64 len)
3604 {
3605 	return __btrfs_qgroup_release_data(inode, reserved, start, len, 1);
3606 }
3607 
3608 /*
3609  * Release a reserved space range from io_tree only.
3610  *
3611  * Should be called when a range of pages gets written to disk and the
3612  * corresponding FILE_EXTENT item is inserted into the corresponding root.
3613  *
3614  * Since the new qgroup accounting framework only updates qgroup numbers at
3615  * commit_transaction() time, its reserved space shouldn't be freed from
3616  * related qgroups.
3617  *
3618  * But we should release the range from the io_tree, to allow further writes
3619  * to be COWed.
3620  *
3621  * NOTE: This function may sleep for memory allocation.
3622  */
3623 int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len)
3624 {
3625 	return __btrfs_qgroup_release_data(inode, NULL, start, len, 0);
3626 }
3627 
3628 static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes,
3629 			      enum btrfs_qgroup_rsv_type type)
3630 {
3631 	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
3632 	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
3633 		return;
3634 	if (num_bytes == 0)
3635 		return;
3636 
3637 	spin_lock(&root->qgroup_meta_rsv_lock);
3638 	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
3639 		root->qgroup_meta_rsv_prealloc += num_bytes;
3640 	else
3641 		root->qgroup_meta_rsv_pertrans += num_bytes;
3642 	spin_unlock(&root->qgroup_meta_rsv_lock);
3643 }
3644 
3645 static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes,
3646 			     enum btrfs_qgroup_rsv_type type)
3647 {
3648 	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
3649 	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
3650 		return 0;
3651 	if (num_bytes == 0)
3652 		return 0;
3653 
3654 	spin_lock(&root->qgroup_meta_rsv_lock);
3655 	if (type == BTRFS_QGROUP_RSV_META_PREALLOC) {
3656 		num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc,
3657 				  num_bytes);
3658 		root->qgroup_meta_rsv_prealloc -= num_bytes;
3659 	} else {
3660 		num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans,
3661 				  num_bytes);
3662 		root->qgroup_meta_rsv_pertrans -= num_bytes;
3663 	}
3664 	spin_unlock(&root->qgroup_meta_rsv_lock);
3665 	return num_bytes;
3666 }
3667 
3668 int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
3669 				enum btrfs_qgroup_rsv_type type, bool enforce)
3670 {
3671 	struct btrfs_fs_info *fs_info = root->fs_info;
3672 	int ret;
3673 
3674 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
3675 	    !is_fstree(root->root_key.objectid) || num_bytes == 0)
3676 		return 0;
3677 
3678 	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
3679 	trace_qgroup_meta_reserve(root, type, (s64)num_bytes);
3680 	ret = qgroup_reserve(root, num_bytes, enforce, type);
3681 	if (ret < 0)
3682 		return ret;
3683 	/*
3684 	 * Record what we have reserved in the root.
3685 	 *
3686 	 * This is to avoid an underflow when quota goes from disabled to
3687 	 * enabled: in that case we may try to free space we never reserved
3688 	 * (since quota was disabled at reserve time), so record what we
3689 	 * reserved in the root and clamp any later release to this number.
3690 	 */
3691 	add_root_meta_rsv(root, num_bytes, type);
3692 	return ret;
3693 }
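
/*
 * Callers normally reach __btrfs_qgroup_reserve_meta() through the thin
 * wrappers in qgroup.h rather than calling it directly.  A sketch (exact
 * call sites vary):
 *
 *	// Reserve one tree block worth of metadata, enforcing limits.
 *	ret = btrfs_qgroup_reserve_meta_prealloc(root, fs_info->nodesize,
 *						 true);
 *	if (ret < 0)
 *		return ret;
 */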
3694 
3695 void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
3696 {
3697 	struct btrfs_fs_info *fs_info = root->fs_info;
3698 
3699 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
3700 	    !is_fstree(root->root_key.objectid))
3701 		return;
3702 
3703 	/* TODO: Update trace point to handle such free */
3704 	trace_qgroup_meta_free_all_pertrans(root);
3705 	/* Special value -1 means to free all reserved space */
3706 	btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid, (u64)-1,
3707 				  BTRFS_QGROUP_RSV_META_PERTRANS);
3708 }
3709 
3710 void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
3711 			      enum btrfs_qgroup_rsv_type type)
3712 {
3713 	struct btrfs_fs_info *fs_info = root->fs_info;
3714 
3715 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
3716 	    !is_fstree(root->root_key.objectid))
3717 		return;
3718 
3719 	/*
3720 	 * A reservation for META_PREALLOC can happen before quota is
3721 	 * enabled, which can lead to underflow.
3722 	 * Ensure we only free what we have really reserved.
3723 	 */
3724 	num_bytes = sub_root_meta_rsv(root, num_bytes, type);
3725 	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
3726 	trace_qgroup_meta_reserve(root, type, -(s64)num_bytes);
3727 	btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid,
3728 				  num_bytes, type);
3729 }
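
/*
 * The matching free side likewise goes through the qgroup.h wrappers, e.g.
 * (a sketch mirroring the reserve example above):
 *
 *	btrfs_qgroup_free_meta_prealloc(root, fs_info->nodesize);
 */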
3730 
3731 static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
3732 				int num_bytes)
3733 {
3734 	struct btrfs_root *quota_root = fs_info->quota_root;
3735 	struct btrfs_qgroup *qgroup;
3736 	struct ulist_node *unode;
3737 	struct ulist_iterator uiter;
3738 	int ret = 0;
3739 
3740 	if (num_bytes == 0)
3741 		return;
3742 	if (!quota_root)
3743 		return;
3744 
3745 	spin_lock(&fs_info->qgroup_lock);
3746 	qgroup = find_qgroup_rb(fs_info, ref_root);
3747 	if (!qgroup)
3748 		goto out;
3749 	ulist_reinit(fs_info->qgroup_ulist);
3750 	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
3751 		       qgroup_to_aux(qgroup), GFP_ATOMIC);
3752 	if (ret < 0)
3753 		goto out;
3754 	ULIST_ITER_INIT(&uiter);
3755 	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
3756 		struct btrfs_qgroup *qg;
3757 		struct btrfs_qgroup_list *glist;
3758 
3759 		qg = unode_aux_to_qgroup(unode);
3760 
3761 		qgroup_rsv_release(fs_info, qg, num_bytes,
3762 				BTRFS_QGROUP_RSV_META_PREALLOC);
3763 		qgroup_rsv_add(fs_info, qg, num_bytes,
3764 				BTRFS_QGROUP_RSV_META_PERTRANS);
3765 		list_for_each_entry(glist, &qg->groups, next_group) {
3766 			ret = ulist_add(fs_info->qgroup_ulist,
3767 					glist->group->qgroupid,
3768 					qgroup_to_aux(glist->group), GFP_ATOMIC);
3769 			if (ret < 0)
3770 				goto out;
3771 		}
3772 	}
3773 out:
3774 	spin_unlock(&fs_info->qgroup_lock);
3775 }
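
/*
 * Worked example of the conversion above (illustrative numbers): if a
 * qgroup holds 48K of META_PREALLOC and 16K of that reservation survives
 * into the transaction, converting 16K leaves META_PREALLOC at 32K and
 * raises META_PERTRANS to 16K, and the ulist walk applies the same move to
 * every parent qgroup.
 */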
3776 
3777 void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
3778 {
3779 	struct btrfs_fs_info *fs_info = root->fs_info;
3780 
3781 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
3782 	    !is_fstree(root->root_key.objectid))
3783 		return;
3784 	/* Same as btrfs_qgroup_free_meta_prealloc() */
3785 	num_bytes = sub_root_meta_rsv(root, num_bytes,
3786 				      BTRFS_QGROUP_RSV_META_PREALLOC);
3787 	trace_qgroup_meta_convert(root, num_bytes);
3788 	qgroup_convert_meta(fs_info, root->root_key.objectid, num_bytes);
3789 }
3790 
3791 /*
3792  * Check for leaked qgroup reserved space, normally at inode destruction
3793  * time.
3794  */
3795 void btrfs_qgroup_check_reserved_leak(struct inode *inode)
3796 {
3797 	struct extent_changeset changeset;
3798 	struct ulist_node *unode;
3799 	struct ulist_iterator iter;
3800 	int ret;
3801 
3802 	extent_changeset_init(&changeset);
3803 	ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
3804 			EXTENT_QGROUP_RESERVED, &changeset);
3805 
3806 	WARN_ON(ret < 0);
3807 	if (WARN_ON(changeset.bytes_changed)) {
3808 		ULIST_ITER_INIT(&iter);
3809 		while ((unode = ulist_next(&changeset.range_changed, &iter))) {
3810 			btrfs_warn(BTRFS_I(inode)->root->fs_info,
3811 				"leaking qgroup reserved space, ino: %lu, start: %llu, end: %llu",
3812 				inode->i_ino, unode->val, unode->aux);
3813 		}
3814 		btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info,
3815 				BTRFS_I(inode)->root->root_key.objectid,
3816 				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
3818 	}
3819 	extent_changeset_release(&changeset);
3820 }
3821 
3822 void btrfs_qgroup_init_swapped_blocks(
3823 	struct btrfs_qgroup_swapped_blocks *swapped_blocks)
3824 {
3825 	int i;
3826 
3827 	spin_lock_init(&swapped_blocks->lock);
3828 	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
3829 		swapped_blocks->blocks[i] = RB_ROOT;
3830 	swapped_blocks->swapped = false;
3831 }
3832 
3833 /*
3834  * Delete all swapped block records of @root.
3835  * Every record here means we skipped a full subtree scan for qgroup.
3836  *
3837  * Gets called when committing a transaction.
3838  */
3839 void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root)
3840 {
3841 	struct btrfs_qgroup_swapped_blocks *swapped_blocks;
3842 	int i;
3843 
3844 	swapped_blocks = &root->swapped_blocks;
3845 
3846 	spin_lock(&swapped_blocks->lock);
3847 	if (!swapped_blocks->swapped)
3848 		goto out;
3849 	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
3850 		struct rb_root *cur_root = &swapped_blocks->blocks[i];
3851 		struct btrfs_qgroup_swapped_block *entry;
3852 		struct btrfs_qgroup_swapped_block *next;
3853 
3854 		rbtree_postorder_for_each_entry_safe(entry, next, cur_root,
3855 						     node)
3856 			kfree(entry);
3857 		swapped_blocks->blocks[i] = RB_ROOT;
3858 	}
3859 	swapped_blocks->swapped = false;
3860 out:
3861 	spin_unlock(&swapped_blocks->lock);
3862 }
3863 
3864 /*
3865  * Add a record of the swapped subtree roots into @subvol_root.
3866  *
3867  * @subvol_root:	tree root of the subvolume tree that got swapped
3868  * @bg:			block group under balance
3869  * @subvol_parent/slot:	pointer to the subtree root in subvolume tree
3870  * @reloc_parent/slot:	pointer to the subtree root in reloc tree
3871  *			BOTH POINTERS ARE BEFORE TREE SWAP
3872  * @last_snapshot:	last snapshot generation of the subvolume tree
3873  */
3874 int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
3875 		struct btrfs_root *subvol_root,
3876 		struct btrfs_block_group_cache *bg,
3877 		struct extent_buffer *subvol_parent, int subvol_slot,
3878 		struct extent_buffer *reloc_parent, int reloc_slot,
3879 		u64 last_snapshot)
3880 {
3881 	struct btrfs_fs_info *fs_info = subvol_root->fs_info;
3882 	struct btrfs_qgroup_swapped_blocks *blocks = &subvol_root->swapped_blocks;
3883 	struct btrfs_qgroup_swapped_block *block;
3884 	struct rb_node **cur;
3885 	struct rb_node *parent = NULL;
3886 	int level = btrfs_header_level(subvol_parent) - 1;
3887 	int ret = 0;
3888 
3889 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
3890 		return 0;
3891 
3892 	if (btrfs_node_ptr_generation(subvol_parent, subvol_slot) >
3893 	    btrfs_node_ptr_generation(reloc_parent, reloc_slot)) {
3894 		btrfs_err_rl(fs_info,
3895 		"%s: bad parameter order, subvol_gen=%llu reloc_gen=%llu",
3896 			__func__,
3897 			btrfs_node_ptr_generation(subvol_parent, subvol_slot),
3898 			btrfs_node_ptr_generation(reloc_parent, reloc_slot));
3899 		return -EUCLEAN;
3900 	}
3901 
3902 	block = kmalloc(sizeof(*block), GFP_NOFS);
3903 	if (!block) {
3904 		ret = -ENOMEM;
3905 		goto out;
3906 	}
3907 
3908 	/*
3909 	 * @reloc_parent/slot is still before swap, while @block is going to
3910 	 * record the bytenr after swap, so we do the swap here.
3911 	 */
3912 	block->subvol_bytenr = btrfs_node_blockptr(reloc_parent, reloc_slot);
3913 	block->subvol_generation = btrfs_node_ptr_generation(reloc_parent,
3914 							     reloc_slot);
3915 	block->reloc_bytenr = btrfs_node_blockptr(subvol_parent, subvol_slot);
3916 	block->reloc_generation = btrfs_node_ptr_generation(subvol_parent,
3917 							    subvol_slot);
3918 	block->last_snapshot = last_snapshot;
3919 	block->level = level;
3920 	if (bg->flags & BTRFS_BLOCK_GROUP_DATA)
3921 		block->trace_leaf = true;
3922 	else
3923 		block->trace_leaf = false;
3924 	btrfs_node_key_to_cpu(reloc_parent, &block->first_key, reloc_slot);
3925 
3926 	/* Insert @block into @blocks */
3927 	spin_lock(&blocks->lock);
3928 	cur = &blocks->blocks[level].rb_node;
3929 	while (*cur) {
3930 		struct btrfs_qgroup_swapped_block *entry;
3931 
3932 		parent = *cur;
3933 		entry = rb_entry(parent, struct btrfs_qgroup_swapped_block,
3934 				 node);
3935 
3936 		if (entry->subvol_bytenr < block->subvol_bytenr) {
3937 			cur = &(*cur)->rb_right;
3938 		} else if (entry->subvol_bytenr > block->subvol_bytenr) {
3939 			cur = &(*cur)->rb_left;
3940 		} else {
3941 			if (entry->subvol_generation !=
3942 					block->subvol_generation ||
3943 			    entry->reloc_bytenr != block->reloc_bytenr ||
3944 			    entry->reloc_generation !=
3945 					block->reloc_generation) {
3946 				/*
3947 				 * Duplicate but mismatched entry found.
3948 				 * Shouldn't happen.
3949 				 *
3950 				 * Marking qgroup inconsistent should be enough
3951 				 * for end users.
3952 				 */
3953 				WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
3954 				ret = -EEXIST;
3955 			}
3956 			kfree(block);
3957 			goto out_unlock;
3958 		}
3959 	}
3960 	rb_link_node(&block->node, parent, cur);
3961 	rb_insert_color(&block->node, &blocks->blocks[level]);
3962 	blocks->swapped = true;
3963 out_unlock:
3964 	spin_unlock(&blocks->lock);
3965 out:
3966 	if (ret < 0)
3967 		fs_info->qgroup_flags |=
3968 			BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
3969 	return ret;
3970 }
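
/*
 * The records above are keyed by subvol_bytenr (the post-swap bytenr in the
 * subvolume tree), so the lookup in btrfs_qgroup_trace_subtree_after_cow()
 * is a plain binary search on eb->start.  A sketch of a record's lifecycle:
 *
 *	relocation swaps two subtree roots
 *	  -> btrfs_qgroup_add_swapped_blocks()      (record added, no scan)
 *	the subvolume tree block gets COWed later
 *	  -> btrfs_qgroup_trace_subtree_after_cow() (record consumed, the
 *						     delayed trace runs)
 *	transaction commits with records left over
 *	  -> btrfs_qgroup_clean_swapped_blocks()    (leftovers dropped)
 */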
3971 
3972 /*
3973  * Check if the tree block is a subtree root, and if so do the needed
3974  * delayed subtree trace for qgroup.
3975  *
3976  * This is called during btrfs_cow_block().
3977  */
3978 int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
3979 					 struct btrfs_root *root,
3980 					 struct extent_buffer *subvol_eb)
3981 {
3982 	struct btrfs_fs_info *fs_info = root->fs_info;
3983 	struct btrfs_qgroup_swapped_blocks *blocks = &root->swapped_blocks;
3984 	struct btrfs_qgroup_swapped_block *block;
3985 	struct extent_buffer *reloc_eb = NULL;
3986 	struct rb_node *node;
3987 	bool found = false;
3988 	bool swapped = false;
3989 	int level = btrfs_header_level(subvol_eb);
3990 	int ret = 0;
3991 	int i;
3992 
3993 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
3994 		return 0;
3995 	if (!is_fstree(root->root_key.objectid) || !root->reloc_root)
3996 		return 0;
3997 
3998 	spin_lock(&blocks->lock);
3999 	if (!blocks->swapped) {
4000 		spin_unlock(&blocks->lock);
4001 		return 0;
4002 	}
4003 	node = blocks->blocks[level].rb_node;
4004 
4005 	while (node) {
4006 		block = rb_entry(node, struct btrfs_qgroup_swapped_block, node);
4007 		if (block->subvol_bytenr < subvol_eb->start) {
4008 			node = node->rb_right;
4009 		} else if (block->subvol_bytenr > subvol_eb->start) {
4010 			node = node->rb_left;
4011 		} else {
4012 			found = true;
4013 			break;
4014 		}
4015 	}
4016 	if (!found) {
4017 		spin_unlock(&blocks->lock);
4018 		goto out;
4019 	}
4020 	/* Found one, remove it from @blocks first and update blocks->swapped */
4021 	rb_erase(&block->node, &blocks->blocks[level]);
4022 	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
4023 		if (!RB_EMPTY_ROOT(&blocks->blocks[i])) {
4024 			swapped = true;
4025 			break;
4026 		}
4027 	}
4028 	blocks->swapped = swapped;
4029 	spin_unlock(&blocks->lock);
4030 
4031 	/* Read out reloc subtree root */
4032 	reloc_eb = read_tree_block(fs_info, block->reloc_bytenr,
4033 				   block->reloc_generation, block->level,
4034 				   &block->first_key);
4035 	if (IS_ERR(reloc_eb)) {
4036 		ret = PTR_ERR(reloc_eb);
4037 		reloc_eb = NULL;
4038 		goto free_out;
4039 	}
4040 	if (!extent_buffer_uptodate(reloc_eb)) {
4041 		ret = -EIO;
4042 		goto free_out;
4043 	}
4044 
4045 	ret = qgroup_trace_subtree_swap(trans, reloc_eb, subvol_eb,
4046 			block->last_snapshot, block->trace_leaf);
4047 free_out:
4048 	kfree(block);
4049 	free_extent_buffer(reloc_eb);
4050 out:
4051 	if (ret < 0) {
4052 		btrfs_err_rl(fs_info,
4053 			     "failed to account subtree at bytenr %llu: %d",
4054 			     subvol_eb->start, ret);
4055 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
4056 	}
4057 	return ret;
4058 }
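
/*
 * A simplified sketch of the caller relationship (not verbatim ctree.c
 * code): the COW path hands the subvolume tree block being COWed to this
 * hook, which runs the delayed subtree trace when a swapped-block record
 * matches:
 *
 *	// inside btrfs_cow_block(), with @buf the subvolume tree block
 *	ret = btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
 *	if (ret < 0)
 *		handle_error(ret);	// hypothetical error handling;
 *					// qgroup is already marked
 *					// inconsistent by the hook
 */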
4059