xref: /openbmc/linux/fs/btrfs/qgroup.c (revision 5b4cb650)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2011 STRATO.  All rights reserved.
4  */
5 
6 #include <linux/sched.h>
7 #include <linux/pagemap.h>
8 #include <linux/writeback.h>
9 #include <linux/blkdev.h>
10 #include <linux/rbtree.h>
11 #include <linux/slab.h>
12 #include <linux/workqueue.h>
13 #include <linux/btrfs.h>
14 #include <linux/sizes.h>
15 
16 #include "ctree.h"
17 #include "transaction.h"
18 #include "disk-io.h"
19 #include "locking.h"
20 #include "ulist.h"
21 #include "backref.h"
22 #include "extent_io.h"
23 #include "qgroup.h"
24 
25 
26 /* TODO XXX FIXME
27  *  - subvol delete -> delete when ref goes to 0? delete limits also?
28  *  - reorganize keys
29  *  - compressed
30  *  - sync
31  *  - copy also limits on subvol creation
32  *  - limit
33  *  - caches for ulists
34  *  - performance benchmarks
35  *  - check all ioctl parameters
36  */
37 
38 /*
39  * Helpers to access qgroup reservation
40  *
41  * Callers should ensure the lock context and type are valid
42  */
43 
44 static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup)
45 {
46 	u64 ret = 0;
47 	int i;
48 
49 	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
50 		ret += qgroup->rsv.values[i];
51 
52 	return ret;
53 }
54 
55 #ifdef CONFIG_BTRFS_DEBUG
56 static const char *qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type)
57 {
58 	if (type == BTRFS_QGROUP_RSV_DATA)
59 		return "data";
60 	if (type == BTRFS_QGROUP_RSV_META_PERTRANS)
61 		return "meta_pertrans";
62 	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
63 		return "meta_prealloc";
64 	return NULL;
65 }
66 #endif
67 
68 static void qgroup_rsv_add(struct btrfs_fs_info *fs_info,
69 			   struct btrfs_qgroup *qgroup, u64 num_bytes,
70 			   enum btrfs_qgroup_rsv_type type)
71 {
72 	trace_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);
73 	qgroup->rsv.values[type] += num_bytes;
74 }
75 
76 static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,
77 			       struct btrfs_qgroup *qgroup, u64 num_bytes,
78 			       enum btrfs_qgroup_rsv_type type)
79 {
80 	trace_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type);
81 	if (qgroup->rsv.values[type] >= num_bytes) {
82 		qgroup->rsv.values[type] -= num_bytes;
83 		return;
84 	}
85 #ifdef CONFIG_BTRFS_DEBUG
86 	WARN_RATELIMIT(1,
87 		"qgroup %llu %s reserved space underflow, have %llu to free %llu",
88 		qgroup->qgroupid, qgroup_rsv_type_str(type),
89 		qgroup->rsv.values[type], num_bytes);
90 #endif
91 	qgroup->rsv.values[type] = 0;
92 }
93 
94 static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info,
95 				     struct btrfs_qgroup *dest,
96 				     struct btrfs_qgroup *src)
97 {
98 	int i;
99 
100 	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
101 		qgroup_rsv_add(fs_info, dest, src->rsv.values[i], i);
102 }
103 
104 static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info,
105 					 struct btrfs_qgroup *dest,
106 					  struct btrfs_qgroup *src)
107 {
108 	int i;
109 
110 	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
111 		qgroup_rsv_release(fs_info, dest, src->rsv.values[i], i);
112 }
113 
114 static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
115 					   int mod)
116 {
117 	if (qg->old_refcnt < seq)
118 		qg->old_refcnt = seq;
119 	qg->old_refcnt += mod;
120 }
121 
122 static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
123 					   int mod)
124 {
125 	if (qg->new_refcnt < seq)
126 		qg->new_refcnt = seq;
127 	qg->new_refcnt += mod;
128 }
129 
130 static inline u64 btrfs_qgroup_get_old_refcnt(struct btrfs_qgroup *qg, u64 seq)
131 {
132 	if (qg->old_refcnt < seq)
133 		return 0;
134 	return qg->old_refcnt - seq;
135 }
136 
137 static inline u64 btrfs_qgroup_get_new_refcnt(struct btrfs_qgroup *qg, u64 seq)
138 {
139 	if (qg->new_refcnt < seq)
140 		return 0;
141 	return qg->new_refcnt - seq;
142 }
143 
144 /*
145  * glue structure to represent the relations between qgroups.
146  */
147 struct btrfs_qgroup_list {
148 	struct list_head next_group;
149 	struct list_head next_member;
150 	struct btrfs_qgroup *group;
151 	struct btrfs_qgroup *member;
152 };
153 
154 static inline u64 qgroup_to_aux(struct btrfs_qgroup *qg)
155 {
156 	return (u64)(uintptr_t)qg;
157 }
158 
159 static inline struct btrfs_qgroup* unode_aux_to_qgroup(struct ulist_node *n)
160 {
161 	return (struct btrfs_qgroup *)(uintptr_t)n->aux;
162 }
163 
164 static int
165 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
166 		   int init_flags);
167 static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);
168 
169 /* must be called with qgroup_ioctl_lock held */
170 static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
171 					   u64 qgroupid)
172 {
173 	struct rb_node *n = fs_info->qgroup_tree.rb_node;
174 	struct btrfs_qgroup *qgroup;
175 
176 	while (n) {
177 		qgroup = rb_entry(n, struct btrfs_qgroup, node);
178 		if (qgroup->qgroupid < qgroupid)
179 			n = n->rb_left;
180 		else if (qgroup->qgroupid > qgroupid)
181 			n = n->rb_right;
182 		else
183 			return qgroup;
184 	}
185 	return NULL;
186 }
187 
188 /* must be called with qgroup_lock held */
189 static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
190 					  u64 qgroupid)
191 {
192 	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
193 	struct rb_node *parent = NULL;
194 	struct btrfs_qgroup *qgroup;
195 
196 	while (*p) {
197 		parent = *p;
198 		qgroup = rb_entry(parent, struct btrfs_qgroup, node);
199 
200 		if (qgroup->qgroupid < qgroupid)
201 			p = &(*p)->rb_left;
202 		else if (qgroup->qgroupid > qgroupid)
203 			p = &(*p)->rb_right;
204 		else
205 			return qgroup;
206 	}
207 
208 	qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
209 	if (!qgroup)
210 		return ERR_PTR(-ENOMEM);
211 
212 	qgroup->qgroupid = qgroupid;
213 	INIT_LIST_HEAD(&qgroup->groups);
214 	INIT_LIST_HEAD(&qgroup->members);
215 	INIT_LIST_HEAD(&qgroup->dirty);
216 
217 	rb_link_node(&qgroup->node, parent, p);
218 	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);
219 
220 	return qgroup;
221 }
222 
223 static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
224 {
225 	struct btrfs_qgroup_list *list;
226 
227 	list_del(&qgroup->dirty);
228 	while (!list_empty(&qgroup->groups)) {
229 		list = list_first_entry(&qgroup->groups,
230 					struct btrfs_qgroup_list, next_group);
231 		list_del(&list->next_group);
232 		list_del(&list->next_member);
233 		kfree(list);
234 	}
235 
236 	while (!list_empty(&qgroup->members)) {
237 		list = list_first_entry(&qgroup->members,
238 					struct btrfs_qgroup_list, next_member);
239 		list_del(&list->next_group);
240 		list_del(&list->next_member);
241 		kfree(list);
242 	}
243 	kfree(qgroup);
244 }
245 
246 /* must be called with qgroup_lock held */
247 static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
248 {
249 	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);
250 
251 	if (!qgroup)
252 		return -ENOENT;
253 
254 	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
255 	__del_qgroup_rb(qgroup);
256 	return 0;
257 }
258 
259 /* must be called with qgroup_lock held */
260 static int add_relation_rb(struct btrfs_fs_info *fs_info,
261 			   u64 memberid, u64 parentid)
262 {
263 	struct btrfs_qgroup *member;
264 	struct btrfs_qgroup *parent;
265 	struct btrfs_qgroup_list *list;
266 
267 	member = find_qgroup_rb(fs_info, memberid);
268 	parent = find_qgroup_rb(fs_info, parentid);
269 	if (!member || !parent)
270 		return -ENOENT;
271 
272 	list = kzalloc(sizeof(*list), GFP_ATOMIC);
273 	if (!list)
274 		return -ENOMEM;
275 
276 	list->group = parent;
277 	list->member = member;
278 	list_add_tail(&list->next_group, &member->groups);
279 	list_add_tail(&list->next_member, &parent->members);
280 
281 	return 0;
282 }
283 
284 /* must be called with qgroup_lock held */
285 static int del_relation_rb(struct btrfs_fs_info *fs_info,
286 			   u64 memberid, u64 parentid)
287 {
288 	struct btrfs_qgroup *member;
289 	struct btrfs_qgroup *parent;
290 	struct btrfs_qgroup_list *list;
291 
292 	member = find_qgroup_rb(fs_info, memberid);
293 	parent = find_qgroup_rb(fs_info, parentid);
294 	if (!member || !parent)
295 		return -ENOENT;
296 
297 	list_for_each_entry(list, &member->groups, next_group) {
298 		if (list->group == parent) {
299 			list_del(&list->next_group);
300 			list_del(&list->next_member);
301 			kfree(list);
302 			return 0;
303 		}
304 	}
305 	return -ENOENT;
306 }
307 
308 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
309 int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
310 			       u64 rfer, u64 excl)
311 {
312 	struct btrfs_qgroup *qgroup;
313 
314 	qgroup = find_qgroup_rb(fs_info, qgroupid);
315 	if (!qgroup)
316 		return -EINVAL;
317 	if (qgroup->rfer != rfer || qgroup->excl != excl)
318 		return -EINVAL;
319 	return 0;
320 }
321 #endif
322 
323 /*
324  * The full config is read in one go, only called from open_ctree()
325  * It doesn't use any locking, as at this point we're still single-threaded
326  */
327 int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
328 {
329 	struct btrfs_key key;
330 	struct btrfs_key found_key;
331 	struct btrfs_root *quota_root = fs_info->quota_root;
332 	struct btrfs_path *path = NULL;
333 	struct extent_buffer *l;
334 	int slot;
335 	int ret = 0;
336 	u64 flags = 0;
337 	u64 rescan_progress = 0;
338 
339 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
340 		return 0;
341 
342 	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
343 	if (!fs_info->qgroup_ulist) {
344 		ret = -ENOMEM;
345 		goto out;
346 	}
347 
348 	path = btrfs_alloc_path();
349 	if (!path) {
350 		ret = -ENOMEM;
351 		goto out;
352 	}
353 
354 	/* default this to quota off, in case no status key is found */
355 	fs_info->qgroup_flags = 0;
356 
357 	/*
358 	 * pass 1: read status, all qgroup infos and limits
359 	 */
360 	key.objectid = 0;
361 	key.type = 0;
362 	key.offset = 0;
363 	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
364 	if (ret)
365 		goto out;
366 
367 	while (1) {
368 		struct btrfs_qgroup *qgroup;
369 
370 		slot = path->slots[0];
371 		l = path->nodes[0];
372 		btrfs_item_key_to_cpu(l, &found_key, slot);
373 
374 		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
375 			struct btrfs_qgroup_status_item *ptr;
376 
377 			ptr = btrfs_item_ptr(l, slot,
378 					     struct btrfs_qgroup_status_item);
379 
380 			if (btrfs_qgroup_status_version(l, ptr) !=
381 			    BTRFS_QGROUP_STATUS_VERSION) {
382 				btrfs_err(fs_info,
383 				 "old qgroup version, quota disabled");
384 				goto out;
385 			}
386 			if (btrfs_qgroup_status_generation(l, ptr) !=
387 			    fs_info->generation) {
388 				flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
389 				btrfs_err(fs_info,
390 					"qgroup generation mismatch, marked as inconsistent");
391 			}
392 			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
393 									  ptr);
394 			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
395 			goto next1;
396 		}
397 
398 		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
399 		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
400 			goto next1;
401 
402 		qgroup = find_qgroup_rb(fs_info, found_key.offset);
403 		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
404 		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
405 			btrfs_err(fs_info, "inconsistent qgroup config");
406 			flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
407 		}
408 		if (!qgroup) {
409 			qgroup = add_qgroup_rb(fs_info, found_key.offset);
410 			if (IS_ERR(qgroup)) {
411 				ret = PTR_ERR(qgroup);
412 				goto out;
413 			}
414 		}
415 		switch (found_key.type) {
416 		case BTRFS_QGROUP_INFO_KEY: {
417 			struct btrfs_qgroup_info_item *ptr;
418 
419 			ptr = btrfs_item_ptr(l, slot,
420 					     struct btrfs_qgroup_info_item);
421 			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
422 			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
423 			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
424 			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
425 			/* generation currently unused */
426 			break;
427 		}
428 		case BTRFS_QGROUP_LIMIT_KEY: {
429 			struct btrfs_qgroup_limit_item *ptr;
430 
431 			ptr = btrfs_item_ptr(l, slot,
432 					     struct btrfs_qgroup_limit_item);
433 			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
434 			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
435 			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
436 			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
437 			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
438 			break;
439 		}
440 		}
441 next1:
442 		ret = btrfs_next_item(quota_root, path);
443 		if (ret < 0)
444 			goto out;
445 		if (ret)
446 			break;
447 	}
448 	btrfs_release_path(path);
449 
450 	/*
451 	 * pass 2: read all qgroup relations
452 	 */
453 	key.objectid = 0;
454 	key.type = BTRFS_QGROUP_RELATION_KEY;
455 	key.offset = 0;
456 	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
457 	if (ret)
458 		goto out;
459 	while (1) {
460 		slot = path->slots[0];
461 		l = path->nodes[0];
462 		btrfs_item_key_to_cpu(l, &found_key, slot);
463 
464 		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
465 			goto next2;
466 
467 		if (found_key.objectid > found_key.offset) {
468 			/* parent <- member, not needed to build config */
469 			/* FIXME should we omit the key completely? */
470 			goto next2;
471 		}
472 
473 		ret = add_relation_rb(fs_info, found_key.objectid,
474 				      found_key.offset);
475 		if (ret == -ENOENT) {
476 			btrfs_warn(fs_info,
477 				"orphan qgroup relation 0x%llx->0x%llx",
478 				found_key.objectid, found_key.offset);
479 			ret = 0;	/* ignore the error */
480 		}
481 		if (ret)
482 			goto out;
483 next2:
484 		ret = btrfs_next_item(quota_root, path);
485 		if (ret < 0)
486 			goto out;
487 		if (ret)
488 			break;
489 	}
490 out:
491 	fs_info->qgroup_flags |= flags;
492 	if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
493 		clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
494 	else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
495 		 ret >= 0)
496 		ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
497 	btrfs_free_path(path);
498 
499 	if (ret < 0) {
500 		ulist_free(fs_info->qgroup_ulist);
501 		fs_info->qgroup_ulist = NULL;
502 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
503 	}
504 
505 	return ret < 0 ? ret : 0;
506 }
507 
508 /*
509  * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(),
510  * first two are in single-threaded paths.And for the third one, we have set
511  * quota_root to be null with qgroup_lock held before, so it is safe to clean
512  * up the in-memory structures without qgroup_lock held.
513  */
514 void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
515 {
516 	struct rb_node *n;
517 	struct btrfs_qgroup *qgroup;
518 
519 	while ((n = rb_first(&fs_info->qgroup_tree))) {
520 		qgroup = rb_entry(n, struct btrfs_qgroup, node);
521 		rb_erase(n, &fs_info->qgroup_tree);
522 		__del_qgroup_rb(qgroup);
523 	}
524 	/*
525 	 * We call btrfs_free_qgroup_config() when unmounting
526 	 * filesystem and disabling quota, so we set qgroup_ulist
527 	 * to be null here to avoid double free.
528 	 */
529 	ulist_free(fs_info->qgroup_ulist);
530 	fs_info->qgroup_ulist = NULL;
531 }
532 
533 static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
534 				    u64 dst)
535 {
536 	int ret;
537 	struct btrfs_root *quota_root = trans->fs_info->quota_root;
538 	struct btrfs_path *path;
539 	struct btrfs_key key;
540 
541 	path = btrfs_alloc_path();
542 	if (!path)
543 		return -ENOMEM;
544 
545 	key.objectid = src;
546 	key.type = BTRFS_QGROUP_RELATION_KEY;
547 	key.offset = dst;
548 
549 	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);
550 
551 	btrfs_mark_buffer_dirty(path->nodes[0]);
552 
553 	btrfs_free_path(path);
554 	return ret;
555 }
556 
557 static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
558 				    u64 dst)
559 {
560 	int ret;
561 	struct btrfs_root *quota_root = trans->fs_info->quota_root;
562 	struct btrfs_path *path;
563 	struct btrfs_key key;
564 
565 	path = btrfs_alloc_path();
566 	if (!path)
567 		return -ENOMEM;
568 
569 	key.objectid = src;
570 	key.type = BTRFS_QGROUP_RELATION_KEY;
571 	key.offset = dst;
572 
573 	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
574 	if (ret < 0)
575 		goto out;
576 
577 	if (ret > 0) {
578 		ret = -ENOENT;
579 		goto out;
580 	}
581 
582 	ret = btrfs_del_item(trans, quota_root, path);
583 out:
584 	btrfs_free_path(path);
585 	return ret;
586 }
587 
588 static int add_qgroup_item(struct btrfs_trans_handle *trans,
589 			   struct btrfs_root *quota_root, u64 qgroupid)
590 {
591 	int ret;
592 	struct btrfs_path *path;
593 	struct btrfs_qgroup_info_item *qgroup_info;
594 	struct btrfs_qgroup_limit_item *qgroup_limit;
595 	struct extent_buffer *leaf;
596 	struct btrfs_key key;
597 
598 	if (btrfs_is_testing(quota_root->fs_info))
599 		return 0;
600 
601 	path = btrfs_alloc_path();
602 	if (!path)
603 		return -ENOMEM;
604 
605 	key.objectid = 0;
606 	key.type = BTRFS_QGROUP_INFO_KEY;
607 	key.offset = qgroupid;
608 
609 	/*
610 	 * Avoid a transaction abort by catching -EEXIST here. In that
611 	 * case, we proceed by re-initializing the existing structure
612 	 * on disk.
613 	 */
614 
615 	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
616 				      sizeof(*qgroup_info));
617 	if (ret && ret != -EEXIST)
618 		goto out;
619 
620 	leaf = path->nodes[0];
621 	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
622 				 struct btrfs_qgroup_info_item);
623 	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
624 	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
625 	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
626 	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
627 	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);
628 
629 	btrfs_mark_buffer_dirty(leaf);
630 
631 	btrfs_release_path(path);
632 
633 	key.type = BTRFS_QGROUP_LIMIT_KEY;
634 	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
635 				      sizeof(*qgroup_limit));
636 	if (ret && ret != -EEXIST)
637 		goto out;
638 
639 	leaf = path->nodes[0];
640 	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
641 				  struct btrfs_qgroup_limit_item);
642 	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
643 	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
644 	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
645 	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
646 	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);
647 
648 	btrfs_mark_buffer_dirty(leaf);
649 
650 	ret = 0;
651 out:
652 	btrfs_free_path(path);
653 	return ret;
654 }
655 
656 static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
657 {
658 	int ret;
659 	struct btrfs_root *quota_root = trans->fs_info->quota_root;
660 	struct btrfs_path *path;
661 	struct btrfs_key key;
662 
663 	path = btrfs_alloc_path();
664 	if (!path)
665 		return -ENOMEM;
666 
667 	key.objectid = 0;
668 	key.type = BTRFS_QGROUP_INFO_KEY;
669 	key.offset = qgroupid;
670 	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
671 	if (ret < 0)
672 		goto out;
673 
674 	if (ret > 0) {
675 		ret = -ENOENT;
676 		goto out;
677 	}
678 
679 	ret = btrfs_del_item(trans, quota_root, path);
680 	if (ret)
681 		goto out;
682 
683 	btrfs_release_path(path);
684 
685 	key.type = BTRFS_QGROUP_LIMIT_KEY;
686 	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
687 	if (ret < 0)
688 		goto out;
689 
690 	if (ret > 0) {
691 		ret = -ENOENT;
692 		goto out;
693 	}
694 
695 	ret = btrfs_del_item(trans, quota_root, path);
696 
697 out:
698 	btrfs_free_path(path);
699 	return ret;
700 }
701 
702 static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
703 				    struct btrfs_qgroup *qgroup)
704 {
705 	struct btrfs_root *quota_root = trans->fs_info->quota_root;
706 	struct btrfs_path *path;
707 	struct btrfs_key key;
708 	struct extent_buffer *l;
709 	struct btrfs_qgroup_limit_item *qgroup_limit;
710 	int ret;
711 	int slot;
712 
713 	key.objectid = 0;
714 	key.type = BTRFS_QGROUP_LIMIT_KEY;
715 	key.offset = qgroup->qgroupid;
716 
717 	path = btrfs_alloc_path();
718 	if (!path)
719 		return -ENOMEM;
720 
721 	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
722 	if (ret > 0)
723 		ret = -ENOENT;
724 
725 	if (ret)
726 		goto out;
727 
728 	l = path->nodes[0];
729 	slot = path->slots[0];
730 	qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
731 	btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
732 	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
733 	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
734 	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
735 	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);
736 
737 	btrfs_mark_buffer_dirty(l);
738 
739 out:
740 	btrfs_free_path(path);
741 	return ret;
742 }
743 
744 static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
745 				   struct btrfs_qgroup *qgroup)
746 {
747 	struct btrfs_fs_info *fs_info = trans->fs_info;
748 	struct btrfs_root *quota_root = fs_info->quota_root;
749 	struct btrfs_path *path;
750 	struct btrfs_key key;
751 	struct extent_buffer *l;
752 	struct btrfs_qgroup_info_item *qgroup_info;
753 	int ret;
754 	int slot;
755 
756 	if (btrfs_is_testing(fs_info))
757 		return 0;
758 
759 	key.objectid = 0;
760 	key.type = BTRFS_QGROUP_INFO_KEY;
761 	key.offset = qgroup->qgroupid;
762 
763 	path = btrfs_alloc_path();
764 	if (!path)
765 		return -ENOMEM;
766 
767 	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
768 	if (ret > 0)
769 		ret = -ENOENT;
770 
771 	if (ret)
772 		goto out;
773 
774 	l = path->nodes[0];
775 	slot = path->slots[0];
776 	qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
777 	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
778 	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
779 	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
780 	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
781 	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);
782 
783 	btrfs_mark_buffer_dirty(l);
784 
785 out:
786 	btrfs_free_path(path);
787 	return ret;
788 }
789 
790 static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
791 {
792 	struct btrfs_fs_info *fs_info = trans->fs_info;
793 	struct btrfs_root *quota_root = fs_info->quota_root;
794 	struct btrfs_path *path;
795 	struct btrfs_key key;
796 	struct extent_buffer *l;
797 	struct btrfs_qgroup_status_item *ptr;
798 	int ret;
799 	int slot;
800 
801 	key.objectid = 0;
802 	key.type = BTRFS_QGROUP_STATUS_KEY;
803 	key.offset = 0;
804 
805 	path = btrfs_alloc_path();
806 	if (!path)
807 		return -ENOMEM;
808 
809 	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
810 	if (ret > 0)
811 		ret = -ENOENT;
812 
813 	if (ret)
814 		goto out;
815 
816 	l = path->nodes[0];
817 	slot = path->slots[0];
818 	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
819 	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
820 	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
821 	btrfs_set_qgroup_status_rescan(l, ptr,
822 				fs_info->qgroup_rescan_progress.objectid);
823 
824 	btrfs_mark_buffer_dirty(l);
825 
826 out:
827 	btrfs_free_path(path);
828 	return ret;
829 }
830 
831 /*
832  * called with qgroup_lock held
833  */
834 static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
835 				  struct btrfs_root *root)
836 {
837 	struct btrfs_path *path;
838 	struct btrfs_key key;
839 	struct extent_buffer *leaf = NULL;
840 	int ret;
841 	int nr = 0;
842 
843 	path = btrfs_alloc_path();
844 	if (!path)
845 		return -ENOMEM;
846 
847 	path->leave_spinning = 1;
848 
849 	key.objectid = 0;
850 	key.offset = 0;
851 	key.type = 0;
852 
853 	while (1) {
854 		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
855 		if (ret < 0)
856 			goto out;
857 		leaf = path->nodes[0];
858 		nr = btrfs_header_nritems(leaf);
859 		if (!nr)
860 			break;
861 		/*
862 		 * delete the leaf one by one
863 		 * since the whole tree is going
864 		 * to be deleted.
865 		 */
866 		path->slots[0] = 0;
867 		ret = btrfs_del_items(trans, root, path, 0, nr);
868 		if (ret)
869 			goto out;
870 
871 		btrfs_release_path(path);
872 	}
873 	ret = 0;
874 out:
875 	btrfs_free_path(path);
876 	return ret;
877 }
878 
879 int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
880 {
881 	struct btrfs_root *quota_root;
882 	struct btrfs_root *tree_root = fs_info->tree_root;
883 	struct btrfs_path *path = NULL;
884 	struct btrfs_qgroup_status_item *ptr;
885 	struct extent_buffer *leaf;
886 	struct btrfs_key key;
887 	struct btrfs_key found_key;
888 	struct btrfs_qgroup *qgroup = NULL;
889 	struct btrfs_trans_handle *trans = NULL;
890 	int ret = 0;
891 	int slot;
892 
893 	mutex_lock(&fs_info->qgroup_ioctl_lock);
894 	if (fs_info->quota_root)
895 		goto out;
896 
897 	/*
898 	 * 1 for quota root item
899 	 * 1 for BTRFS_QGROUP_STATUS item
900 	 *
901 	 * Yet we also need 2*n items for a QGROUP_INFO/QGROUP_LIMIT items
902 	 * per subvolume. However those are not currently reserved since it
903 	 * would be a lot of overkill.
904 	 */
905 	trans = btrfs_start_transaction(tree_root, 2);
906 	if (IS_ERR(trans)) {
907 		ret = PTR_ERR(trans);
908 		trans = NULL;
909 		goto out;
910 	}
911 
912 	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
913 	if (!fs_info->qgroup_ulist) {
914 		ret = -ENOMEM;
915 		btrfs_abort_transaction(trans, ret);
916 		goto out;
917 	}
918 
919 	/*
920 	 * initially create the quota tree
921 	 */
922 	quota_root = btrfs_create_tree(trans, fs_info,
923 				       BTRFS_QUOTA_TREE_OBJECTID);
924 	if (IS_ERR(quota_root)) {
925 		ret =  PTR_ERR(quota_root);
926 		btrfs_abort_transaction(trans, ret);
927 		goto out;
928 	}
929 
930 	path = btrfs_alloc_path();
931 	if (!path) {
932 		ret = -ENOMEM;
933 		btrfs_abort_transaction(trans, ret);
934 		goto out_free_root;
935 	}
936 
937 	key.objectid = 0;
938 	key.type = BTRFS_QGROUP_STATUS_KEY;
939 	key.offset = 0;
940 
941 	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
942 				      sizeof(*ptr));
943 	if (ret) {
944 		btrfs_abort_transaction(trans, ret);
945 		goto out_free_path;
946 	}
947 
948 	leaf = path->nodes[0];
949 	ptr = btrfs_item_ptr(leaf, path->slots[0],
950 				 struct btrfs_qgroup_status_item);
951 	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
952 	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
953 	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
954 				BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
955 	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
956 	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);
957 
958 	btrfs_mark_buffer_dirty(leaf);
959 
960 	key.objectid = 0;
961 	key.type = BTRFS_ROOT_REF_KEY;
962 	key.offset = 0;
963 
964 	btrfs_release_path(path);
965 	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
966 	if (ret > 0)
967 		goto out_add_root;
968 	if (ret < 0) {
969 		btrfs_abort_transaction(trans, ret);
970 		goto out_free_path;
971 	}
972 
973 	while (1) {
974 		slot = path->slots[0];
975 		leaf = path->nodes[0];
976 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
977 
978 		if (found_key.type == BTRFS_ROOT_REF_KEY) {
979 			ret = add_qgroup_item(trans, quota_root,
980 					      found_key.offset);
981 			if (ret) {
982 				btrfs_abort_transaction(trans, ret);
983 				goto out_free_path;
984 			}
985 
986 			qgroup = add_qgroup_rb(fs_info, found_key.offset);
987 			if (IS_ERR(qgroup)) {
988 				ret = PTR_ERR(qgroup);
989 				btrfs_abort_transaction(trans, ret);
990 				goto out_free_path;
991 			}
992 		}
993 		ret = btrfs_next_item(tree_root, path);
994 		if (ret < 0) {
995 			btrfs_abort_transaction(trans, ret);
996 			goto out_free_path;
997 		}
998 		if (ret)
999 			break;
1000 	}
1001 
1002 out_add_root:
1003 	btrfs_release_path(path);
1004 	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
1005 	if (ret) {
1006 		btrfs_abort_transaction(trans, ret);
1007 		goto out_free_path;
1008 	}
1009 
1010 	qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
1011 	if (IS_ERR(qgroup)) {
1012 		ret = PTR_ERR(qgroup);
1013 		btrfs_abort_transaction(trans, ret);
1014 		goto out_free_path;
1015 	}
1016 
1017 	ret = btrfs_commit_transaction(trans);
1018 	trans = NULL;
1019 	if (ret)
1020 		goto out_free_path;
1021 
1022 	/*
1023 	 * Set quota enabled flag after committing the transaction, to avoid
1024 	 * deadlocks on fs_info->qgroup_ioctl_lock with concurrent snapshot
1025 	 * creation.
1026 	 */
1027 	spin_lock(&fs_info->qgroup_lock);
1028 	fs_info->quota_root = quota_root;
1029 	set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1030 	spin_unlock(&fs_info->qgroup_lock);
1031 
1032 	ret = qgroup_rescan_init(fs_info, 0, 1);
1033 	if (!ret) {
1034 	        qgroup_rescan_zero_tracking(fs_info);
1035 	        btrfs_queue_work(fs_info->qgroup_rescan_workers,
1036 	                         &fs_info->qgroup_rescan_work);
1037 	}
1038 
1039 out_free_path:
1040 	btrfs_free_path(path);
1041 out_free_root:
1042 	if (ret) {
1043 		free_extent_buffer(quota_root->node);
1044 		free_extent_buffer(quota_root->commit_root);
1045 		kfree(quota_root);
1046 	}
1047 out:
1048 	if (ret) {
1049 		ulist_free(fs_info->qgroup_ulist);
1050 		fs_info->qgroup_ulist = NULL;
1051 		if (trans)
1052 			btrfs_end_transaction(trans);
1053 	}
1054 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1055 	return ret;
1056 }
1057 
1058 int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
1059 {
1060 	struct btrfs_root *quota_root;
1061 	struct btrfs_trans_handle *trans = NULL;
1062 	int ret = 0;
1063 
1064 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1065 	if (!fs_info->quota_root)
1066 		goto out;
1067 
1068 	/*
1069 	 * 1 For the root item
1070 	 *
1071 	 * We should also reserve enough items for the quota tree deletion in
1072 	 * btrfs_clean_quota_tree but this is not done.
1073 	 */
1074 	trans = btrfs_start_transaction(fs_info->tree_root, 1);
1075 	if (IS_ERR(trans)) {
1076 		ret = PTR_ERR(trans);
1077 		goto out;
1078 	}
1079 
1080 	clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1081 	btrfs_qgroup_wait_for_completion(fs_info, false);
1082 	spin_lock(&fs_info->qgroup_lock);
1083 	quota_root = fs_info->quota_root;
1084 	fs_info->quota_root = NULL;
1085 	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
1086 	spin_unlock(&fs_info->qgroup_lock);
1087 
1088 	btrfs_free_qgroup_config(fs_info);
1089 
1090 	ret = btrfs_clean_quota_tree(trans, quota_root);
1091 	if (ret) {
1092 		btrfs_abort_transaction(trans, ret);
1093 		goto end_trans;
1094 	}
1095 
1096 	ret = btrfs_del_root(trans, &quota_root->root_key);
1097 	if (ret) {
1098 		btrfs_abort_transaction(trans, ret);
1099 		goto end_trans;
1100 	}
1101 
1102 	list_del(&quota_root->dirty_list);
1103 
1104 	btrfs_tree_lock(quota_root->node);
1105 	clean_tree_block(fs_info, quota_root->node);
1106 	btrfs_tree_unlock(quota_root->node);
1107 	btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);
1108 
1109 	free_extent_buffer(quota_root->node);
1110 	free_extent_buffer(quota_root->commit_root);
1111 	kfree(quota_root);
1112 
1113 end_trans:
1114 	ret = btrfs_end_transaction(trans);
1115 out:
1116 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1117 	return ret;
1118 }
1119 
1120 static void qgroup_dirty(struct btrfs_fs_info *fs_info,
1121 			 struct btrfs_qgroup *qgroup)
1122 {
1123 	if (list_empty(&qgroup->dirty))
1124 		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
1125 }
1126 
1127 /*
1128  * The easy accounting, we're updating qgroup relationship whose child qgroup
1129  * only has exclusive extents.
1130  *
1131  * In this case, all exclusive extents will also be exclusive for parent, so
1132  * excl/rfer just get added/removed.
1133  *
1134  * So is qgroup reservation space, which should also be added/removed to
1135  * parent.
1136  * Or when child tries to release reservation space, parent will underflow its
1137  * reservation (for relationship adding case).
1138  *
1139  * Caller should hold fs_info->qgroup_lock.
1140  */
1141 static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
1142 				    struct ulist *tmp, u64 ref_root,
1143 				    struct btrfs_qgroup *src, int sign)
1144 {
1145 	struct btrfs_qgroup *qgroup;
1146 	struct btrfs_qgroup_list *glist;
1147 	struct ulist_node *unode;
1148 	struct ulist_iterator uiter;
1149 	u64 num_bytes = src->excl;
1150 	int ret = 0;
1151 
1152 	qgroup = find_qgroup_rb(fs_info, ref_root);
1153 	if (!qgroup)
1154 		goto out;
1155 
1156 	qgroup->rfer += sign * num_bytes;
1157 	qgroup->rfer_cmpr += sign * num_bytes;
1158 
1159 	WARN_ON(sign < 0 && qgroup->excl < num_bytes);
1160 	qgroup->excl += sign * num_bytes;
1161 	qgroup->excl_cmpr += sign * num_bytes;
1162 
1163 	if (sign > 0)
1164 		qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
1165 	else
1166 		qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
1167 
1168 	qgroup_dirty(fs_info, qgroup);
1169 
1170 	/* Get all of the parent groups that contain this qgroup */
1171 	list_for_each_entry(glist, &qgroup->groups, next_group) {
1172 		ret = ulist_add(tmp, glist->group->qgroupid,
1173 				qgroup_to_aux(glist->group), GFP_ATOMIC);
1174 		if (ret < 0)
1175 			goto out;
1176 	}
1177 
1178 	/* Iterate all of the parents and adjust their reference counts */
1179 	ULIST_ITER_INIT(&uiter);
1180 	while ((unode = ulist_next(tmp, &uiter))) {
1181 		qgroup = unode_aux_to_qgroup(unode);
1182 		qgroup->rfer += sign * num_bytes;
1183 		qgroup->rfer_cmpr += sign * num_bytes;
1184 		WARN_ON(sign < 0 && qgroup->excl < num_bytes);
1185 		qgroup->excl += sign * num_bytes;
1186 		if (sign > 0)
1187 			qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
1188 		else
1189 			qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
1190 		qgroup->excl_cmpr += sign * num_bytes;
1191 		qgroup_dirty(fs_info, qgroup);
1192 
1193 		/* Add any parents of the parents */
1194 		list_for_each_entry(glist, &qgroup->groups, next_group) {
1195 			ret = ulist_add(tmp, glist->group->qgroupid,
1196 					qgroup_to_aux(glist->group), GFP_ATOMIC);
1197 			if (ret < 0)
1198 				goto out;
1199 		}
1200 	}
1201 	ret = 0;
1202 out:
1203 	return ret;
1204 }
1205 
1206 
1207 /*
1208  * Quick path for updating qgroup with only excl refs.
1209  *
1210  * In that case, just update all parent will be enough.
1211  * Or we needs to do a full rescan.
1212  * Caller should also hold fs_info->qgroup_lock.
1213  *
1214  * Return 0 for quick update, return >0 for need to full rescan
1215  * and mark INCONSISTENT flag.
1216  * Return < 0 for other error.
1217  */
1218 static int quick_update_accounting(struct btrfs_fs_info *fs_info,
1219 				   struct ulist *tmp, u64 src, u64 dst,
1220 				   int sign)
1221 {
1222 	struct btrfs_qgroup *qgroup;
1223 	int ret = 1;
1224 	int err = 0;
1225 
1226 	qgroup = find_qgroup_rb(fs_info, src);
1227 	if (!qgroup)
1228 		goto out;
1229 	if (qgroup->excl == qgroup->rfer) {
1230 		ret = 0;
1231 		err = __qgroup_excl_accounting(fs_info, tmp, dst,
1232 					       qgroup, sign);
1233 		if (err < 0) {
1234 			ret = err;
1235 			goto out;
1236 		}
1237 	}
1238 out:
1239 	if (ret)
1240 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1241 	return ret;
1242 }
1243 
1244 int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
1245 			      u64 dst)
1246 {
1247 	struct btrfs_fs_info *fs_info = trans->fs_info;
1248 	struct btrfs_root *quota_root;
1249 	struct btrfs_qgroup *parent;
1250 	struct btrfs_qgroup *member;
1251 	struct btrfs_qgroup_list *list;
1252 	struct ulist *tmp;
1253 	int ret = 0;
1254 
1255 	/* Check the level of src and dst first */
1256 	if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
1257 		return -EINVAL;
1258 
1259 	tmp = ulist_alloc(GFP_KERNEL);
1260 	if (!tmp)
1261 		return -ENOMEM;
1262 
1263 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1264 	quota_root = fs_info->quota_root;
1265 	if (!quota_root) {
1266 		ret = -EINVAL;
1267 		goto out;
1268 	}
1269 	member = find_qgroup_rb(fs_info, src);
1270 	parent = find_qgroup_rb(fs_info, dst);
1271 	if (!member || !parent) {
1272 		ret = -EINVAL;
1273 		goto out;
1274 	}
1275 
1276 	/* check if such qgroup relation exist firstly */
1277 	list_for_each_entry(list, &member->groups, next_group) {
1278 		if (list->group == parent) {
1279 			ret = -EEXIST;
1280 			goto out;
1281 		}
1282 	}
1283 
1284 	ret = add_qgroup_relation_item(trans, src, dst);
1285 	if (ret)
1286 		goto out;
1287 
1288 	ret = add_qgroup_relation_item(trans, dst, src);
1289 	if (ret) {
1290 		del_qgroup_relation_item(trans, src, dst);
1291 		goto out;
1292 	}
1293 
1294 	spin_lock(&fs_info->qgroup_lock);
1295 	ret = add_relation_rb(fs_info, src, dst);
1296 	if (ret < 0) {
1297 		spin_unlock(&fs_info->qgroup_lock);
1298 		goto out;
1299 	}
1300 	ret = quick_update_accounting(fs_info, tmp, src, dst, 1);
1301 	spin_unlock(&fs_info->qgroup_lock);
1302 out:
1303 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1304 	ulist_free(tmp);
1305 	return ret;
1306 }
1307 
1308 static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
1309 				 u64 dst)
1310 {
1311 	struct btrfs_fs_info *fs_info = trans->fs_info;
1312 	struct btrfs_root *quota_root;
1313 	struct btrfs_qgroup *parent;
1314 	struct btrfs_qgroup *member;
1315 	struct btrfs_qgroup_list *list;
1316 	struct ulist *tmp;
1317 	int ret = 0;
1318 	int err;
1319 
1320 	tmp = ulist_alloc(GFP_KERNEL);
1321 	if (!tmp)
1322 		return -ENOMEM;
1323 
1324 	quota_root = fs_info->quota_root;
1325 	if (!quota_root) {
1326 		ret = -EINVAL;
1327 		goto out;
1328 	}
1329 
1330 	member = find_qgroup_rb(fs_info, src);
1331 	parent = find_qgroup_rb(fs_info, dst);
1332 	if (!member || !parent) {
1333 		ret = -EINVAL;
1334 		goto out;
1335 	}
1336 
1337 	/* check if such qgroup relation exist firstly */
1338 	list_for_each_entry(list, &member->groups, next_group) {
1339 		if (list->group == parent)
1340 			goto exist;
1341 	}
1342 	ret = -ENOENT;
1343 	goto out;
1344 exist:
1345 	ret = del_qgroup_relation_item(trans, src, dst);
1346 	err = del_qgroup_relation_item(trans, dst, src);
1347 	if (err && !ret)
1348 		ret = err;
1349 
1350 	spin_lock(&fs_info->qgroup_lock);
1351 	del_relation_rb(fs_info, src, dst);
1352 	ret = quick_update_accounting(fs_info, tmp, src, dst, -1);
1353 	spin_unlock(&fs_info->qgroup_lock);
1354 out:
1355 	ulist_free(tmp);
1356 	return ret;
1357 }
1358 
1359 int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
1360 			      u64 dst)
1361 {
1362 	struct btrfs_fs_info *fs_info = trans->fs_info;
1363 	int ret = 0;
1364 
1365 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1366 	ret = __del_qgroup_relation(trans, src, dst);
1367 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1368 
1369 	return ret;
1370 }
1371 
1372 int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
1373 {
1374 	struct btrfs_fs_info *fs_info = trans->fs_info;
1375 	struct btrfs_root *quota_root;
1376 	struct btrfs_qgroup *qgroup;
1377 	int ret = 0;
1378 
1379 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1380 	quota_root = fs_info->quota_root;
1381 	if (!quota_root) {
1382 		ret = -EINVAL;
1383 		goto out;
1384 	}
1385 	qgroup = find_qgroup_rb(fs_info, qgroupid);
1386 	if (qgroup) {
1387 		ret = -EEXIST;
1388 		goto out;
1389 	}
1390 
1391 	ret = add_qgroup_item(trans, quota_root, qgroupid);
1392 	if (ret)
1393 		goto out;
1394 
1395 	spin_lock(&fs_info->qgroup_lock);
1396 	qgroup = add_qgroup_rb(fs_info, qgroupid);
1397 	spin_unlock(&fs_info->qgroup_lock);
1398 
1399 	if (IS_ERR(qgroup))
1400 		ret = PTR_ERR(qgroup);
1401 out:
1402 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1403 	return ret;
1404 }
1405 
1406 int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
1407 {
1408 	struct btrfs_fs_info *fs_info = trans->fs_info;
1409 	struct btrfs_root *quota_root;
1410 	struct btrfs_qgroup *qgroup;
1411 	struct btrfs_qgroup_list *list;
1412 	int ret = 0;
1413 
1414 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1415 	quota_root = fs_info->quota_root;
1416 	if (!quota_root) {
1417 		ret = -EINVAL;
1418 		goto out;
1419 	}
1420 
1421 	qgroup = find_qgroup_rb(fs_info, qgroupid);
1422 	if (!qgroup) {
1423 		ret = -ENOENT;
1424 		goto out;
1425 	}
1426 
1427 	/* Check if there are no children of this qgroup */
1428 	if (!list_empty(&qgroup->members)) {
1429 		ret = -EBUSY;
1430 		goto out;
1431 	}
1432 
1433 	ret = del_qgroup_item(trans, qgroupid);
1434 	if (ret && ret != -ENOENT)
1435 		goto out;
1436 
1437 	while (!list_empty(&qgroup->groups)) {
1438 		list = list_first_entry(&qgroup->groups,
1439 					struct btrfs_qgroup_list, next_group);
1440 		ret = __del_qgroup_relation(trans, qgroupid,
1441 					    list->group->qgroupid);
1442 		if (ret)
1443 			goto out;
1444 	}
1445 
1446 	spin_lock(&fs_info->qgroup_lock);
1447 	del_qgroup_rb(fs_info, qgroupid);
1448 	spin_unlock(&fs_info->qgroup_lock);
1449 out:
1450 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1451 	return ret;
1452 }
1453 
1454 int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
1455 		       struct btrfs_qgroup_limit *limit)
1456 {
1457 	struct btrfs_fs_info *fs_info = trans->fs_info;
1458 	struct btrfs_root *quota_root;
1459 	struct btrfs_qgroup *qgroup;
1460 	int ret = 0;
1461 	/* Sometimes we would want to clear the limit on this qgroup.
1462 	 * To meet this requirement, we treat the -1 as a special value
1463 	 * which tell kernel to clear the limit on this qgroup.
1464 	 */
1465 	const u64 CLEAR_VALUE = -1;
1466 
1467 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1468 	quota_root = fs_info->quota_root;
1469 	if (!quota_root) {
1470 		ret = -EINVAL;
1471 		goto out;
1472 	}
1473 
1474 	qgroup = find_qgroup_rb(fs_info, qgroupid);
1475 	if (!qgroup) {
1476 		ret = -ENOENT;
1477 		goto out;
1478 	}
1479 
1480 	spin_lock(&fs_info->qgroup_lock);
1481 	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
1482 		if (limit->max_rfer == CLEAR_VALUE) {
1483 			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
1484 			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
1485 			qgroup->max_rfer = 0;
1486 		} else {
1487 			qgroup->max_rfer = limit->max_rfer;
1488 		}
1489 	}
1490 	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
1491 		if (limit->max_excl == CLEAR_VALUE) {
1492 			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
1493 			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
1494 			qgroup->max_excl = 0;
1495 		} else {
1496 			qgroup->max_excl = limit->max_excl;
1497 		}
1498 	}
1499 	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
1500 		if (limit->rsv_rfer == CLEAR_VALUE) {
1501 			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
1502 			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
1503 			qgroup->rsv_rfer = 0;
1504 		} else {
1505 			qgroup->rsv_rfer = limit->rsv_rfer;
1506 		}
1507 	}
1508 	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
1509 		if (limit->rsv_excl == CLEAR_VALUE) {
1510 			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
1511 			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
1512 			qgroup->rsv_excl = 0;
1513 		} else {
1514 			qgroup->rsv_excl = limit->rsv_excl;
1515 		}
1516 	}
1517 	qgroup->lim_flags |= limit->flags;
1518 
1519 	spin_unlock(&fs_info->qgroup_lock);
1520 
1521 	ret = update_qgroup_limit_item(trans, qgroup);
1522 	if (ret) {
1523 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1524 		btrfs_info(fs_info, "unable to update quota limit for %llu",
1525 		       qgroupid);
1526 	}
1527 
1528 out:
1529 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1530 	return ret;
1531 }
1532 
1533 int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
1534 				struct btrfs_delayed_ref_root *delayed_refs,
1535 				struct btrfs_qgroup_extent_record *record)
1536 {
1537 	struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
1538 	struct rb_node *parent_node = NULL;
1539 	struct btrfs_qgroup_extent_record *entry;
1540 	u64 bytenr = record->bytenr;
1541 
1542 	lockdep_assert_held(&delayed_refs->lock);
1543 	trace_btrfs_qgroup_trace_extent(fs_info, record);
1544 
1545 	while (*p) {
1546 		parent_node = *p;
1547 		entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
1548 				 node);
1549 		if (bytenr < entry->bytenr)
1550 			p = &(*p)->rb_left;
1551 		else if (bytenr > entry->bytenr)
1552 			p = &(*p)->rb_right;
1553 		else
1554 			return 1;
1555 	}
1556 
1557 	rb_link_node(&record->node, parent_node, p);
1558 	rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
1559 	return 0;
1560 }
1561 
1562 int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
1563 				   struct btrfs_qgroup_extent_record *qrecord)
1564 {
1565 	struct ulist *old_root;
1566 	u64 bytenr = qrecord->bytenr;
1567 	int ret;
1568 
1569 	ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &old_root, false);
1570 	if (ret < 0) {
1571 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1572 		btrfs_warn(fs_info,
1573 "error accounting new delayed refs extent (err code: %d), quota inconsistent",
1574 			ret);
1575 		return 0;
1576 	}
1577 
1578 	/*
1579 	 * Here we don't need to get the lock of
1580 	 * trans->transaction->delayed_refs, since inserted qrecord won't
1581 	 * be deleted, only qrecord->node may be modified (new qrecord insert)
1582 	 *
1583 	 * So modifying qrecord->old_roots is safe here
1584 	 */
1585 	qrecord->old_roots = old_root;
1586 	return 0;
1587 }
1588 
1589 int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
1590 			      u64 num_bytes, gfp_t gfp_flag)
1591 {
1592 	struct btrfs_fs_info *fs_info = trans->fs_info;
1593 	struct btrfs_qgroup_extent_record *record;
1594 	struct btrfs_delayed_ref_root *delayed_refs;
1595 	int ret;
1596 
1597 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)
1598 	    || bytenr == 0 || num_bytes == 0)
1599 		return 0;
1600 	record = kmalloc(sizeof(*record), gfp_flag);
1601 	if (!record)
1602 		return -ENOMEM;
1603 
1604 	delayed_refs = &trans->transaction->delayed_refs;
1605 	record->bytenr = bytenr;
1606 	record->num_bytes = num_bytes;
1607 	record->old_roots = NULL;
1608 
1609 	spin_lock(&delayed_refs->lock);
1610 	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
1611 	spin_unlock(&delayed_refs->lock);
1612 	if (ret > 0) {
1613 		kfree(record);
1614 		return 0;
1615 	}
1616 	return btrfs_qgroup_trace_extent_post(fs_info, record);
1617 }
1618 
1619 int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
1620 				  struct extent_buffer *eb)
1621 {
1622 	struct btrfs_fs_info *fs_info = trans->fs_info;
1623 	int nr = btrfs_header_nritems(eb);
1624 	int i, extent_type, ret;
1625 	struct btrfs_key key;
1626 	struct btrfs_file_extent_item *fi;
1627 	u64 bytenr, num_bytes;
1628 
1629 	/* We can be called directly from walk_up_proc() */
1630 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
1631 		return 0;
1632 
1633 	for (i = 0; i < nr; i++) {
1634 		btrfs_item_key_to_cpu(eb, &key, i);
1635 
1636 		if (key.type != BTRFS_EXTENT_DATA_KEY)
1637 			continue;
1638 
1639 		fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
1640 		/* filter out non qgroup-accountable extents  */
1641 		extent_type = btrfs_file_extent_type(eb, fi);
1642 
1643 		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
1644 			continue;
1645 
1646 		bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
1647 		if (!bytenr)
1648 			continue;
1649 
1650 		num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
1651 
1652 		ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes,
1653 						GFP_NOFS);
1654 		if (ret)
1655 			return ret;
1656 	}
1657 	cond_resched();
1658 	return 0;
1659 }
1660 
1661 /*
1662  * Walk up the tree from the bottom, freeing leaves and any interior
1663  * nodes which have had all slots visited. If a node (leaf or
1664  * interior) is freed, the node above it will have it's slot
1665  * incremented. The root node will never be freed.
1666  *
1667  * At the end of this function, we should have a path which has all
1668  * slots incremented to the next position for a search. If we need to
1669  * read a new node it will be NULL and the node above it will have the
1670  * correct slot selected for a later read.
1671  *
1672  * If we increment the root nodes slot counter past the number of
1673  * elements, 1 is returned to signal completion of the search.
1674  */
1675 static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
1676 {
1677 	int level = 0;
1678 	int nr, slot;
1679 	struct extent_buffer *eb;
1680 
1681 	if (root_level == 0)
1682 		return 1;
1683 
1684 	while (level <= root_level) {
1685 		eb = path->nodes[level];
1686 		nr = btrfs_header_nritems(eb);
1687 		path->slots[level]++;
1688 		slot = path->slots[level];
1689 		if (slot >= nr || level == 0) {
1690 			/*
1691 			 * Don't free the root -  we will detect this
1692 			 * condition after our loop and return a
1693 			 * positive value for caller to stop walking the tree.
1694 			 */
1695 			if (level != root_level) {
1696 				btrfs_tree_unlock_rw(eb, path->locks[level]);
1697 				path->locks[level] = 0;
1698 
1699 				free_extent_buffer(eb);
1700 				path->nodes[level] = NULL;
1701 				path->slots[level] = 0;
1702 			}
1703 		} else {
1704 			/*
1705 			 * We have a valid slot to walk back down
1706 			 * from. Stop here so caller can process these
1707 			 * new nodes.
1708 			 */
1709 			break;
1710 		}
1711 
1712 		level++;
1713 	}
1714 
1715 	eb = path->nodes[root_level];
1716 	if (path->slots[root_level] >= btrfs_header_nritems(eb))
1717 		return 1;
1718 
1719 	return 0;
1720 }
1721 
1722 /*
1723  * Helper function to trace a subtree tree block swap.
1724  *
1725  * The swap will happen in highest tree block, but there may be a lot of
1726  * tree blocks involved.
1727  *
1728  * For example:
1729  *  OO = Old tree blocks
1730  *  NN = New tree blocks allocated during balance
1731  *
1732  *           File tree (257)                  Reloc tree for 257
1733  * L2              OO                                NN
1734  *               /    \                            /    \
1735  * L1          OO      OO (a)                    OO      NN (a)
1736  *            / \     / \                       / \     / \
1737  * L0       OO   OO OO   OO                   OO   OO NN   NN
1738  *                  (b)  (c)                          (b)  (c)
1739  *
1740  * When calling qgroup_trace_extent_swap(), we will pass:
1741  * @src_eb = OO(a)
1742  * @dst_path = [ nodes[1] = NN(a), nodes[0] = NN(c) ]
1743  * @dst_level = 0
1744  * @root_level = 1
1745  *
1746  * In that case, qgroup_trace_extent_swap() will search from OO(a) to
1747  * reach OO(c), then mark both OO(c) and NN(c) as qgroup dirty.
1748  *
1749  * The main work of qgroup_trace_extent_swap() can be split into 3 parts:
1750  *
1751  * 1) Tree search from @src_eb
1752  *    It should acts as a simplified btrfs_search_slot().
1753  *    The key for search can be extracted from @dst_path->nodes[dst_level]
1754  *    (first key).
1755  *
1756  * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty
1757  *    NOTE: In above case, OO(a) and NN(a) won't be marked qgroup dirty.
1758  *    They should be marked during previous (@dst_level = 1) iteration.
1759  *
1760  * 3) Mark file extents in leaves dirty
1761  *    We don't have good way to pick out new file extents only.
1762  *    So we still follow the old method by scanning all file extents in
1763  *    the leave.
1764  *
1765  * This function can free us from keeping two paths, thus later we only need
1766  * to care about how to iterate all new tree blocks in reloc tree.
1767  */
1768 static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
1769 				    struct extent_buffer *src_eb,
1770 				    struct btrfs_path *dst_path,
1771 				    int dst_level, int root_level,
1772 				    bool trace_leaf)
1773 {
1774 	struct btrfs_key key;
1775 	struct btrfs_path *src_path;
1776 	struct btrfs_fs_info *fs_info = trans->fs_info;
1777 	u32 nodesize = fs_info->nodesize;
1778 	int cur_level = root_level;
1779 	int ret;
1780 
1781 	BUG_ON(dst_level > root_level);
1782 	/* Level mismatch */
1783 	if (btrfs_header_level(src_eb) != root_level)
1784 		return -EINVAL;
1785 
1786 	src_path = btrfs_alloc_path();
1787 	if (!src_path) {
1788 		ret = -ENOMEM;
1789 		goto out;
1790 	}
1791 
1792 	if (dst_level)
1793 		btrfs_node_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
1794 	else
1795 		btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
1796 
1797 	/* For src_path */
1798 	extent_buffer_get(src_eb);
1799 	src_path->nodes[root_level] = src_eb;
1800 	src_path->slots[root_level] = dst_path->slots[root_level];
1801 	src_path->locks[root_level] = 0;
1802 
1803 	/* A simplified version of btrfs_search_slot() */
1804 	while (cur_level >= dst_level) {
1805 		struct btrfs_key src_key;
1806 		struct btrfs_key dst_key;
1807 
1808 		if (src_path->nodes[cur_level] == NULL) {
1809 			struct btrfs_key first_key;
1810 			struct extent_buffer *eb;
1811 			int parent_slot;
1812 			u64 child_gen;
1813 			u64 child_bytenr;
1814 
1815 			eb = src_path->nodes[cur_level + 1];
1816 			parent_slot = src_path->slots[cur_level + 1];
1817 			child_bytenr = btrfs_node_blockptr(eb, parent_slot);
1818 			child_gen = btrfs_node_ptr_generation(eb, parent_slot);
1819 			btrfs_node_key_to_cpu(eb, &first_key, parent_slot);
1820 
1821 			eb = read_tree_block(fs_info, child_bytenr, child_gen,
1822 					     cur_level, &first_key);
1823 			if (IS_ERR(eb)) {
1824 				ret = PTR_ERR(eb);
1825 				goto out;
1826 			} else if (!extent_buffer_uptodate(eb)) {
1827 				free_extent_buffer(eb);
1828 				ret = -EIO;
1829 				goto out;
1830 			}
1831 
1832 			src_path->nodes[cur_level] = eb;
1833 
1834 			btrfs_tree_read_lock(eb);
1835 			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1836 			src_path->locks[cur_level] = BTRFS_READ_LOCK_BLOCKING;
1837 		}
1838 
1839 		src_path->slots[cur_level] = dst_path->slots[cur_level];
1840 		if (cur_level) {
1841 			btrfs_node_key_to_cpu(dst_path->nodes[cur_level],
1842 					&dst_key, dst_path->slots[cur_level]);
1843 			btrfs_node_key_to_cpu(src_path->nodes[cur_level],
1844 					&src_key, src_path->slots[cur_level]);
1845 		} else {
1846 			btrfs_item_key_to_cpu(dst_path->nodes[cur_level],
1847 					&dst_key, dst_path->slots[cur_level]);
1848 			btrfs_item_key_to_cpu(src_path->nodes[cur_level],
1849 					&src_key, src_path->slots[cur_level]);
1850 		}
1851 		/* Content mismatch, something went wrong */
1852 		if (btrfs_comp_cpu_keys(&dst_key, &src_key)) {
1853 			ret = -ENOENT;
1854 			goto out;
1855 		}
1856 		cur_level--;
1857 	}
1858 
1859 	/*
1860 	 * Now both @dst_path and @src_path have been populated, record the tree
1861 	 * blocks for qgroup accounting.
1862 	 */
1863 	ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start,
1864 			nodesize, GFP_NOFS);
1865 	if (ret < 0)
1866 		goto out;
1867 	ret = btrfs_qgroup_trace_extent(trans,
1868 			dst_path->nodes[dst_level]->start,
1869 			nodesize, GFP_NOFS);
1870 	if (ret < 0)
1871 		goto out;
1872 
1873 	/* Record leaf file extents */
1874 	if (dst_level == 0 && trace_leaf) {
1875 		ret = btrfs_qgroup_trace_leaf_items(trans, src_path->nodes[0]);
1876 		if (ret < 0)
1877 			goto out;
1878 		ret = btrfs_qgroup_trace_leaf_items(trans, dst_path->nodes[0]);
1879 	}
1880 out:
1881 	btrfs_free_path(src_path);
1882 	return ret;
1883 }
1884 
1885 /*
1886  * Helper function to do recursive generation-aware depth-first search, to
1887  * locate all new tree blocks in a subtree of reloc tree.
1888  *
1889  * E.g. (OO = Old tree blocks, NN = New tree blocks, whose gen == last_snapshot)
1890  *         reloc tree
1891  * L2         NN (a)
1892  *          /    \
1893  * L1    OO        NN (b)
1894  *      /  \      /  \
1895  * L0  OO  OO    OO  NN
1896  *               (c) (d)
1897  * If we pass:
1898  * @dst_path = [ nodes[1] = NN(b), nodes[0] = NULL ],
1899  * @cur_level = 1
1900  * @root_level = 1
1901  *
1902  * We will iterate through tree blocks NN(b), NN(d) and info qgroup to trace
1903  * above tree blocks along with their counter parts in file tree.
1904  * While during search, old tree blocks OO(c) will be skipped as tree block swap
1905  * won't affect OO(c).
1906  */
1907 static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans,
1908 					   struct extent_buffer *src_eb,
1909 					   struct btrfs_path *dst_path,
1910 					   int cur_level, int root_level,
1911 					   u64 last_snapshot, bool trace_leaf)
1912 {
1913 	struct btrfs_fs_info *fs_info = trans->fs_info;
1914 	struct extent_buffer *eb;
1915 	bool need_cleanup = false;
1916 	int ret = 0;
1917 	int i;
1918 
1919 	/* Level sanity check */
1920 	if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL ||
1921 	    root_level < 0 || root_level >= BTRFS_MAX_LEVEL ||
1922 	    root_level < cur_level) {
1923 		btrfs_err_rl(fs_info,
1924 			"%s: bad levels, cur_level=%d root_level=%d",
1925 			__func__, cur_level, root_level);
1926 		return -EUCLEAN;
1927 	}
1928 
1929 	/* Read the tree block if needed */
1930 	if (dst_path->nodes[cur_level] == NULL) {
1931 		struct btrfs_key first_key;
1932 		int parent_slot;
1933 		u64 child_gen;
1934 		u64 child_bytenr;
1935 
1936 		/*
1937 		 * dst_path->nodes[root_level] must be initialized before
1938 		 * calling this function.
1939 		 */
1940 		if (cur_level == root_level) {
1941 			btrfs_err_rl(fs_info,
1942 	"%s: dst_path->nodes[%d] not initialized, root_level=%d cur_level=%d",
1943 				__func__, root_level, root_level, cur_level);
1944 			return -EUCLEAN;
1945 		}
1946 
1947 		/*
1948 		 * We need to get child blockptr/gen from parent before we can
1949 		 * read it.
1950 		  */
1951 		eb = dst_path->nodes[cur_level + 1];
1952 		parent_slot = dst_path->slots[cur_level + 1];
1953 		child_bytenr = btrfs_node_blockptr(eb, parent_slot);
1954 		child_gen = btrfs_node_ptr_generation(eb, parent_slot);
1955 		btrfs_node_key_to_cpu(eb, &first_key, parent_slot);
1956 
1957 		/* This node is old, no need to trace */
1958 		if (child_gen < last_snapshot)
1959 			goto out;
1960 
1961 		eb = read_tree_block(fs_info, child_bytenr, child_gen,
1962 				     cur_level, &first_key);
1963 		if (IS_ERR(eb)) {
1964 			ret = PTR_ERR(eb);
1965 			goto out;
1966 		} else if (!extent_buffer_uptodate(eb)) {
1967 			free_extent_buffer(eb);
1968 			ret = -EIO;
1969 			goto out;
1970 		}
1971 
1972 		dst_path->nodes[cur_level] = eb;
1973 		dst_path->slots[cur_level] = 0;
1974 
1975 		btrfs_tree_read_lock(eb);
1976 		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1977 		dst_path->locks[cur_level] = BTRFS_READ_LOCK_BLOCKING;
1978 		need_cleanup = true;
1979 	}
1980 
1981 	/* Now record this tree block and its counter part for qgroups */
1982 	ret = qgroup_trace_extent_swap(trans, src_eb, dst_path, cur_level,
1983 				       root_level, trace_leaf);
1984 	if (ret < 0)
1985 		goto cleanup;
1986 
1987 	eb = dst_path->nodes[cur_level];
1988 
1989 	if (cur_level > 0) {
1990 		/* Iterate all child tree blocks */
1991 		for (i = 0; i < btrfs_header_nritems(eb); i++) {
1992 			/* Skip old tree blocks as they won't be swapped */
1993 			if (btrfs_node_ptr_generation(eb, i) < last_snapshot)
1994 				continue;
1995 			dst_path->slots[cur_level] = i;
1996 
1997 			/* Recursive call (at most 7 times) */
1998 			ret = qgroup_trace_new_subtree_blocks(trans, src_eb,
1999 					dst_path, cur_level - 1, root_level,
2000 					last_snapshot, trace_leaf);
2001 			if (ret < 0)
2002 				goto cleanup;
2003 		}
2004 	}
2005 
2006 cleanup:
2007 	if (need_cleanup) {
2008 		/* Clean up */
2009 		btrfs_tree_unlock_rw(dst_path->nodes[cur_level],
2010 				     dst_path->locks[cur_level]);
2011 		free_extent_buffer(dst_path->nodes[cur_level]);
2012 		dst_path->nodes[cur_level] = NULL;
2013 		dst_path->slots[cur_level] = 0;
2014 		dst_path->locks[cur_level] = 0;
2015 	}
2016 out:
2017 	return ret;
2018 }
2019 
2020 /*
2021  * Inform qgroup to trace subtree swap used in balance.
2022  *
2023  * Unlike btrfs_qgroup_trace_subtree(), this function will only trace
2024  * new tree blocks whose generation is equal to (or larger than) @last_snapshot.
2025  *
2026  * Will go down the tree block pointed by @dst_eb (pointed by @dst_parent and
2027  * @dst_slot), and find any tree blocks whose generation is at @last_snapshot,
2028  * and then go down @src_eb (pointed by @src_parent and @src_slot) to find
2029  * the counterpart of the tree block, then mark both tree blocks as qgroup dirty,
2030  * and skip all tree blocks whose generation is smaller than last_snapshot.
2031  *
2032  * This would skip tons of tree blocks of original btrfs_qgroup_trace_subtree(),
2033  * which could be the cause of very slow balance if the file tree is large.
2034  *
2035  * @src_parent, @src_slot: pointer to src (file tree) eb.
2036  * @dst_parent, @dst_slot: pointer to dst (reloc tree) eb.
2037  */
2038 int btrfs_qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
2039 				struct btrfs_block_group_cache *bg_cache,
2040 				struct extent_buffer *src_parent, int src_slot,
2041 				struct extent_buffer *dst_parent, int dst_slot,
2042 				u64 last_snapshot)
2043 {
2044 	struct btrfs_fs_info *fs_info = trans->fs_info;
2045 	struct btrfs_path *dst_path = NULL;
2046 	struct btrfs_key first_key;
2047 	struct extent_buffer *src_eb = NULL;
2048 	struct extent_buffer *dst_eb = NULL;
2049 	bool trace_leaf = false;
2050 	u64 child_gen;
2051 	u64 child_bytenr;
2052 	int level;
2053 	int ret;
2054 
2055 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2056 		return 0;
2057 
2058 	/* Check parameter order */
2059 	if (btrfs_node_ptr_generation(src_parent, src_slot) >
2060 	    btrfs_node_ptr_generation(dst_parent, dst_slot)) {
2061 		btrfs_err_rl(fs_info,
2062 		"%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__,
2063 			btrfs_node_ptr_generation(src_parent, src_slot),
2064 			btrfs_node_ptr_generation(dst_parent, dst_slot));
2065 		return -EUCLEAN;
2066 	}
2067 
2068 	/*
2069 	 * Only trace leaf if we're relocating data block groups, this could
2070 	 * reduce tons of data extents tracing for meta/sys bg relocation.
2071 	 */
2072 	if (bg_cache->flags & BTRFS_BLOCK_GROUP_DATA)
2073 		trace_leaf = true;
2074 	/* Read out real @src_eb, pointed by @src_parent and @src_slot */
2075 	child_bytenr = btrfs_node_blockptr(src_parent, src_slot);
2076 	child_gen = btrfs_node_ptr_generation(src_parent, src_slot);
2077 	btrfs_node_key_to_cpu(src_parent, &first_key, src_slot);
2078 
2079 	src_eb = read_tree_block(fs_info, child_bytenr, child_gen,
2080 			btrfs_header_level(src_parent) - 1, &first_key);
2081 	if (IS_ERR(src_eb)) {
2082 		ret = PTR_ERR(src_eb);
2083 		goto out;
2084 	}
2085 
2086 	/* Read out real @dst_eb, pointed by @src_parent and @src_slot */
2087 	child_bytenr = btrfs_node_blockptr(dst_parent, dst_slot);
2088 	child_gen = btrfs_node_ptr_generation(dst_parent, dst_slot);
2089 	btrfs_node_key_to_cpu(dst_parent, &first_key, dst_slot);
2090 
2091 	dst_eb = read_tree_block(fs_info, child_bytenr, child_gen,
2092 			btrfs_header_level(dst_parent) - 1, &first_key);
2093 	if (IS_ERR(dst_eb)) {
2094 		ret = PTR_ERR(dst_eb);
2095 		goto out;
2096 	}
2097 
2098 	if (!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb)) {
2099 		ret = -EINVAL;
2100 		goto out;
2101 	}
2102 
2103 	level = btrfs_header_level(dst_eb);
2104 	dst_path = btrfs_alloc_path();
2105 	if (!dst_path) {
2106 		ret = -ENOMEM;
2107 		goto out;
2108 	}
2109 
2110 	/* For dst_path */
2111 	extent_buffer_get(dst_eb);
2112 	dst_path->nodes[level] = dst_eb;
2113 	dst_path->slots[level] = 0;
2114 	dst_path->locks[level] = 0;
2115 
2116 	/* Do the generation-aware breadth-first search */
2117 	ret = qgroup_trace_new_subtree_blocks(trans, src_eb, dst_path, level,
2118 					      level, last_snapshot, trace_leaf);
2119 	if (ret < 0)
2120 		goto out;
2121 	ret = 0;
2122 
2123 out:
2124 	free_extent_buffer(src_eb);
2125 	free_extent_buffer(dst_eb);
2126 	btrfs_free_path(dst_path);
2127 	if (ret < 0)
2128 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2129 	return ret;
2130 }
2131 
2132 int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
2133 			       struct extent_buffer *root_eb,
2134 			       u64 root_gen, int root_level)
2135 {
2136 	struct btrfs_fs_info *fs_info = trans->fs_info;
2137 	int ret = 0;
2138 	int level;
2139 	struct extent_buffer *eb = root_eb;
2140 	struct btrfs_path *path = NULL;
2141 
2142 	BUG_ON(root_level < 0 || root_level >= BTRFS_MAX_LEVEL);
2143 	BUG_ON(root_eb == NULL);
2144 
2145 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2146 		return 0;
2147 
2148 	if (!extent_buffer_uptodate(root_eb)) {
2149 		ret = btrfs_read_buffer(root_eb, root_gen, root_level, NULL);
2150 		if (ret)
2151 			goto out;
2152 	}
2153 
2154 	if (root_level == 0) {
2155 		ret = btrfs_qgroup_trace_leaf_items(trans, root_eb);
2156 		goto out;
2157 	}
2158 
2159 	path = btrfs_alloc_path();
2160 	if (!path)
2161 		return -ENOMEM;
2162 
2163 	/*
2164 	 * Walk down the tree.  Missing extent blocks are filled in as
2165 	 * we go. Metadata is accounted every time we read a new
2166 	 * extent block.
2167 	 *
2168 	 * When we reach a leaf, we account for file extent items in it,
2169 	 * walk back up the tree (adjusting slot pointers as we go)
2170 	 * and restart the search process.
2171 	 */
2172 	extent_buffer_get(root_eb); /* For path */
2173 	path->nodes[root_level] = root_eb;
2174 	path->slots[root_level] = 0;
2175 	path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
2176 walk_down:
2177 	level = root_level;
2178 	while (level >= 0) {
2179 		if (path->nodes[level] == NULL) {
2180 			struct btrfs_key first_key;
2181 			int parent_slot;
2182 			u64 child_gen;
2183 			u64 child_bytenr;
2184 
2185 			/*
2186 			 * We need to get child blockptr/gen from parent before
2187 			 * we can read it.
2188 			  */
2189 			eb = path->nodes[level + 1];
2190 			parent_slot = path->slots[level + 1];
2191 			child_bytenr = btrfs_node_blockptr(eb, parent_slot);
2192 			child_gen = btrfs_node_ptr_generation(eb, parent_slot);
2193 			btrfs_node_key_to_cpu(eb, &first_key, parent_slot);
2194 
2195 			eb = read_tree_block(fs_info, child_bytenr, child_gen,
2196 					     level, &first_key);
2197 			if (IS_ERR(eb)) {
2198 				ret = PTR_ERR(eb);
2199 				goto out;
2200 			} else if (!extent_buffer_uptodate(eb)) {
2201 				free_extent_buffer(eb);
2202 				ret = -EIO;
2203 				goto out;
2204 			}
2205 
2206 			path->nodes[level] = eb;
2207 			path->slots[level] = 0;
2208 
2209 			btrfs_tree_read_lock(eb);
2210 			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
2211 			path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
2212 
2213 			ret = btrfs_qgroup_trace_extent(trans, child_bytenr,
2214 							fs_info->nodesize,
2215 							GFP_NOFS);
2216 			if (ret)
2217 				goto out;
2218 		}
2219 
2220 		if (level == 0) {
2221 			ret = btrfs_qgroup_trace_leaf_items(trans,
2222 							    path->nodes[level]);
2223 			if (ret)
2224 				goto out;
2225 
2226 			/* Nonzero return here means we completed our search */
2227 			ret = adjust_slots_upwards(path, root_level);
2228 			if (ret)
2229 				break;
2230 
2231 			/* Restart search with new slots */
2232 			goto walk_down;
2233 		}
2234 
2235 		level--;
2236 	}
2237 
2238 	ret = 0;
2239 out:
2240 	btrfs_free_path(path);
2241 
2242 	return ret;
2243 }
2244 
2245 #define UPDATE_NEW	0
2246 #define UPDATE_OLD	1
2247 /*
2248  * Walk all of the roots that points to the bytenr and adjust their refcnts.
2249  */
2250 static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
2251 				struct ulist *roots, struct ulist *tmp,
2252 				struct ulist *qgroups, u64 seq, int update_old)
2253 {
2254 	struct ulist_node *unode;
2255 	struct ulist_iterator uiter;
2256 	struct ulist_node *tmp_unode;
2257 	struct ulist_iterator tmp_uiter;
2258 	struct btrfs_qgroup *qg;
2259 	int ret = 0;
2260 
2261 	if (!roots)
2262 		return 0;
2263 	ULIST_ITER_INIT(&uiter);
2264 	while ((unode = ulist_next(roots, &uiter))) {
2265 		qg = find_qgroup_rb(fs_info, unode->val);
2266 		if (!qg)
2267 			continue;
2268 
2269 		ulist_reinit(tmp);
2270 		ret = ulist_add(qgroups, qg->qgroupid, qgroup_to_aux(qg),
2271 				GFP_ATOMIC);
2272 		if (ret < 0)
2273 			return ret;
2274 		ret = ulist_add(tmp, qg->qgroupid, qgroup_to_aux(qg), GFP_ATOMIC);
2275 		if (ret < 0)
2276 			return ret;
2277 		ULIST_ITER_INIT(&tmp_uiter);
2278 		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
2279 			struct btrfs_qgroup_list *glist;
2280 
2281 			qg = unode_aux_to_qgroup(tmp_unode);
2282 			if (update_old)
2283 				btrfs_qgroup_update_old_refcnt(qg, seq, 1);
2284 			else
2285 				btrfs_qgroup_update_new_refcnt(qg, seq, 1);
2286 			list_for_each_entry(glist, &qg->groups, next_group) {
2287 				ret = ulist_add(qgroups, glist->group->qgroupid,
2288 						qgroup_to_aux(glist->group),
2289 						GFP_ATOMIC);
2290 				if (ret < 0)
2291 					return ret;
2292 				ret = ulist_add(tmp, glist->group->qgroupid,
2293 						qgroup_to_aux(glist->group),
2294 						GFP_ATOMIC);
2295 				if (ret < 0)
2296 					return ret;
2297 			}
2298 		}
2299 	}
2300 	return 0;
2301 }
2302 
2303 /*
2304  * Update qgroup rfer/excl counters.
2305  * Rfer update is easy, codes can explain themselves.
2306  *
2307  * Excl update is tricky, the update is split into 2 part.
2308  * Part 1: Possible exclusive <-> sharing detect:
2309  *	|	A	|	!A	|
2310  *  -------------------------------------
2311  *  B	|	*	|	-	|
2312  *  -------------------------------------
2313  *  !B	|	+	|	**	|
2314  *  -------------------------------------
2315  *
2316  * Conditions:
2317  * A:	cur_old_roots < nr_old_roots	(not exclusive before)
2318  * !A:	cur_old_roots == nr_old_roots	(possible exclusive before)
2319  * B:	cur_new_roots < nr_new_roots	(not exclusive now)
2320  * !B:	cur_new_roots == nr_new_roots	(possible exclusive now)
2321  *
2322  * Results:
2323  * +: Possible sharing -> exclusive	-: Possible exclusive -> sharing
2324  * *: Definitely not changed.		**: Possible unchanged.
2325  *
2326  * For !A and !B condition, the exception is cur_old/new_roots == 0 case.
2327  *
2328  * To make the logic clear, we first use condition A and B to split
2329  * combination into 4 results.
2330  *
2331  * Then, for result "+" and "-", check old/new_roots == 0 case, as in them
2332  * only on variant maybe 0.
2333  *
2334  * Lastly, check result **, since there are 2 variants maybe 0, split them
2335  * again(2x2).
2336  * But this time we don't need to consider other things, the codes and logic
2337  * is easy to understand now.
2338  */
2339 static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
2340 				  struct ulist *qgroups,
2341 				  u64 nr_old_roots,
2342 				  u64 nr_new_roots,
2343 				  u64 num_bytes, u64 seq)
2344 {
2345 	struct ulist_node *unode;
2346 	struct ulist_iterator uiter;
2347 	struct btrfs_qgroup *qg;
2348 	u64 cur_new_count, cur_old_count;
2349 
2350 	ULIST_ITER_INIT(&uiter);
2351 	while ((unode = ulist_next(qgroups, &uiter))) {
2352 		bool dirty = false;
2353 
2354 		qg = unode_aux_to_qgroup(unode);
2355 		cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
2356 		cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
2357 
2358 		trace_qgroup_update_counters(fs_info, qg, cur_old_count,
2359 					     cur_new_count);
2360 
2361 		/* Rfer update part */
2362 		if (cur_old_count == 0 && cur_new_count > 0) {
2363 			qg->rfer += num_bytes;
2364 			qg->rfer_cmpr += num_bytes;
2365 			dirty = true;
2366 		}
2367 		if (cur_old_count > 0 && cur_new_count == 0) {
2368 			qg->rfer -= num_bytes;
2369 			qg->rfer_cmpr -= num_bytes;
2370 			dirty = true;
2371 		}
2372 
2373 		/* Excl update part */
2374 		/* Exclusive/none -> shared case */
2375 		if (cur_old_count == nr_old_roots &&
2376 		    cur_new_count < nr_new_roots) {
2377 			/* Exclusive -> shared */
2378 			if (cur_old_count != 0) {
2379 				qg->excl -= num_bytes;
2380 				qg->excl_cmpr -= num_bytes;
2381 				dirty = true;
2382 			}
2383 		}
2384 
2385 		/* Shared -> exclusive/none case */
2386 		if (cur_old_count < nr_old_roots &&
2387 		    cur_new_count == nr_new_roots) {
2388 			/* Shared->exclusive */
2389 			if (cur_new_count != 0) {
2390 				qg->excl += num_bytes;
2391 				qg->excl_cmpr += num_bytes;
2392 				dirty = true;
2393 			}
2394 		}
2395 
2396 		/* Exclusive/none -> exclusive/none case */
2397 		if (cur_old_count == nr_old_roots &&
2398 		    cur_new_count == nr_new_roots) {
2399 			if (cur_old_count == 0) {
2400 				/* None -> exclusive/none */
2401 
2402 				if (cur_new_count != 0) {
2403 					/* None -> exclusive */
2404 					qg->excl += num_bytes;
2405 					qg->excl_cmpr += num_bytes;
2406 					dirty = true;
2407 				}
2408 				/* None -> none, nothing changed */
2409 			} else {
2410 				/* Exclusive -> exclusive/none */
2411 
2412 				if (cur_new_count == 0) {
2413 					/* Exclusive -> none */
2414 					qg->excl -= num_bytes;
2415 					qg->excl_cmpr -= num_bytes;
2416 					dirty = true;
2417 				}
2418 				/* Exclusive -> exclusive, nothing changed */
2419 			}
2420 		}
2421 
2422 		if (dirty)
2423 			qgroup_dirty(fs_info, qg);
2424 	}
2425 	return 0;
2426 }
2427 
2428 /*
2429  * Check if the @roots potentially is a list of fs tree roots
2430  *
2431  * Return 0 for definitely not a fs/subvol tree roots ulist
2432  * Return 1 for possible fs/subvol tree roots in the list (considering an empty
2433  *          one as well)
2434  */
2435 static int maybe_fs_roots(struct ulist *roots)
2436 {
2437 	struct ulist_node *unode;
2438 	struct ulist_iterator uiter;
2439 
2440 	/* Empty one, still possible for fs roots */
2441 	if (!roots || roots->nnodes == 0)
2442 		return 1;
2443 
2444 	ULIST_ITER_INIT(&uiter);
2445 	unode = ulist_next(roots, &uiter);
2446 	if (!unode)
2447 		return 1;
2448 
2449 	/*
2450 	 * If it contains fs tree roots, then it must belong to fs/subvol
2451 	 * trees.
2452 	 * If it contains a non-fs tree, it won't be shared with fs/subvol trees.
2453 	 */
2454 	return is_fstree(unode->val);
2455 }
2456 
2457 int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
2458 				u64 num_bytes, struct ulist *old_roots,
2459 				struct ulist *new_roots)
2460 {
2461 	struct btrfs_fs_info *fs_info = trans->fs_info;
2462 	struct ulist *qgroups = NULL;
2463 	struct ulist *tmp = NULL;
2464 	u64 seq;
2465 	u64 nr_new_roots = 0;
2466 	u64 nr_old_roots = 0;
2467 	int ret = 0;
2468 
2469 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2470 		return 0;
2471 
2472 	if (new_roots) {
2473 		if (!maybe_fs_roots(new_roots))
2474 			goto out_free;
2475 		nr_new_roots = new_roots->nnodes;
2476 	}
2477 	if (old_roots) {
2478 		if (!maybe_fs_roots(old_roots))
2479 			goto out_free;
2480 		nr_old_roots = old_roots->nnodes;
2481 	}
2482 
2483 	/* Quick exit, either not fs tree roots, or won't affect any qgroup */
2484 	if (nr_old_roots == 0 && nr_new_roots == 0)
2485 		goto out_free;
2486 
2487 	BUG_ON(!fs_info->quota_root);
2488 
2489 	trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr,
2490 					num_bytes, nr_old_roots, nr_new_roots);
2491 
2492 	qgroups = ulist_alloc(GFP_NOFS);
2493 	if (!qgroups) {
2494 		ret = -ENOMEM;
2495 		goto out_free;
2496 	}
2497 	tmp = ulist_alloc(GFP_NOFS);
2498 	if (!tmp) {
2499 		ret = -ENOMEM;
2500 		goto out_free;
2501 	}
2502 
2503 	mutex_lock(&fs_info->qgroup_rescan_lock);
2504 	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
2505 		if (fs_info->qgroup_rescan_progress.objectid <= bytenr) {
2506 			mutex_unlock(&fs_info->qgroup_rescan_lock);
2507 			ret = 0;
2508 			goto out_free;
2509 		}
2510 	}
2511 	mutex_unlock(&fs_info->qgroup_rescan_lock);
2512 
2513 	spin_lock(&fs_info->qgroup_lock);
2514 	seq = fs_info->qgroup_seq;
2515 
2516 	/* Update old refcnts using old_roots */
2517 	ret = qgroup_update_refcnt(fs_info, old_roots, tmp, qgroups, seq,
2518 				   UPDATE_OLD);
2519 	if (ret < 0)
2520 		goto out;
2521 
2522 	/* Update new refcnts using new_roots */
2523 	ret = qgroup_update_refcnt(fs_info, new_roots, tmp, qgroups, seq,
2524 				   UPDATE_NEW);
2525 	if (ret < 0)
2526 		goto out;
2527 
2528 	qgroup_update_counters(fs_info, qgroups, nr_old_roots, nr_new_roots,
2529 			       num_bytes, seq);
2530 
2531 	/*
2532 	 * Bump qgroup_seq to avoid seq overlap
2533 	 */
2534 	fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1;
2535 out:
2536 	spin_unlock(&fs_info->qgroup_lock);
2537 out_free:
2538 	ulist_free(tmp);
2539 	ulist_free(qgroups);
2540 	ulist_free(old_roots);
2541 	ulist_free(new_roots);
2542 	return ret;
2543 }
2544 
2545 int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
2546 {
2547 	struct btrfs_fs_info *fs_info = trans->fs_info;
2548 	struct btrfs_qgroup_extent_record *record;
2549 	struct btrfs_delayed_ref_root *delayed_refs;
2550 	struct ulist *new_roots = NULL;
2551 	struct rb_node *node;
2552 	u64 num_dirty_extents = 0;
2553 	u64 qgroup_to_skip;
2554 	int ret = 0;
2555 
2556 	delayed_refs = &trans->transaction->delayed_refs;
2557 	qgroup_to_skip = delayed_refs->qgroup_to_skip;
2558 	while ((node = rb_first(&delayed_refs->dirty_extent_root))) {
2559 		record = rb_entry(node, struct btrfs_qgroup_extent_record,
2560 				  node);
2561 
2562 		num_dirty_extents++;
2563 		trace_btrfs_qgroup_account_extents(fs_info, record);
2564 
2565 		if (!ret) {
2566 			/*
2567 			 * Old roots should be searched when inserting qgroup
2568 			 * extent record
2569 			 */
2570 			if (WARN_ON(!record->old_roots)) {
2571 				/* Search commit root to find old_roots */
2572 				ret = btrfs_find_all_roots(NULL, fs_info,
2573 						record->bytenr, 0,
2574 						&record->old_roots, false);
2575 				if (ret < 0)
2576 					goto cleanup;
2577 			}
2578 
2579 			/*
2580 			 * Use SEQ_LAST as time_seq to do special search, which
2581 			 * doesn't lock tree or delayed_refs and search current
2582 			 * root. It's safe inside commit_transaction().
2583 			 */
2584 			ret = btrfs_find_all_roots(trans, fs_info,
2585 				record->bytenr, SEQ_LAST, &new_roots, false);
2586 			if (ret < 0)
2587 				goto cleanup;
2588 			if (qgroup_to_skip) {
2589 				ulist_del(new_roots, qgroup_to_skip, 0);
2590 				ulist_del(record->old_roots, qgroup_to_skip,
2591 					  0);
2592 			}
2593 			ret = btrfs_qgroup_account_extent(trans, record->bytenr,
2594 							  record->num_bytes,
2595 							  record->old_roots,
2596 							  new_roots);
2597 			record->old_roots = NULL;
2598 			new_roots = NULL;
2599 		}
2600 cleanup:
2601 		ulist_free(record->old_roots);
2602 		ulist_free(new_roots);
2603 		new_roots = NULL;
2604 		rb_erase(node, &delayed_refs->dirty_extent_root);
2605 		kfree(record);
2606 
2607 	}
2608 	trace_qgroup_num_dirty_extents(fs_info, trans->transid,
2609 				       num_dirty_extents);
2610 	return ret;
2611 }
2612 
2613 /*
2614  * called from commit_transaction. Writes all changed qgroups to disk.
2615  */
2616 int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
2617 {
2618 	struct btrfs_fs_info *fs_info = trans->fs_info;
2619 	struct btrfs_root *quota_root = fs_info->quota_root;
2620 	int ret = 0;
2621 
2622 	if (!quota_root)
2623 		return ret;
2624 
2625 	spin_lock(&fs_info->qgroup_lock);
2626 	while (!list_empty(&fs_info->dirty_qgroups)) {
2627 		struct btrfs_qgroup *qgroup;
2628 		qgroup = list_first_entry(&fs_info->dirty_qgroups,
2629 					  struct btrfs_qgroup, dirty);
2630 		list_del_init(&qgroup->dirty);
2631 		spin_unlock(&fs_info->qgroup_lock);
2632 		ret = update_qgroup_info_item(trans, qgroup);
2633 		if (ret)
2634 			fs_info->qgroup_flags |=
2635 					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2636 		ret = update_qgroup_limit_item(trans, qgroup);
2637 		if (ret)
2638 			fs_info->qgroup_flags |=
2639 					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2640 		spin_lock(&fs_info->qgroup_lock);
2641 	}
2642 	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2643 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
2644 	else
2645 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
2646 	spin_unlock(&fs_info->qgroup_lock);
2647 
2648 	ret = update_qgroup_status_item(trans);
2649 	if (ret)
2650 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2651 
2652 	return ret;
2653 }
2654 
2655 /*
2656  * Copy the accounting information between qgroups. This is necessary
2657  * when a snapshot or a subvolume is created. Throwing an error will
2658  * cause a transaction abort so we take extra care here to only error
2659  * when a readonly fs is a reasonable outcome.
2660  */
2661 int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
2662 			 u64 objectid, struct btrfs_qgroup_inherit *inherit)
2663 {
2664 	int ret = 0;
2665 	int i;
2666 	u64 *i_qgroups;
2667 	struct btrfs_fs_info *fs_info = trans->fs_info;
2668 	struct btrfs_root *quota_root;
2669 	struct btrfs_qgroup *srcgroup;
2670 	struct btrfs_qgroup *dstgroup;
2671 	u32 level_size = 0;
2672 	u64 nums;
2673 
2674 	mutex_lock(&fs_info->qgroup_ioctl_lock);
2675 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2676 		goto out;
2677 
2678 	quota_root = fs_info->quota_root;
2679 	if (!quota_root) {
2680 		ret = -EINVAL;
2681 		goto out;
2682 	}
2683 
2684 	if (inherit) {
2685 		i_qgroups = (u64 *)(inherit + 1);
2686 		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
2687 		       2 * inherit->num_excl_copies;
2688 		for (i = 0; i < nums; ++i) {
2689 			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
2690 
2691 			/*
2692 			 * Zero out invalid groups so we can ignore
2693 			 * them later.
2694 			 */
2695 			if (!srcgroup ||
2696 			    ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
2697 				*i_qgroups = 0ULL;
2698 
2699 			++i_qgroups;
2700 		}
2701 	}
2702 
2703 	/*
2704 	 * create a tracking group for the subvol itself
2705 	 */
2706 	ret = add_qgroup_item(trans, quota_root, objectid);
2707 	if (ret)
2708 		goto out;
2709 
2710 	/*
2711 	 * add qgroup to all inherited groups
2712 	 */
2713 	if (inherit) {
2714 		i_qgroups = (u64 *)(inherit + 1);
2715 		for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
2716 			if (*i_qgroups == 0)
2717 				continue;
2718 			ret = add_qgroup_relation_item(trans, objectid,
2719 						       *i_qgroups);
2720 			if (ret && ret != -EEXIST)
2721 				goto out;
2722 			ret = add_qgroup_relation_item(trans, *i_qgroups,
2723 						       objectid);
2724 			if (ret && ret != -EEXIST)
2725 				goto out;
2726 		}
2727 		ret = 0;
2728 	}
2729 
2730 
2731 	spin_lock(&fs_info->qgroup_lock);
2732 
2733 	dstgroup = add_qgroup_rb(fs_info, objectid);
2734 	if (IS_ERR(dstgroup)) {
2735 		ret = PTR_ERR(dstgroup);
2736 		goto unlock;
2737 	}
2738 
2739 	if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
2740 		dstgroup->lim_flags = inherit->lim.flags;
2741 		dstgroup->max_rfer = inherit->lim.max_rfer;
2742 		dstgroup->max_excl = inherit->lim.max_excl;
2743 		dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
2744 		dstgroup->rsv_excl = inherit->lim.rsv_excl;
2745 
2746 		ret = update_qgroup_limit_item(trans, dstgroup);
2747 		if (ret) {
2748 			fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2749 			btrfs_info(fs_info,
2750 				   "unable to update quota limit for %llu",
2751 				   dstgroup->qgroupid);
2752 			goto unlock;
2753 		}
2754 	}
2755 
2756 	if (srcid) {
2757 		srcgroup = find_qgroup_rb(fs_info, srcid);
2758 		if (!srcgroup)
2759 			goto unlock;
2760 
2761 		/*
2762 		 * We call inherit after we clone the root in order to make sure
2763 		 * our counts don't go crazy, so at this point the only
2764 		 * difference between the two roots should be the root node.
2765 		 */
2766 		level_size = fs_info->nodesize;
2767 		dstgroup->rfer = srcgroup->rfer;
2768 		dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
2769 		dstgroup->excl = level_size;
2770 		dstgroup->excl_cmpr = level_size;
2771 		srcgroup->excl = level_size;
2772 		srcgroup->excl_cmpr = level_size;
2773 
2774 		/* inherit the limit info */
2775 		dstgroup->lim_flags = srcgroup->lim_flags;
2776 		dstgroup->max_rfer = srcgroup->max_rfer;
2777 		dstgroup->max_excl = srcgroup->max_excl;
2778 		dstgroup->rsv_rfer = srcgroup->rsv_rfer;
2779 		dstgroup->rsv_excl = srcgroup->rsv_excl;
2780 
2781 		qgroup_dirty(fs_info, dstgroup);
2782 		qgroup_dirty(fs_info, srcgroup);
2783 	}
2784 
2785 	if (!inherit)
2786 		goto unlock;
2787 
2788 	i_qgroups = (u64 *)(inherit + 1);
2789 	for (i = 0; i < inherit->num_qgroups; ++i) {
2790 		if (*i_qgroups) {
2791 			ret = add_relation_rb(fs_info, objectid, *i_qgroups);
2792 			if (ret)
2793 				goto unlock;
2794 		}
2795 		++i_qgroups;
2796 	}
2797 
2798 	for (i = 0; i <  inherit->num_ref_copies; ++i, i_qgroups += 2) {
2799 		struct btrfs_qgroup *src;
2800 		struct btrfs_qgroup *dst;
2801 
2802 		if (!i_qgroups[0] || !i_qgroups[1])
2803 			continue;
2804 
2805 		src = find_qgroup_rb(fs_info, i_qgroups[0]);
2806 		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
2807 
2808 		if (!src || !dst) {
2809 			ret = -EINVAL;
2810 			goto unlock;
2811 		}
2812 
2813 		dst->rfer = src->rfer - level_size;
2814 		dst->rfer_cmpr = src->rfer_cmpr - level_size;
2815 	}
2816 	for (i = 0; i <  inherit->num_excl_copies; ++i, i_qgroups += 2) {
2817 		struct btrfs_qgroup *src;
2818 		struct btrfs_qgroup *dst;
2819 
2820 		if (!i_qgroups[0] || !i_qgroups[1])
2821 			continue;
2822 
2823 		src = find_qgroup_rb(fs_info, i_qgroups[0]);
2824 		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
2825 
2826 		if (!src || !dst) {
2827 			ret = -EINVAL;
2828 			goto unlock;
2829 		}
2830 
2831 		dst->excl = src->excl + level_size;
2832 		dst->excl_cmpr = src->excl_cmpr + level_size;
2833 	}
2834 
2835 unlock:
2836 	spin_unlock(&fs_info->qgroup_lock);
2837 out:
2838 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
2839 	return ret;
2840 }
2841 
2842 /*
2843  * Two limits to commit transaction in advance.
2844  *
2845  * For RATIO, it will be 1/RATIO of the remaining limit
2846  * (excluding data and prealloc meta) as threshold.
2847  * For SIZE, it will be in byte unit as threshold.
2848  */
2849 #define QGROUP_PERTRANS_RATIO		32
2850 #define QGROUP_PERTRANS_SIZE		SZ_32M
2851 static bool qgroup_check_limits(struct btrfs_fs_info *fs_info,
2852 				const struct btrfs_qgroup *qg, u64 num_bytes)
2853 {
2854 	u64 limit;
2855 	u64 threshold;
2856 
2857 	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
2858 	    qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer)
2859 		return false;
2860 
2861 	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
2862 	    qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl)
2863 		return false;
2864 
2865 	/*
2866 	 * Even if we passed the check, it's better to check if reservation
2867 	 * for meta_pertrans is pushing us near limit.
2868 	 * If there is too much pertrans reservation or it's near the limit,
2869 	 * let's try commit transaction to free some, using transaction_kthread
2870 	 */
2871 	if ((qg->lim_flags & (BTRFS_QGROUP_LIMIT_MAX_RFER |
2872 			      BTRFS_QGROUP_LIMIT_MAX_EXCL))) {
2873 		if (qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL)
2874 			limit = qg->max_excl;
2875 		else
2876 			limit = qg->max_rfer;
2877 		threshold = (limit - qg->rsv.values[BTRFS_QGROUP_RSV_DATA] -
2878 			    qg->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC]) /
2879 			    QGROUP_PERTRANS_RATIO;
2880 		threshold = min_t(u64, threshold, QGROUP_PERTRANS_SIZE);
2881 
2882 		/*
2883 		 * Use transaction_kthread to commit transaction, so we no
2884 		 * longer need to bother nested transaction nor lock context.
2885 		 */
2886 		if (qg->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS] > threshold)
2887 			btrfs_commit_transaction_locksafe(fs_info);
2888 	}
2889 
2890 	return true;
2891 }
2892 
2893 static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
2894 			  enum btrfs_qgroup_rsv_type type)
2895 {
2896 	struct btrfs_root *quota_root;
2897 	struct btrfs_qgroup *qgroup;
2898 	struct btrfs_fs_info *fs_info = root->fs_info;
2899 	u64 ref_root = root->root_key.objectid;
2900 	int ret = 0;
2901 	struct ulist_node *unode;
2902 	struct ulist_iterator uiter;
2903 
2904 	if (!is_fstree(ref_root))
2905 		return 0;
2906 
2907 	if (num_bytes == 0)
2908 		return 0;
2909 
2910 	if (test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags) &&
2911 	    capable(CAP_SYS_RESOURCE))
2912 		enforce = false;
2913 
2914 	spin_lock(&fs_info->qgroup_lock);
2915 	quota_root = fs_info->quota_root;
2916 	if (!quota_root)
2917 		goto out;
2918 
2919 	qgroup = find_qgroup_rb(fs_info, ref_root);
2920 	if (!qgroup)
2921 		goto out;
2922 
2923 	/*
2924 	 * in a first step, we check all affected qgroups if any limits would
2925 	 * be exceeded
2926 	 */
2927 	ulist_reinit(fs_info->qgroup_ulist);
2928 	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
2929 			qgroup_to_aux(qgroup), GFP_ATOMIC);
2930 	if (ret < 0)
2931 		goto out;
2932 	ULIST_ITER_INIT(&uiter);
2933 	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
2934 		struct btrfs_qgroup *qg;
2935 		struct btrfs_qgroup_list *glist;
2936 
2937 		qg = unode_aux_to_qgroup(unode);
2938 
2939 		if (enforce && !qgroup_check_limits(fs_info, qg, num_bytes)) {
2940 			ret = -EDQUOT;
2941 			goto out;
2942 		}
2943 
2944 		list_for_each_entry(glist, &qg->groups, next_group) {
2945 			ret = ulist_add(fs_info->qgroup_ulist,
2946 					glist->group->qgroupid,
2947 					qgroup_to_aux(glist->group), GFP_ATOMIC);
2948 			if (ret < 0)
2949 				goto out;
2950 		}
2951 	}
2952 	ret = 0;
2953 	/*
2954 	 * no limits exceeded, now record the reservation into all qgroups
2955 	 */
2956 	ULIST_ITER_INIT(&uiter);
2957 	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
2958 		struct btrfs_qgroup *qg;
2959 
2960 		qg = unode_aux_to_qgroup(unode);
2961 
2962 		trace_qgroup_update_reserve(fs_info, qg, num_bytes, type);
2963 		qgroup_rsv_add(fs_info, qg, num_bytes, type);
2964 	}
2965 
2966 out:
2967 	spin_unlock(&fs_info->qgroup_lock);
2968 	return ret;
2969 }
2970 
2971 /*
2972  * Free @num_bytes of reserved space with @type for qgroup.  (Normally level 0
2973  * qgroup).
2974  *
2975  * Will handle all higher level qgroup too.
2976  *
2977  * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup.
2978  * This special case is only used for META_PERTRANS type.
2979  */
2980 void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
2981 			       u64 ref_root, u64 num_bytes,
2982 			       enum btrfs_qgroup_rsv_type type)
2983 {
2984 	struct btrfs_root *quota_root;
2985 	struct btrfs_qgroup *qgroup;
2986 	struct ulist_node *unode;
2987 	struct ulist_iterator uiter;
2988 	int ret = 0;
2989 
2990 	if (!is_fstree(ref_root))
2991 		return;
2992 
2993 	if (num_bytes == 0)
2994 		return;
2995 
2996 	if (num_bytes == (u64)-1 && type != BTRFS_QGROUP_RSV_META_PERTRANS) {
2997 		WARN(1, "%s: Invalid type to free", __func__);
2998 		return;
2999 	}
3000 	spin_lock(&fs_info->qgroup_lock);
3001 
3002 	quota_root = fs_info->quota_root;
3003 	if (!quota_root)
3004 		goto out;
3005 
3006 	qgroup = find_qgroup_rb(fs_info, ref_root);
3007 	if (!qgroup)
3008 		goto out;
3009 
3010 	if (num_bytes == (u64)-1)
3011 		/*
3012 		 * We're freeing all pertrans rsv, get reserved value from
3013 		 * level 0 qgroup as real num_bytes to free.
3014 		 */
3015 		num_bytes = qgroup->rsv.values[type];
3016 
3017 	ulist_reinit(fs_info->qgroup_ulist);
3018 	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
3019 			qgroup_to_aux(qgroup), GFP_ATOMIC);
3020 	if (ret < 0)
3021 		goto out;
3022 	ULIST_ITER_INIT(&uiter);
3023 	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
3024 		struct btrfs_qgroup *qg;
3025 		struct btrfs_qgroup_list *glist;
3026 
3027 		qg = unode_aux_to_qgroup(unode);
3028 
3029 		trace_qgroup_update_reserve(fs_info, qg, -(s64)num_bytes, type);
3030 		qgroup_rsv_release(fs_info, qg, num_bytes, type);
3031 
3032 		list_for_each_entry(glist, &qg->groups, next_group) {
3033 			ret = ulist_add(fs_info->qgroup_ulist,
3034 					glist->group->qgroupid,
3035 					qgroup_to_aux(glist->group), GFP_ATOMIC);
3036 			if (ret < 0)
3037 				goto out;
3038 		}
3039 	}
3040 
3041 out:
3042 	spin_unlock(&fs_info->qgroup_lock);
3043 }
3044 
3045 /*
3046  * Check if the leaf is the last leaf. Which means all node pointers
3047  * are at their last position.
3048  */
3049 static bool is_last_leaf(struct btrfs_path *path)
3050 {
3051 	int i;
3052 
3053 	for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
3054 		if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1)
3055 			return false;
3056 	}
3057 	return true;
3058 }
3059 
3060 /*
3061  * returns < 0 on error, 0 when more leafs are to be scanned.
3062  * returns 1 when done.
3063  */
3064 static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
3065 			      struct btrfs_path *path)
3066 {
3067 	struct btrfs_fs_info *fs_info = trans->fs_info;
3068 	struct btrfs_key found;
3069 	struct extent_buffer *scratch_leaf = NULL;
3070 	struct ulist *roots = NULL;
3071 	u64 num_bytes;
3072 	bool done;
3073 	int slot;
3074 	int ret;
3075 
3076 	mutex_lock(&fs_info->qgroup_rescan_lock);
3077 	ret = btrfs_search_slot_for_read(fs_info->extent_root,
3078 					 &fs_info->qgroup_rescan_progress,
3079 					 path, 1, 0);
3080 
3081 	btrfs_debug(fs_info,
3082 		"current progress key (%llu %u %llu), search_slot ret %d",
3083 		fs_info->qgroup_rescan_progress.objectid,
3084 		fs_info->qgroup_rescan_progress.type,
3085 		fs_info->qgroup_rescan_progress.offset, ret);
3086 
3087 	if (ret) {
3088 		/*
3089 		 * The rescan is about to end, we will not be scanning any
3090 		 * further blocks. We cannot unset the RESCAN flag here, because
3091 		 * we want to commit the transaction if everything went well.
3092 		 * To make the live accounting work in this phase, we set our
3093 		 * scan progress pointer such that every real extent objectid
3094 		 * will be smaller.
3095 		 */
3096 		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
3097 		btrfs_release_path(path);
3098 		mutex_unlock(&fs_info->qgroup_rescan_lock);
3099 		return ret;
3100 	}
3101 	done = is_last_leaf(path);
3102 
3103 	btrfs_item_key_to_cpu(path->nodes[0], &found,
3104 			      btrfs_header_nritems(path->nodes[0]) - 1);
3105 	fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;
3106 
3107 	scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]);
3108 	if (!scratch_leaf) {
3109 		ret = -ENOMEM;
3110 		mutex_unlock(&fs_info->qgroup_rescan_lock);
3111 		goto out;
3112 	}
3113 	slot = path->slots[0];
3114 	btrfs_release_path(path);
3115 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3116 
3117 	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
3118 		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
3119 		if (found.type != BTRFS_EXTENT_ITEM_KEY &&
3120 		    found.type != BTRFS_METADATA_ITEM_KEY)
3121 			continue;
3122 		if (found.type == BTRFS_METADATA_ITEM_KEY)
3123 			num_bytes = fs_info->nodesize;
3124 		else
3125 			num_bytes = found.offset;
3126 
3127 		ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
3128 					   &roots, false);
3129 		if (ret < 0)
3130 			goto out;
3131 		/* For rescan, just pass old_roots as NULL */
3132 		ret = btrfs_qgroup_account_extent(trans, found.objectid,
3133 						  num_bytes, NULL, roots);
3134 		if (ret < 0)
3135 			goto out;
3136 	}
3137 out:
3138 	if (scratch_leaf)
3139 		free_extent_buffer(scratch_leaf);
3140 
3141 	if (done && !ret) {
3142 		ret = 1;
3143 		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
3144 	}
3145 	return ret;
3146 }
3147 
3148 static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
3149 {
3150 	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
3151 						     qgroup_rescan_work);
3152 	struct btrfs_path *path;
3153 	struct btrfs_trans_handle *trans = NULL;
3154 	int err = -ENOMEM;
3155 	int ret = 0;
3156 
3157 	path = btrfs_alloc_path();
3158 	if (!path)
3159 		goto out;
3160 	/*
3161 	 * Rescan should only search for commit root, and any later difference
3162 	 * should be recorded by qgroup
3163 	 */
3164 	path->search_commit_root = 1;
3165 	path->skip_locking = 1;
3166 
3167 	err = 0;
3168 	while (!err && !btrfs_fs_closing(fs_info)) {
3169 		trans = btrfs_start_transaction(fs_info->fs_root, 0);
3170 		if (IS_ERR(trans)) {
3171 			err = PTR_ERR(trans);
3172 			break;
3173 		}
3174 		if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
3175 			err = -EINTR;
3176 		} else {
3177 			err = qgroup_rescan_leaf(trans, path);
3178 		}
3179 		if (err > 0)
3180 			btrfs_commit_transaction(trans);
3181 		else
3182 			btrfs_end_transaction(trans);
3183 	}
3184 
3185 out:
3186 	btrfs_free_path(path);
3187 
3188 	mutex_lock(&fs_info->qgroup_rescan_lock);
3189 	if (!btrfs_fs_closing(fs_info))
3190 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3191 
3192 	if (err > 0 &&
3193 	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
3194 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
3195 	} else if (err < 0) {
3196 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
3197 	}
3198 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3199 
3200 	/*
3201 	 * only update status, since the previous part has already updated the
3202 	 * qgroup info.
3203 	 */
3204 	trans = btrfs_start_transaction(fs_info->quota_root, 1);
3205 	if (IS_ERR(trans)) {
3206 		err = PTR_ERR(trans);
3207 		btrfs_err(fs_info,
3208 			  "fail to start transaction for status update: %d",
3209 			  err);
3210 		goto done;
3211 	}
3212 	ret = update_qgroup_status_item(trans);
3213 	if (ret < 0) {
3214 		err = ret;
3215 		btrfs_err(fs_info, "fail to update qgroup status: %d", err);
3216 	}
3217 	btrfs_end_transaction(trans);
3218 
3219 	if (btrfs_fs_closing(fs_info)) {
3220 		btrfs_info(fs_info, "qgroup scan paused");
3221 	} else if (err >= 0) {
3222 		btrfs_info(fs_info, "qgroup scan completed%s",
3223 			err > 0 ? " (inconsistency flag cleared)" : "");
3224 	} else {
3225 		btrfs_err(fs_info, "qgroup scan failed with %d", err);
3226 	}
3227 
3228 done:
3229 	mutex_lock(&fs_info->qgroup_rescan_lock);
3230 	fs_info->qgroup_rescan_running = false;
3231 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3232 	complete_all(&fs_info->qgroup_rescan_completion);
3233 }
3234 
3235 /*
3236  * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
3237  * memory required for the rescan context.
3238  */
3239 static int
3240 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
3241 		   int init_flags)
3242 {
3243 	int ret = 0;
3244 
3245 	if (!init_flags) {
3246 		/* we're resuming qgroup rescan at mount time */
3247 		if (!(fs_info->qgroup_flags &
3248 		      BTRFS_QGROUP_STATUS_FLAG_RESCAN)) {
3249 			btrfs_warn(fs_info,
3250 			"qgroup rescan init failed, qgroup is not enabled");
3251 			ret = -EINVAL;
3252 		} else if (!(fs_info->qgroup_flags &
3253 			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
3254 			btrfs_warn(fs_info,
3255 			"qgroup rescan init failed, qgroup rescan is not queued");
3256 			ret = -EINVAL;
3257 		}
3258 
3259 		if (ret)
3260 			return ret;
3261 	}
3262 
3263 	mutex_lock(&fs_info->qgroup_rescan_lock);
3264 	spin_lock(&fs_info->qgroup_lock);
3265 
3266 	if (init_flags) {
3267 		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
3268 			btrfs_warn(fs_info,
3269 				   "qgroup rescan is already in progress");
3270 			ret = -EINPROGRESS;
3271 		} else if (!(fs_info->qgroup_flags &
3272 			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
3273 			btrfs_warn(fs_info,
3274 			"qgroup rescan init failed, qgroup is not enabled");
3275 			ret = -EINVAL;
3276 		}
3277 
3278 		if (ret) {
3279 			spin_unlock(&fs_info->qgroup_lock);
3280 			mutex_unlock(&fs_info->qgroup_rescan_lock);
3281 			return ret;
3282 		}
3283 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3284 	}
3285 
3286 	memset(&fs_info->qgroup_rescan_progress, 0,
3287 		sizeof(fs_info->qgroup_rescan_progress));
3288 	fs_info->qgroup_rescan_progress.objectid = progress_objectid;
3289 	init_completion(&fs_info->qgroup_rescan_completion);
3290 	fs_info->qgroup_rescan_running = true;
3291 
3292 	spin_unlock(&fs_info->qgroup_lock);
3293 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3294 
3295 	memset(&fs_info->qgroup_rescan_work, 0,
3296 	       sizeof(fs_info->qgroup_rescan_work));
3297 	btrfs_init_work(&fs_info->qgroup_rescan_work,
3298 			btrfs_qgroup_rescan_helper,
3299 			btrfs_qgroup_rescan_worker, NULL, NULL);
3300 	return 0;
3301 }
3302 
3303 static void
3304 qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
3305 {
3306 	struct rb_node *n;
3307 	struct btrfs_qgroup *qgroup;
3308 
3309 	spin_lock(&fs_info->qgroup_lock);
3310 	/* clear all current qgroup tracking information */
3311 	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
3312 		qgroup = rb_entry(n, struct btrfs_qgroup, node);
3313 		qgroup->rfer = 0;
3314 		qgroup->rfer_cmpr = 0;
3315 		qgroup->excl = 0;
3316 		qgroup->excl_cmpr = 0;
3317 		qgroup_dirty(fs_info, qgroup);
3318 	}
3319 	spin_unlock(&fs_info->qgroup_lock);
3320 }
3321 
3322 int
3323 btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
3324 {
3325 	int ret = 0;
3326 	struct btrfs_trans_handle *trans;
3327 
3328 	ret = qgroup_rescan_init(fs_info, 0, 1);
3329 	if (ret)
3330 		return ret;
3331 
3332 	/*
3333 	 * We have set the rescan_progress to 0, which means no more
3334 	 * delayed refs will be accounted by btrfs_qgroup_account_ref.
3335 	 * However, btrfs_qgroup_account_ref may be right after its call
3336 	 * to btrfs_find_all_roots, in which case it would still do the
3337 	 * accounting.
3338 	 * To solve this, we're committing the transaction, which will
3339 	 * ensure we run all delayed refs and only after that, we are
3340 	 * going to clear all tracking information for a clean start.
3341 	 */
3342 
3343 	trans = btrfs_join_transaction(fs_info->fs_root);
3344 	if (IS_ERR(trans)) {
3345 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3346 		return PTR_ERR(trans);
3347 	}
3348 	ret = btrfs_commit_transaction(trans);
3349 	if (ret) {
3350 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3351 		return ret;
3352 	}
3353 
3354 	qgroup_rescan_zero_tracking(fs_info);
3355 
3356 	btrfs_queue_work(fs_info->qgroup_rescan_workers,
3357 			 &fs_info->qgroup_rescan_work);
3358 
3359 	return 0;
3360 }
3361 
3362 int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
3363 				     bool interruptible)
3364 {
3365 	int running;
3366 	int ret = 0;
3367 
3368 	mutex_lock(&fs_info->qgroup_rescan_lock);
3369 	spin_lock(&fs_info->qgroup_lock);
3370 	running = fs_info->qgroup_rescan_running;
3371 	spin_unlock(&fs_info->qgroup_lock);
3372 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3373 
3374 	if (!running)
3375 		return 0;
3376 
3377 	if (interruptible)
3378 		ret = wait_for_completion_interruptible(
3379 					&fs_info->qgroup_rescan_completion);
3380 	else
3381 		wait_for_completion(&fs_info->qgroup_rescan_completion);
3382 
3383 	return ret;
3384 }
3385 
3386 /*
3387  * this is only called from open_ctree where we're still single threaded, thus
3388  * locking is omitted here.
3389  */
3390 void
3391 btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
3392 {
3393 	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
3394 		btrfs_queue_work(fs_info->qgroup_rescan_workers,
3395 				 &fs_info->qgroup_rescan_work);
3396 }
3397 
3398 /*
3399  * Reserve qgroup space for range [start, start + len).
3400  *
3401  * This function will either reserve space from related qgroups or doing
3402  * nothing if the range is already reserved.
3403  *
3404  * Return 0 for successful reserve
3405  * Return <0 for error (including -EQUOT)
3406  *
3407  * NOTE: this function may sleep for memory allocation.
3408  *       if btrfs_qgroup_reserve_data() is called multiple times with
3409  *       same @reserved, caller must ensure when error happens it's OK
3410  *       to free *ALL* reserved space.
3411  */
3412 int btrfs_qgroup_reserve_data(struct inode *inode,
3413 			struct extent_changeset **reserved_ret, u64 start,
3414 			u64 len)
3415 {
3416 	struct btrfs_root *root = BTRFS_I(inode)->root;
3417 	struct ulist_node *unode;
3418 	struct ulist_iterator uiter;
3419 	struct extent_changeset *reserved;
3420 	u64 orig_reserved;
3421 	u64 to_reserve;
3422 	int ret;
3423 
3424 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
3425 	    !is_fstree(root->root_key.objectid) || len == 0)
3426 		return 0;
3427 
3428 	/* @reserved parameter is mandatory for qgroup */
3429 	if (WARN_ON(!reserved_ret))
3430 		return -EINVAL;
3431 	if (!*reserved_ret) {
3432 		*reserved_ret = extent_changeset_alloc();
3433 		if (!*reserved_ret)
3434 			return -ENOMEM;
3435 	}
3436 	reserved = *reserved_ret;
3437 	/* Record already reserved space */
3438 	orig_reserved = reserved->bytes_changed;
3439 	ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
3440 			start + len -1, EXTENT_QGROUP_RESERVED, reserved);
3441 
3442 	/* Newly reserved space */
3443 	to_reserve = reserved->bytes_changed - orig_reserved;
3444 	trace_btrfs_qgroup_reserve_data(inode, start, len,
3445 					to_reserve, QGROUP_RESERVE);
3446 	if (ret < 0)
3447 		goto cleanup;
3448 	ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA);
3449 	if (ret < 0)
3450 		goto cleanup;
3451 
3452 	return ret;
3453 
3454 cleanup:
3455 	/* cleanup *ALL* already reserved ranges */
3456 	ULIST_ITER_INIT(&uiter);
3457 	while ((unode = ulist_next(&reserved->range_changed, &uiter)))
3458 		clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val,
3459 				 unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL);
3460 	extent_changeset_release(reserved);
3461 	return ret;
3462 }
3463 
3464 /* Free ranges specified by @reserved, normally in error path */
3465 static int qgroup_free_reserved_data(struct inode *inode,
3466 			struct extent_changeset *reserved, u64 start, u64 len)
3467 {
3468 	struct btrfs_root *root = BTRFS_I(inode)->root;
3469 	struct ulist_node *unode;
3470 	struct ulist_iterator uiter;
3471 	struct extent_changeset changeset;
3472 	int freed = 0;
3473 	int ret;
3474 
3475 	extent_changeset_init(&changeset);
3476 	len = round_up(start + len, root->fs_info->sectorsize);
3477 	start = round_down(start, root->fs_info->sectorsize);
3478 
3479 	ULIST_ITER_INIT(&uiter);
3480 	while ((unode = ulist_next(&reserved->range_changed, &uiter))) {
3481 		u64 range_start = unode->val;
3482 		/* unode->aux is the inclusive end */
3483 		u64 range_len = unode->aux - range_start + 1;
3484 		u64 free_start;
3485 		u64 free_len;
3486 
3487 		extent_changeset_release(&changeset);
3488 
3489 		/* Only free range in range [start, start + len) */
3490 		if (range_start >= start + len ||
3491 		    range_start + range_len <= start)
3492 			continue;
3493 		free_start = max(range_start, start);
3494 		free_len = min(start + len, range_start + range_len) -
3495 			   free_start;
3496 		/*
3497 		 * TODO: To also modify reserved->ranges_reserved to reflect
3498 		 * the modification.
3499 		 *
3500 		 * However as long as we free qgroup reserved according to
3501 		 * EXTENT_QGROUP_RESERVED, we won't double free.
3502 		 * So not need to rush.
3503 		 */
3504 		ret = clear_record_extent_bits(&BTRFS_I(inode)->io_failure_tree,
3505 				free_start, free_start + free_len - 1,
3506 				EXTENT_QGROUP_RESERVED, &changeset);
3507 		if (ret < 0)
3508 			goto out;
3509 		freed += changeset.bytes_changed;
3510 	}
3511 	btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid, freed,
3512 				  BTRFS_QGROUP_RSV_DATA);
3513 	ret = freed;
3514 out:
3515 	extent_changeset_release(&changeset);
3516 	return ret;
3517 }
3518 
3519 static int __btrfs_qgroup_release_data(struct inode *inode,
3520 			struct extent_changeset *reserved, u64 start, u64 len,
3521 			int free)
3522 {
3523 	struct extent_changeset changeset;
3524 	int trace_op = QGROUP_RELEASE;
3525 	int ret;
3526 
3527 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED,
3528 		      &BTRFS_I(inode)->root->fs_info->flags))
3529 		return 0;
3530 
3531 	/* In release case, we shouldn't have @reserved */
3532 	WARN_ON(!free && reserved);
3533 	if (free && reserved)
3534 		return qgroup_free_reserved_data(inode, reserved, start, len);
3535 	extent_changeset_init(&changeset);
3536 	ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
3537 			start + len -1, EXTENT_QGROUP_RESERVED, &changeset);
3538 	if (ret < 0)
3539 		goto out;
3540 
3541 	if (free)
3542 		trace_op = QGROUP_FREE;
3543 	trace_btrfs_qgroup_release_data(inode, start, len,
3544 					changeset.bytes_changed, trace_op);
3545 	if (free)
3546 		btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info,
3547 				BTRFS_I(inode)->root->root_key.objectid,
3548 				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
3549 	ret = changeset.bytes_changed;
3550 out:
3551 	extent_changeset_release(&changeset);
3552 	return ret;
3553 }
3554 
3555 /*
3556  * Free a reserved space range from io_tree and related qgroups
3557  *
3558  * Should be called when a range of pages get invalidated before reaching disk.
3559  * Or for error cleanup case.
3560  * if @reserved is given, only reserved range in [@start, @start + @len) will
3561  * be freed.
3562  *
3563  * For data written to disk, use btrfs_qgroup_release_data().
3564  *
3565  * NOTE: This function may sleep for memory allocation.
3566  */
3567 int btrfs_qgroup_free_data(struct inode *inode,
3568 			struct extent_changeset *reserved, u64 start, u64 len)
3569 {
3570 	return __btrfs_qgroup_release_data(inode, reserved, start, len, 1);
3571 }
3572 
3573 /*
3574  * Release a reserved space range from io_tree only.
3575  *
3576  * Should be called when a range of pages get written to disk and corresponding
3577  * FILE_EXTENT is inserted into corresponding root.
3578  *
3579  * Since new qgroup accounting framework will only update qgroup numbers at
3580  * commit_transaction() time, its reserved space shouldn't be freed from
3581  * related qgroups.
3582  *
3583  * But we should release the range from io_tree, to allow further write to be
3584  * COWed.
3585  *
3586  * NOTE: This function may sleep for memory allocation.
3587  */
3588 int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len)
3589 {
3590 	return __btrfs_qgroup_release_data(inode, NULL, start, len, 0);
3591 }
3592 
3593 static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes,
3594 			      enum btrfs_qgroup_rsv_type type)
3595 {
3596 	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
3597 	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
3598 		return;
3599 	if (num_bytes == 0)
3600 		return;
3601 
3602 	spin_lock(&root->qgroup_meta_rsv_lock);
3603 	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
3604 		root->qgroup_meta_rsv_prealloc += num_bytes;
3605 	else
3606 		root->qgroup_meta_rsv_pertrans += num_bytes;
3607 	spin_unlock(&root->qgroup_meta_rsv_lock);
3608 }
3609 
3610 static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes,
3611 			     enum btrfs_qgroup_rsv_type type)
3612 {
3613 	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
3614 	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
3615 		return 0;
3616 	if (num_bytes == 0)
3617 		return 0;
3618 
3619 	spin_lock(&root->qgroup_meta_rsv_lock);
3620 	if (type == BTRFS_QGROUP_RSV_META_PREALLOC) {
3621 		num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc,
3622 				  num_bytes);
3623 		root->qgroup_meta_rsv_prealloc -= num_bytes;
3624 	} else {
3625 		num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans,
3626 				  num_bytes);
3627 		root->qgroup_meta_rsv_pertrans -= num_bytes;
3628 	}
3629 	spin_unlock(&root->qgroup_meta_rsv_lock);
3630 	return num_bytes;
3631 }
3632 
3633 int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
3634 				enum btrfs_qgroup_rsv_type type, bool enforce)
3635 {
3636 	struct btrfs_fs_info *fs_info = root->fs_info;
3637 	int ret;
3638 
3639 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
3640 	    !is_fstree(root->root_key.objectid) || num_bytes == 0)
3641 		return 0;
3642 
3643 	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
3644 	trace_qgroup_meta_reserve(root, type, (s64)num_bytes);
3645 	ret = qgroup_reserve(root, num_bytes, enforce, type);
3646 	if (ret < 0)
3647 		return ret;
3648 	/*
3649 	 * Record what we have reserved into root.
3650 	 *
3651 	 * To avoid quota disabled->enabled underflow.
3652 	 * In that case, we may try to free space we haven't reserved
3653 	 * (since quota was disabled), so record what we reserved into root.
3654 	 * And ensure later release won't underflow this number.
3655 	 */
3656 	add_root_meta_rsv(root, num_bytes, type);
3657 	return ret;
3658 }
3659 
3660 void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
3661 {
3662 	struct btrfs_fs_info *fs_info = root->fs_info;
3663 
3664 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
3665 	    !is_fstree(root->root_key.objectid))
3666 		return;
3667 
3668 	/* TODO: Update trace point to handle such free */
3669 	trace_qgroup_meta_free_all_pertrans(root);
3670 	/* Special value -1 means to free all reserved space */
3671 	btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid, (u64)-1,
3672 				  BTRFS_QGROUP_RSV_META_PERTRANS);
3673 }
3674 
3675 void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
3676 			      enum btrfs_qgroup_rsv_type type)
3677 {
3678 	struct btrfs_fs_info *fs_info = root->fs_info;
3679 
3680 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
3681 	    !is_fstree(root->root_key.objectid))
3682 		return;
3683 
3684 	/*
3685 	 * reservation for META_PREALLOC can happen before quota is enabled,
3686 	 * which can lead to underflow.
3687 	 * Here ensure we will only free what we really have reserved.
3688 	 */
3689 	num_bytes = sub_root_meta_rsv(root, num_bytes, type);
3690 	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
3691 	trace_qgroup_meta_reserve(root, type, -(s64)num_bytes);
3692 	btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid,
3693 				  num_bytes, type);
3694 }
3695 
3696 static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
3697 				int num_bytes)
3698 {
3699 	struct btrfs_root *quota_root = fs_info->quota_root;
3700 	struct btrfs_qgroup *qgroup;
3701 	struct ulist_node *unode;
3702 	struct ulist_iterator uiter;
3703 	int ret = 0;
3704 
3705 	if (num_bytes == 0)
3706 		return;
3707 	if (!quota_root)
3708 		return;
3709 
3710 	spin_lock(&fs_info->qgroup_lock);
3711 	qgroup = find_qgroup_rb(fs_info, ref_root);
3712 	if (!qgroup)
3713 		goto out;
3714 	ulist_reinit(fs_info->qgroup_ulist);
3715 	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
3716 		       qgroup_to_aux(qgroup), GFP_ATOMIC);
3717 	if (ret < 0)
3718 		goto out;
3719 	ULIST_ITER_INIT(&uiter);
3720 	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
3721 		struct btrfs_qgroup *qg;
3722 		struct btrfs_qgroup_list *glist;
3723 
3724 		qg = unode_aux_to_qgroup(unode);
3725 
3726 		qgroup_rsv_release(fs_info, qg, num_bytes,
3727 				BTRFS_QGROUP_RSV_META_PREALLOC);
3728 		qgroup_rsv_add(fs_info, qg, num_bytes,
3729 				BTRFS_QGROUP_RSV_META_PERTRANS);
3730 		list_for_each_entry(glist, &qg->groups, next_group) {
3731 			ret = ulist_add(fs_info->qgroup_ulist,
3732 					glist->group->qgroupid,
3733 					qgroup_to_aux(glist->group), GFP_ATOMIC);
3734 			if (ret < 0)
3735 				goto out;
3736 		}
3737 	}
3738 out:
3739 	spin_unlock(&fs_info->qgroup_lock);
3740 }
3741 
3742 void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
3743 {
3744 	struct btrfs_fs_info *fs_info = root->fs_info;
3745 
3746 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
3747 	    !is_fstree(root->root_key.objectid))
3748 		return;
3749 	/* Same as btrfs_qgroup_free_meta_prealloc() */
3750 	num_bytes = sub_root_meta_rsv(root, num_bytes,
3751 				      BTRFS_QGROUP_RSV_META_PREALLOC);
3752 	trace_qgroup_meta_convert(root, num_bytes);
3753 	qgroup_convert_meta(fs_info, root->root_key.objectid, num_bytes);
3754 }
3755 
3756 /*
3757  * Check qgroup reserved space leaking, normally at destroy inode
3758  * time
3759  */
3760 void btrfs_qgroup_check_reserved_leak(struct inode *inode)
3761 {
3762 	struct extent_changeset changeset;
3763 	struct ulist_node *unode;
3764 	struct ulist_iterator iter;
3765 	int ret;
3766 
3767 	extent_changeset_init(&changeset);
3768 	ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
3769 			EXTENT_QGROUP_RESERVED, &changeset);
3770 
3771 	WARN_ON(ret < 0);
3772 	if (WARN_ON(changeset.bytes_changed)) {
3773 		ULIST_ITER_INIT(&iter);
3774 		while ((unode = ulist_next(&changeset.range_changed, &iter))) {
3775 			btrfs_warn(BTRFS_I(inode)->root->fs_info,
3776 				"leaking qgroup reserved space, ino: %lu, start: %llu, end: %llu",
3777 				inode->i_ino, unode->val, unode->aux);
3778 		}
3779 		btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info,
3780 				BTRFS_I(inode)->root->root_key.objectid,
3781 				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
3782 
3783 	}
3784 	extent_changeset_release(&changeset);
3785 }
3786