/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "backref.h"
#include "extent_io.h"
#include "qgroup.h"


/* TODO XXX FIXME
 *  - subvol delete -> delete when ref goes to 0? delete limits also?
 *  - reorganize keys
 *  - compressed
 *  - sync
 *  - copy also limits on subvol creation
 *  - limit
 *  - caches for ulists
 *  - performance benchmarks
 *  - check all ioctl parameters
 */

/*
 * one struct for each qgroup, organized in fs_info->qgroup_tree.
 */
struct btrfs_qgroup {
	u64 qgroupid;

	/*
	 * state
	 */
	u64 rfer;	/* referenced */
	u64 rfer_cmpr;	/* referenced compressed */
	u64 excl;	/* exclusive */
	u64 excl_cmpr;	/* exclusive compressed */

	/*
	 * limits
	 */
	u64 lim_flags;	/* which limits are set */
	u64 max_rfer;
	u64 max_excl;
	u64 rsv_rfer;
	u64 rsv_excl;

	/*
	 * reservation tracking
	 */
	u64 reserved;

	/*
	 * lists
	 */
	struct list_head groups;  /* groups this group is member of */
	struct list_head members; /* groups that are members of this group */
	struct list_head dirty;   /* dirty groups */
	struct rb_node node;	  /* tree of qgroups */

	/*
	 * temp variables for accounting operations
	 * Refer to qgroup_update_refcnt() for details.
	 */
	u64 old_refcnt;
	u64 new_refcnt;
};

static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->old_refcnt < seq)
		qg->old_refcnt = seq;
	qg->old_refcnt += mod;
}

static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->new_refcnt < seq)
		qg->new_refcnt = seq;
	qg->new_refcnt += mod;
}

static inline u64 btrfs_qgroup_get_old_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->old_refcnt < seq)
		return 0;
	return qg->old_refcnt - seq;
}

static inline u64 btrfs_qgroup_get_new_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->new_refcnt < seq)
		return 0;
	return qg->new_refcnt - seq;
}
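
/*
 * Example of the seq encoding used by the helpers above: with seq == 100
 * and a stale old_refcnt of 42 left over from an earlier accounting
 * round, btrfs_qgroup_update_old_refcnt(qg, 100, 1) first snaps
 * old_refcnt to 100 and then adds 1; a second update makes it 102, and
 * btrfs_qgroup_get_old_refcnt(qg, 100) then returns 2.  Bumping
 * fs_info->qgroup_seq past the largest possible count (see
 * btrfs_qgroup_account_extent()) therefore resets all counts to zero
 * without having to touch every qgroup.
 */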

/*
 * glue structure to represent the relations between qgroups.
 */
struct btrfs_qgroup_list {
	struct list_head next_group;
	struct list_head next_member;
	struct btrfs_qgroup *group;
	struct btrfs_qgroup *member;
};

#define ptr_to_u64(x) ((u64)(uintptr_t)x)
#define u64_to_ptr(x) ((struct btrfs_qgroup *)(uintptr_t)x)

static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags);
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);

/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
					   u64 qgroupid)
{
	struct rb_node *n = fs_info->qgroup_tree.rb_node;
	struct btrfs_qgroup *qgroup;

	while (n) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		if (qgroup->qgroupid < qgroupid)
			n = n->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			n = n->rb_right;
		else
			return qgroup;
	}
	return NULL;
}

/* must be called with qgroup_lock held */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
					  u64 qgroupid)
{
	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_qgroup *qgroup;

	while (*p) {
		parent = *p;
		qgroup = rb_entry(parent, struct btrfs_qgroup, node);

		if (qgroup->qgroupid < qgroupid)
			p = &(*p)->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			p = &(*p)->rb_right;
		else
			return qgroup;
	}

	qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
	if (!qgroup)
		return ERR_PTR(-ENOMEM);

	qgroup->qgroupid = qgroupid;
	INIT_LIST_HEAD(&qgroup->groups);
	INIT_LIST_HEAD(&qgroup->members);
	INIT_LIST_HEAD(&qgroup->dirty);

	rb_link_node(&qgroup->node, parent, p);
	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);

	return qgroup;
}
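
/*
 * Note that find_qgroup_rb() and add_qgroup_rb() both descend to the
 * left when the key at the current node is smaller than the one being
 * looked up, i.e. larger qgroupids live in the left subtree.  The
 * ordering is unusual, but harmless as long as the two helpers agree
 * on it.
 */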

static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
{
	struct btrfs_qgroup_list *list;

	list_del(&qgroup->dirty);
	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}

	while (!list_empty(&qgroup->members)) {
		list = list_first_entry(&qgroup->members,
					struct btrfs_qgroup_list, next_member);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}
	kfree(qgroup);
}

/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);

	if (!qgroup)
		return -ENOENT;

	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
	__del_qgroup_rb(qgroup);
	return 0;
}

/* must be called with qgroup_lock held */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list = kzalloc(sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	list->group = parent;
	list->member = member;
	list_add_tail(&list->next_group, &member->groups);
	list_add_tail(&list->next_member, &parent->members);

	return 0;
}

/* must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			list_del(&list->next_group);
			list_del(&list->next_member);
			kfree(list);
			return 0;
		}
	}
	return -ENOENT;
}
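
/*
 * A single btrfs_qgroup_list allocation stitches a member/parent pair
 * together from both directions:
 *
 *	member->groups  links it via list->next_group
 *	parent->members links it via list->next_member
 *
 * so del_relation_rb() only has to find the entry on one of the two
 * lists to unlink it from both.
 */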

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
			       u64 rfer, u64 excl)
{
	struct btrfs_qgroup *qgroup;

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup)
		return -EINVAL;
	if (qgroup->rfer != rfer || qgroup->excl != excl)
		return -EINVAL;
	return 0;
}
#endif

/*
 * The full config is read in one go; this is only called from
 * open_ctree(). It doesn't use any locking, as at this point we're
 * still single-threaded.
 */
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path = NULL;
	struct extent_buffer *l;
	int slot;
	int ret = 0;
	u64 flags = 0;
	u64 rescan_progress = 0;

	if (!fs_info->quota_enabled)
		return 0;

	fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/* default this to quota off, in case no status key is found */
	fs_info->qgroup_flags = 0;

	/*
	 * pass 1: read status, all qgroup infos and limits
	 */
	key.objectid = 0;
	key.type = 0;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
	if (ret)
		goto out;

	while (1) {
		struct btrfs_qgroup *qgroup;

		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
			struct btrfs_qgroup_status_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_status_item);

			if (btrfs_qgroup_status_version(l, ptr) !=
			    BTRFS_QGROUP_STATUS_VERSION) {
				btrfs_err(fs_info,
				 "old qgroup version, quota disabled");
				goto out;
			}
			if (btrfs_qgroup_status_generation(l, ptr) !=
			    fs_info->generation) {
				flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
				btrfs_err(fs_info,
					"qgroup generation mismatch, "
					"marked as inconsistent");
			}
			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
									  ptr);
			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
			goto next1;
		}

		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
			goto next1;

		qgroup = find_qgroup_rb(fs_info, found_key.offset);
		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
			btrfs_err(fs_info, "inconsistent qgroup config");
			flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		}
		if (!qgroup) {
			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				goto out;
			}
		}
		switch (found_key.type) {
		case BTRFS_QGROUP_INFO_KEY: {
			struct btrfs_qgroup_info_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_info_item);
			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
			/* generation currently unused */
			break;
		}
		case BTRFS_QGROUP_LIMIT_KEY: {
			struct btrfs_qgroup_limit_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_limit_item);
			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
			break;
		}
		}
next1:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
	btrfs_release_path(path);

	/*
	 * pass 2: read all qgroup relations
	 */
	key.objectid = 0;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
	if (ret)
		goto out;
	while (1) {
		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
			goto next2;

		if (found_key.objectid > found_key.offset) {
			/* parent <- member, not needed to build config */
			/* FIXME should we omit the key completely? */
			goto next2;
		}

		ret = add_relation_rb(fs_info, found_key.objectid,
				      found_key.offset);
		if (ret == -ENOENT) {
			btrfs_warn(fs_info,
				"orphan qgroup relation 0x%llx->0x%llx",
				found_key.objectid, found_key.offset);
			ret = 0;	/* ignore the error */
		}
		if (ret)
			goto out;
next2:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
out:
	fs_info->qgroup_flags |= flags;
	if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) {
		fs_info->quota_enabled = 0;
		fs_info->pending_quota_state = 0;
	} else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
		   ret >= 0) {
		ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
	}
	btrfs_free_path(path);

	if (ret < 0) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	}

	return ret < 0 ? ret : 0;
}

/*
 * This is called from close_ctree() or open_ctree() or
 * btrfs_quota_disable(); the first two are single-threaded paths. For
 * the third one, we have already set quota_root to NULL with
 * qgroup_lock held, so it is safe to clean up the in-memory structures
 * without taking qgroup_lock.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	while ((n = rb_first(&fs_info->qgroup_tree))) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		rb_erase(n, &fs_info->qgroup_tree);
		__del_qgroup_rb(qgroup);
	}
	/*
	 * we call btrfs_free_qgroup_config() both when unmounting the
	 * filesystem and when disabling quota, so set qgroup_ulist to
	 * NULL here to avoid a double free.
	 */
	ulist_free(fs_info->qgroup_ulist);
	fs_info->qgroup_ulist = NULL;
}

static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *quota_root,
				    u64 src, u64 dst)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);

	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_relation_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *quota_root,
				    u64 src, u64 dst)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
out:
	btrfs_free_path(path);
	return ret;
}
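
/*
 * Relation items carry no payload; the relation lives entirely in the
 * key (objectid = src, offset = dst), and btrfs_add_qgroup_relation()
 * inserts one item per direction so the relation can be found from
 * either end.  For 0/257 -> 1/100 the quota tree gets:
 *
 *	(257,        BTRFS_QGROUP_RELATION_KEY, 1/100's id)
 *	(1/100's id, BTRFS_QGROUP_RELATION_KEY, 257)
 *
 * btrfs_read_qgroup_config() skips the entry whose objectid is the
 * larger of the pair.
 */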

static int add_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_qgroup_info_item *qgroup_info;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	if (btrfs_test_is_dummy_root(quota_root))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;

	/*
	 * Avoid a transaction abort by catching -EEXIST here. In that
	 * case, we proceed by re-initializing the existing structure
	 * on disk.
	 */

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_info));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
				 struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);

	btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_limit));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
				  struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);

	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
	if (ret)
		goto out;

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_qgroup *qgroup)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_LIMIT_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_qgroup *qgroup)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_info_item *qgroup_info;
	int ret;
	int slot;

	if (btrfs_test_is_dummy_root(root))
		return 0;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_status_item *ptr;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
	btrfs_set_qgroup_status_rescan(l, ptr,
				fs_info->qgroup_rescan_progress.objectid);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * called with qgroup_lock held
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf = NULL;
	int ret;
	int nr = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	key.objectid = 0;
	key.offset = 0;
	key.type = 0;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			goto out;
		leaf = path->nodes[0];
		nr = btrfs_header_nritems(leaf);
		if (!nr)
			break;
		/*
		 * delete the leaves one by one, since the
		 * whole tree is going to be deleted.
		 */
		path->slots[0] = 0;
		ret = btrfs_del_items(trans, root, path, 0, nr);
		if (ret)
			goto out;

		btrfs_release_path(path);
	}
	ret = 0;
out:
	root->fs_info->pending_quota_state = 0;
	btrfs_free_path(path);
	return ret;
}

int btrfs_quota_enable(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path = NULL;
	struct btrfs_qgroup_status_item *ptr;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_qgroup *qgroup = NULL;
	int ret = 0;
	int slot;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (fs_info->quota_root) {
		fs_info->pending_quota_state = 1;
		goto out;
	}

	fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * initially create the quota tree
	 */
	quota_root = btrfs_create_tree(trans, fs_info,
				       BTRFS_QUOTA_TREE_OBJECTID);
	if (IS_ERR(quota_root)) {
		ret = PTR_ERR(quota_root);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out_free_root;
	}

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*ptr));
	if (ret)
		goto out_free_path;

	leaf = path->nodes[0];
	ptr = btrfs_item_ptr(leaf, path->slots[0],
				 struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
				BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);

	btrfs_mark_buffer_dirty(leaf);

	key.objectid = 0;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = 0;

	btrfs_release_path(path);
	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
	if (ret > 0)
		goto out_add_root;
	if (ret < 0)
		goto out_free_path;

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.type == BTRFS_ROOT_REF_KEY) {
			ret = add_qgroup_item(trans, quota_root,
					      found_key.offset);
			if (ret)
				goto out_free_path;

			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				goto out_free_path;
			}
		}
		ret = btrfs_next_item(tree_root, path);
		if (ret < 0)
			goto out_free_path;
		if (ret)
			break;
	}

out_add_root:
	btrfs_release_path(path);
	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
	if (ret)
		goto out_free_path;

	qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
	if (IS_ERR(qgroup)) {
		ret = PTR_ERR(qgroup);
		goto out_free_path;
	}
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_root = quota_root;
	fs_info->pending_quota_state = 1;
	spin_unlock(&fs_info->qgroup_lock);
out_free_path:
	btrfs_free_path(path);
out_free_root:
	if (ret) {
		free_extent_buffer(quota_root->node);
		free_extent_buffer(quota_root->commit_root);
		kfree(quota_root);
	}
out:
	if (ret) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
	}
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_quota_disable(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *quota_root;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root)
		goto out;
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_enabled = 0;
	fs_info->pending_quota_state = 0;
	quota_root = fs_info->quota_root;
	fs_info->quota_root = NULL;
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);

	btrfs_free_qgroup_config(fs_info);

	ret = btrfs_clean_quota_tree(trans, quota_root);
	if (ret)
		goto out;

	ret = btrfs_del_root(trans, tree_root, &quota_root->root_key);
	if (ret)
		goto out;

	list_del(&quota_root->dirty_list);

	btrfs_tree_lock(quota_root->node);
	clean_tree_block(trans, tree_root->fs_info, quota_root->node);
	btrfs_tree_unlock(quota_root->node);
	btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);

	free_extent_buffer(quota_root->node);
	free_extent_buffer(quota_root->commit_root);
	kfree(quota_root);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

static void qgroup_dirty(struct btrfs_fs_info *fs_info,
			 struct btrfs_qgroup *qgroup)
{
	if (list_empty(&qgroup->dirty))
		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}
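
/*
 * Qgroups dirtied here collect on fs_info->dirty_qgroups and are
 * written back to the quota tree by btrfs_run_qgroups() at commit
 * time; the list_empty() check above keeps a qgroup from being queued
 * twice.
 */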

/*
 * The easy accounting: if we are adding/removing the only ref for an extent
 * then this qgroup and all of the parent qgroups get their reference and
 * exclusive counts adjusted.
 *
 * Caller should hold fs_info->qgroup_lock.
 */
static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
				    struct ulist *tmp, u64 ref_root,
				    u64 num_bytes, int sign)
{
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *glist;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	int ret = 0;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	qgroup->rfer += sign * num_bytes;
	qgroup->rfer_cmpr += sign * num_bytes;

	WARN_ON(sign < 0 && qgroup->excl < num_bytes);
	qgroup->excl += sign * num_bytes;
	qgroup->excl_cmpr += sign * num_bytes;
	if (sign > 0)
		qgroup->reserved -= num_bytes;

	qgroup_dirty(fs_info, qgroup);

	/* Get all of the parent groups that contain this qgroup */
	list_for_each_entry(glist, &qgroup->groups, next_group) {
		ret = ulist_add(tmp, glist->group->qgroupid,
				ptr_to_u64(glist->group), GFP_ATOMIC);
		if (ret < 0)
			goto out;
	}

	/* Iterate all of the parents and adjust their reference counts */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(tmp, &uiter))) {
		qgroup = u64_to_ptr(unode->aux);
		qgroup->rfer += sign * num_bytes;
		qgroup->rfer_cmpr += sign * num_bytes;
		WARN_ON(sign < 0 && qgroup->excl < num_bytes);
		qgroup->excl += sign * num_bytes;
		if (sign > 0)
			qgroup->reserved -= num_bytes;
		qgroup->excl_cmpr += sign * num_bytes;
		qgroup_dirty(fs_info, qgroup);

		/* Add any parents of the parents */
		list_for_each_entry(glist, &qgroup->groups, next_group) {
			ret = ulist_add(tmp, glist->group->qgroupid,
					ptr_to_u64(glist->group), GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;
out:
	return ret;
}
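
/*
 * Example: dropping the only reference to a 16KiB extent owned by
 * qgroup 0/257, which is a member of 1/100, calls the function above
 * with sign == -1; both 0/257 and 1/100 get their rfer and excl
 * decreased by 16KiB and are queued for writeback via qgroup_dirty().
 */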

/*
 * Quick path for updating qgroup with only excl refs.
 *
 * In that case, just updating all parents is enough.
 * Otherwise we need to do a full rescan.
 * Caller should also hold fs_info->qgroup_lock.
 *
 * Return 0 for quick update, return >0 if a full rescan is needed
 * (the INCONSISTENT flag gets set in that case).
 * Return < 0 for other errors.
 */
static int quick_update_accounting(struct btrfs_fs_info *fs_info,
				   struct ulist *tmp, u64 src, u64 dst,
				   int sign)
{
	struct btrfs_qgroup *qgroup;
	int ret = 1;
	int err = 0;

	qgroup = find_qgroup_rb(fs_info, src);
	if (!qgroup)
		goto out;
	if (qgroup->excl == qgroup->rfer) {
		ret = 0;
		err = __qgroup_excl_accounting(fs_info, tmp, dst,
					       qgroup->excl, sign);
		if (err < 0) {
			ret = err;
			goto out;
		}
	}
out:
	if (ret)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	return ret;
}
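
/*
 * Example: if qgroup 0/257 has excl == rfer (everything it references
 * is exclusive to it), assigning it to 1/100 can be accounted right
 * here by adding 0/257's numbers to 1/100 and its ancestors.  If some
 * of 0/257's data is shared we cannot tell how much of it 1/100
 * already references, so the quick path bails out, sets the
 * INCONSISTENT flag and leaves the fixup to a rescan.
 */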

int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	struct ulist *tmp;
	int ret = 0;

	/* Check the level of src and dst first */
	if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
		return -EINVAL;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}
	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* first check whether such a qgroup relation already exists */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			ret = -EEXIST;
			goto out;
		}
	}

	ret = add_qgroup_relation_item(trans, quota_root, src, dst);
	if (ret)
		goto out;

	ret = add_qgroup_relation_item(trans, quota_root, dst, src);
	if (ret) {
		del_qgroup_relation_item(trans, quota_root, src, dst);
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	ret = add_relation_rb(quota_root->fs_info, src, dst);
	if (ret < 0) {
		spin_unlock(&fs_info->qgroup_lock);
		goto out;
	}
	ret = quick_update_accounting(fs_info, tmp, src, dst, 1);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	ulist_free(tmp);
	return ret;
}

static int __del_qgroup_relation(struct btrfs_trans_handle *trans,
				 struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	struct ulist *tmp;
	int ret = 0;
	int err;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;

	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* first check whether such a qgroup relation already exists */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent)
			goto exist;
	}
	ret = -ENOENT;
	goto out;
exist:
	ret = del_qgroup_relation_item(trans, quota_root, src, dst);
	err = del_qgroup_relation_item(trans, quota_root, dst, src);
	if (err && !ret)
		ret = err;

	spin_lock(&fs_info->qgroup_lock);
	del_relation_rb(fs_info, src, dst);
	ret = quick_update_accounting(fs_info, tmp, src, dst, -1);
	spin_unlock(&fs_info->qgroup_lock);
out:
	ulist_free(tmp);
	return ret;
}

int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	ret = __del_qgroup_relation(trans, fs_info, src, dst);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	return ret;
}

int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}
	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (qgroup) {
		ret = -EEXIST;
		goto out;
	}

	ret = add_qgroup_item(trans, quota_root, qgroupid);
	if (ret)
		goto out;

	spin_lock(&fs_info->qgroup_lock);
	qgroup = add_qgroup_rb(fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);

	if (IS_ERR(qgroup))
		ret = PTR_ERR(qgroup);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *list;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	} else {
		/* check if there are no children of this qgroup */
		if (!list_empty(&qgroup->members)) {
			ret = -EBUSY;
			goto out;
		}
	}
	ret = del_qgroup_item(trans, quota_root, qgroupid);

	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		ret = __del_qgroup_relation(trans, fs_info,
					    qgroupid,
					    list->group->qgroupid);
		if (ret)
			goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	del_qgroup_rb(quota_root->fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info, u64 qgroupid,
		       struct btrfs_qgroup_limit *limit)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;
	/* Sometimes we would want to clear the limit on this qgroup.
	 * To meet this requirement, we treat -1 as a special value
	 * which tells the kernel to clear the limit on this qgroup.
	 */
	const u64 CLEAR_VALUE = -1;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
		if (limit->max_rfer == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
			qgroup->max_rfer = 0;
		} else {
			qgroup->max_rfer = limit->max_rfer;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
		if (limit->max_excl == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
			qgroup->max_excl = 0;
		} else {
			qgroup->max_excl = limit->max_excl;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
		if (limit->rsv_rfer == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
			qgroup->rsv_rfer = 0;
		} else {
			qgroup->rsv_rfer = limit->rsv_rfer;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
		if (limit->rsv_excl == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
			qgroup->rsv_excl = 0;
		} else {
			qgroup->rsv_excl = limit->rsv_excl;
		}
	}
	qgroup->lim_flags |= limit->flags;

	spin_unlock(&fs_info->qgroup_lock);

	ret = update_qgroup_limit_item(trans, quota_root, qgroup);
	if (ret) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		btrfs_info(fs_info, "unable to update quota limit for %llu",
		       qgroupid);
	}

out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_qgroup_extent_record *record;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *node;
	u64 qgroup_to_skip;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	qgroup_to_skip = delayed_refs->qgroup_to_skip;

	/*
	 * No need to take any lock, since this function will only be
	 * called from btrfs_commit_transaction().
	 */
	node = rb_first(&delayed_refs->dirty_extent_root);
	while (node) {
		record = rb_entry(node, struct btrfs_qgroup_extent_record,
				  node);
		ret = btrfs_find_all_roots(NULL, fs_info, record->bytenr, 0,
					   &record->old_roots);
		if (ret < 0)
			break;
		if (qgroup_to_skip)
			ulist_del(record->old_roots, qgroup_to_skip, 0);
		node = rb_next(node);
	}
	return ret;
}

struct btrfs_qgroup_extent_record
*btrfs_qgroup_insert_dirty_extent(struct btrfs_delayed_ref_root *delayed_refs,
				  struct btrfs_qgroup_extent_record *record)
{
	struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_qgroup_extent_record *entry;
	u64 bytenr = record->bytenr;

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
				 node);
		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(&record->node, parent_node, p);
	rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
	return NULL;
}
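
/*
 * Callers of btrfs_qgroup_insert_dirty_extent() allocate the record
 * themselves and are expected to hold the delayed_refs lock, since the
 * dirty extent tree lives inside btrfs_delayed_ref_root.  A non-NULL
 * return means the extent was already recorded in this transaction and
 * the caller should free its own copy.
 */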

#define UPDATE_NEW	0
#define UPDATE_OLD	1
/*
 * Walk all of the roots that point to the bytenr and adjust their refcnts.
 */
static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
				struct ulist *roots, struct ulist *tmp,
				struct ulist *qgroups, u64 seq, int update_old)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct ulist_node *tmp_unode;
	struct ulist_iterator tmp_uiter;
	struct btrfs_qgroup *qg;
	int ret = 0;

	if (!roots)
		return 0;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(roots, &uiter))) {
		qg = find_qgroup_rb(fs_info, unode->val);
		if (!qg)
			continue;

		ulist_reinit(tmp);
		ret = ulist_add(qgroups, qg->qgroupid, ptr_to_u64(qg),
				GFP_ATOMIC);
		if (ret < 0)
			return ret;
		ret = ulist_add(tmp, qg->qgroupid, ptr_to_u64(qg), GFP_ATOMIC);
		if (ret < 0)
			return ret;
		ULIST_ITER_INIT(&tmp_uiter);
		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
			struct btrfs_qgroup_list *glist;

			qg = u64_to_ptr(tmp_unode->aux);
			if (update_old)
				btrfs_qgroup_update_old_refcnt(qg, seq, 1);
			else
				btrfs_qgroup_update_new_refcnt(qg, seq, 1);
			list_for_each_entry(glist, &qg->groups, next_group) {
				ret = ulist_add(qgroups, glist->group->qgroupid,
						ptr_to_u64(glist->group),
						GFP_ATOMIC);
				if (ret < 0)
					return ret;
				ret = ulist_add(tmp, glist->group->qgroupid,
						ptr_to_u64(glist->group),
						GFP_ATOMIC);
				if (ret < 0)
					return ret;
			}
		}
	}
	return 0;
}
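
/*
 * Example: with roots = {257, 258} where both subvolumes are members
 * of qgroup 1/100, the walk above bumps the refcnt of 0/257, 0/258 and
 * 1/100 once per owning root that reaches them.  The tmp ulist only
 * prevents counting a qgroup twice for the same root, so 1/100
 * legitimately ends up with a refcnt of 2.
 */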

/*
 * Update qgroup rfer/excl counters.
 * The rfer update is easy, the code can explain itself.
 *
 * The excl update is tricky, the update is split into 2 parts.
 * Part 1: Possible exclusive <-> sharing detect:
 *	|	A	|	!A	|
 *  -------------------------------------
 *  B	|	*	|	-	|
 *  -------------------------------------
 *  !B	|	+	|	**	|
 *  -------------------------------------
 *
 * Conditions:
 * A:	cur_old_roots < nr_old_roots	(not exclusive before)
 * !A:	cur_old_roots == nr_old_roots	(possible exclusive before)
 * B:	cur_new_roots < nr_new_roots	(not exclusive now)
 * !B:	cur_new_roots == nr_new_roots	(possible exclusive now)
 *
 * Results:
 * +: Possible sharing -> exclusive	-: Possible exclusive -> sharing
 * *: Definitely not changed.		**: Possibly unchanged.
 *
 * For the !A and !B conditions, the exception is the cur_old/new_roots == 0
 * case.
 *
 * To make the logic clear, we first use conditions A and B to split the
 * combination into 4 results.
 *
 * Then, for the "+" and "-" results, check the old/new_roots == 0 case, as
 * there only one variant may be 0.
 *
 * Lastly, check result **, since there are 2 variants that may be 0, split
 * them again (2x2).
 * But this time we don't need to consider other things, the code and logic
 * are easy to understand now.
 */
static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
				  struct ulist *qgroups,
				  u64 nr_old_roots,
				  u64 nr_new_roots,
				  u64 num_bytes, u64 seq)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct btrfs_qgroup *qg;
	u64 cur_new_count, cur_old_count;

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(qgroups, &uiter))) {
		bool dirty = false;

		qg = u64_to_ptr(unode->aux);
		cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
		cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);

		/* Rfer update part */
		if (cur_old_count == 0 && cur_new_count > 0) {
			qg->rfer += num_bytes;
			qg->rfer_cmpr += num_bytes;
			dirty = true;
		}
		if (cur_old_count > 0 && cur_new_count == 0) {
			qg->rfer -= num_bytes;
			qg->rfer_cmpr -= num_bytes;
			dirty = true;
		}

		/* Excl update part */
		/* Exclusive/none -> shared case */
		if (cur_old_count == nr_old_roots &&
		    cur_new_count < nr_new_roots) {
			/* Exclusive -> shared */
			if (cur_old_count != 0) {
				qg->excl -= num_bytes;
				qg->excl_cmpr -= num_bytes;
				dirty = true;
			}
		}

		/* Shared -> exclusive/none case */
		if (cur_old_count < nr_old_roots &&
		    cur_new_count == nr_new_roots) {
			/* Shared->exclusive */
			if (cur_new_count != 0) {
				qg->excl += num_bytes;
				qg->excl_cmpr += num_bytes;
				dirty = true;
			}
		}

		/* Exclusive/none -> exclusive/none case */
		if (cur_old_count == nr_old_roots &&
		    cur_new_count == nr_new_roots) {
			if (cur_old_count == 0) {
				/* None -> exclusive/none */

				if (cur_new_count != 0) {
					/* None -> exclusive */
					qg->excl += num_bytes;
					qg->excl_cmpr += num_bytes;
					dirty = true;
				}
				/* None -> none, nothing changed */
			} else {
				/* Exclusive -> exclusive/none */

				if (cur_new_count == 0) {
					/* Exclusive -> none */
					qg->excl -= num_bytes;
					qg->excl_cmpr -= num_bytes;
					dirty = true;
				}
				/* Exclusive -> exclusive, nothing changed */
			}
		}

		if (dirty)
			qgroup_dirty(fs_info, qg);
	}
	return 0;
}
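
/*
 * Worked example for the table above: a 16KiB extent goes from
 * old_roots = {A, B} to new_roots = {A}, i.e. B dropped its reference.
 * For B's qgroup: cur_old = 1 > 0 and cur_new = 0, so it loses 16KiB
 * of rfer; none of the excl branches fire.  For A's qgroup:
 * cur_old = 1 < nr_old = 2 but cur_new = 1 == nr_new = 1, the
 * shared -> exclusive case, so it gains 16KiB of excl while its rfer
 * stays the same.
 */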

int
btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
			    struct btrfs_fs_info *fs_info,
			    u64 bytenr, u64 num_bytes,
			    struct ulist *old_roots, struct ulist *new_roots)
{
	struct ulist *qgroups = NULL;
	struct ulist *tmp = NULL;
	u64 seq;
	u64 nr_new_roots = 0;
	u64 nr_old_roots = 0;
	int ret = 0;

	if (new_roots)
		nr_new_roots = new_roots->nnodes;
	if (old_roots)
		nr_old_roots = old_roots->nnodes;

	if (!fs_info->quota_enabled)
		goto out_free;
	BUG_ON(!fs_info->quota_root);

	qgroups = ulist_alloc(GFP_NOFS);
	if (!qgroups) {
		ret = -ENOMEM;
		goto out_free;
	}
	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp) {
		ret = -ENOMEM;
		goto out_free;
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
		if (fs_info->qgroup_rescan_progress.objectid <= bytenr) {
			mutex_unlock(&fs_info->qgroup_rescan_lock);
			ret = 0;
			goto out_free;
		}
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	spin_lock(&fs_info->qgroup_lock);
	seq = fs_info->qgroup_seq;

	/* Update old refcnts using old_roots */
	ret = qgroup_update_refcnt(fs_info, old_roots, tmp, qgroups, seq,
				   UPDATE_OLD);
	if (ret < 0)
		goto out;

	/* Update new refcnts using new_roots */
	ret = qgroup_update_refcnt(fs_info, new_roots, tmp, qgroups, seq,
				   UPDATE_NEW);
	if (ret < 0)
		goto out;

	qgroup_update_counters(fs_info, qgroups, nr_old_roots, nr_new_roots,
			       num_bytes, seq);

	/*
	 * Bump qgroup_seq to avoid seq overlap
	 */
	fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1;
out:
	spin_unlock(&fs_info->qgroup_lock);
out_free:
	ulist_free(tmp);
	ulist_free(qgroups);
	ulist_free(old_roots);
	ulist_free(new_roots);
	return ret;
}

int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans,
				 struct btrfs_fs_info *fs_info)
{
	struct btrfs_qgroup_extent_record *record;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct ulist *new_roots = NULL;
	struct rb_node *node;
	u64 qgroup_to_skip;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	qgroup_to_skip = delayed_refs->qgroup_to_skip;
	while ((node = rb_first(&delayed_refs->dirty_extent_root))) {
		record = rb_entry(node, struct btrfs_qgroup_extent_record,
				  node);

		if (!ret) {
			/*
			 * Use (u64)-1 as time_seq to do a special search,
			 * which doesn't lock the tree or delayed_refs and
			 * searches the current root. It's safe inside
			 * commit_transaction().
			 */
			ret = btrfs_find_all_roots(trans, fs_info,
					record->bytenr, (u64)-1, &new_roots);
			if (ret < 0)
				goto cleanup;
			if (qgroup_to_skip)
				ulist_del(new_roots, qgroup_to_skip, 0);
			ret = btrfs_qgroup_account_extent(trans, fs_info,
					record->bytenr, record->num_bytes,
					record->old_roots, new_roots);
			record->old_roots = NULL;
			new_roots = NULL;
		}
cleanup:
		ulist_free(record->old_roots);
		ulist_free(new_roots);
		new_roots = NULL;
		rb_erase(node, &delayed_refs->dirty_extent_root);
		kfree(record);
	}
	return ret;
}

/*
 * called from commit_transaction. Writes all changed qgroups to disk.
 */
int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
		      struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root = fs_info->quota_root;
	int ret = 0;
	int start_rescan_worker = 0;

	if (!quota_root)
		goto out;

	if (!fs_info->quota_enabled && fs_info->pending_quota_state)
		start_rescan_worker = 1;

	fs_info->quota_enabled = fs_info->pending_quota_state;

	spin_lock(&fs_info->qgroup_lock);
	while (!list_empty(&fs_info->dirty_qgroups)) {
		struct btrfs_qgroup *qgroup;
		qgroup = list_first_entry(&fs_info->dirty_qgroups,
					  struct btrfs_qgroup, dirty);
		list_del_init(&qgroup->dirty);
		spin_unlock(&fs_info->qgroup_lock);
		ret = update_qgroup_info_item(trans, quota_root, qgroup);
		if (ret)
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		ret = update_qgroup_limit_item(trans, quota_root, qgroup);
		if (ret)
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		spin_lock(&fs_info->qgroup_lock);
	}
	if (fs_info->quota_enabled)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
	else
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);

	ret = update_qgroup_status_item(trans, fs_info, quota_root);
	if (ret)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;

	if (!ret && start_rescan_worker) {
		ret = qgroup_rescan_init(fs_info, 0, 1);
		if (!ret) {
			qgroup_rescan_zero_tracking(fs_info);
			btrfs_queue_work(fs_info->qgroup_rescan_workers,
					 &fs_info->qgroup_rescan_work);
		}
		ret = 0;
	}

out:
	return ret;
}

/*
 * copy the accounting information between qgroups. This is necessary
 * when a snapshot or a subvolume is created.
 */
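/*
 * The variable-sized tail of struct btrfs_qgroup_inherit is a flat u64
 * array laid out as:
 *
 *	qgroupid[num_qgroups]		groups the new subvol is added to
 *	(src, dst)[num_ref_copies]	rfer counts copied from src to dst
 *	(src, dst)[num_excl_copies]	excl counts copied from src to dst
 *
 * which is why the validation loop below walks
 * num_qgroups + 2 * num_ref_copies + 2 * num_excl_copies entries.
 */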
1845 int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
1846 			 struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
1847 			 struct btrfs_qgroup_inherit *inherit)
1848 {
1849 	int ret = 0;
1850 	int i;
1851 	u64 *i_qgroups;
1852 	struct btrfs_root *quota_root = fs_info->quota_root;
1853 	struct btrfs_qgroup *srcgroup;
1854 	struct btrfs_qgroup *dstgroup;
1855 	u32 level_size = 0;
1856 	u64 nums;
1857 
1858 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1859 	if (!fs_info->quota_enabled)
1860 		goto out;
1861 
1862 	if (!quota_root) {
1863 		ret = -EINVAL;
1864 		goto out;
1865 	}
1866 
1867 	if (inherit) {
1868 		i_qgroups = (u64 *)(inherit + 1);
1869 		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
1870 		       2 * inherit->num_excl_copies;
1871 		for (i = 0; i < nums; ++i) {
1872 			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
1873 			if (!srcgroup) {
1874 				ret = -EINVAL;
1875 				goto out;
1876 			}
1877 
1878 			if ((srcgroup->qgroupid >> 48) <= (objectid >> 48)) {
1879 				ret = -EINVAL;
1880 				goto out;
1881 			}
1882 			++i_qgroups;
1883 		}
1884 	}
1885 
1886 	/*
1887 	 * create a tracking group for the subvol itself
1888 	 */
1889 	ret = add_qgroup_item(trans, quota_root, objectid);
1890 	if (ret)
1891 		goto out;
1892 
1893 	if (srcid) {
1894 		struct btrfs_root *srcroot;
1895 		struct btrfs_key srckey;
1896 
1897 		srckey.objectid = srcid;
1898 		srckey.type = BTRFS_ROOT_ITEM_KEY;
1899 		srckey.offset = (u64)-1;
1900 		srcroot = btrfs_read_fs_root_no_name(fs_info, &srckey);
1901 		if (IS_ERR(srcroot)) {
1902 			ret = PTR_ERR(srcroot);
1903 			goto out;
1904 		}
1905 
1906 		rcu_read_lock();
1907 		level_size = srcroot->nodesize;
1908 		rcu_read_unlock();
1909 	}
1910 
1911 	/*
1912 	 * add qgroup to all inherited groups
1913 	 */
1914 	if (inherit) {
1915 		i_qgroups = (u64 *)(inherit + 1);
1916 		for (i = 0; i < inherit->num_qgroups; ++i) {
1917 			ret = add_qgroup_relation_item(trans, quota_root,
1918 						       objectid, *i_qgroups);
1919 			if (ret)
1920 				goto out;
1921 			ret = add_qgroup_relation_item(trans, quota_root,
1922 						       *i_qgroups, objectid);
1923 			if (ret)
1924 				goto out;
1925 			++i_qgroups;
1926 		}
1927 	}
1928 
1929 
1930 	spin_lock(&fs_info->qgroup_lock);
1931 
1932 	dstgroup = add_qgroup_rb(fs_info, objectid);
1933 	if (IS_ERR(dstgroup)) {
1934 		ret = PTR_ERR(dstgroup);
1935 		goto unlock;
1936 	}
1937 
1938 	if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
1939 		dstgroup->lim_flags = inherit->lim.flags;
1940 		dstgroup->max_rfer = inherit->lim.max_rfer;
1941 		dstgroup->max_excl = inherit->lim.max_excl;
1942 		dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
1943 		dstgroup->rsv_excl = inherit->lim.rsv_excl;
1944 
1945 		ret = update_qgroup_limit_item(trans, quota_root, dstgroup);
1946 		if (ret) {
1947 			fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1948 			btrfs_info(fs_info, "unable to update quota limit for %llu",
1949 			       dstgroup->qgroupid);
1950 			goto unlock;
1951 		}
1952 	}
1953 
1954 	if (srcid) {
1955 		srcgroup = find_qgroup_rb(fs_info, srcid);
1956 		if (!srcgroup)
1957 			goto unlock;
1958 
1959 		/*
1960 		 * We call inherit after we clone the root in order to make sure
1961 		 * our counts don't go crazy, so at this point the only
1962 		 * difference between the two roots should be the root node.
1963 		 */
1964 		dstgroup->rfer = srcgroup->rfer;
1965 		dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
1966 		dstgroup->excl = level_size;
1967 		dstgroup->excl_cmpr = level_size;
1968 		srcgroup->excl = level_size;
1969 		srcgroup->excl_cmpr = level_size;
1970 
1971 		/* inherit the limit info */
1972 		dstgroup->lim_flags = srcgroup->lim_flags;
1973 		dstgroup->max_rfer = srcgroup->max_rfer;
1974 		dstgroup->max_excl = srcgroup->max_excl;
1975 		dstgroup->rsv_rfer = srcgroup->rsv_rfer;
1976 		dstgroup->rsv_excl = srcgroup->rsv_excl;
1977 
1978 		qgroup_dirty(fs_info, dstgroup);
1979 		qgroup_dirty(fs_info, srcgroup);
1980 	}
1981 
1982 	if (!inherit)
1983 		goto unlock;
1984 
1985 	i_qgroups = (u64 *)(inherit + 1);
1986 	for (i = 0; i < inherit->num_qgroups; ++i) {
1987 		ret = add_relation_rb(quota_root->fs_info, objectid,
1988 				      *i_qgroups);
1989 		if (ret)
1990 			goto unlock;
1991 		++i_qgroups;
1992 	}
1993 
1994 	for (i = 0; i < inherit->num_ref_copies; ++i) {
1995 		struct btrfs_qgroup *src;
1996 		struct btrfs_qgroup *dst;
1997 
1998 		src = find_qgroup_rb(fs_info, i_qgroups[0]);
1999 		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
2000 
2001 		if (!src || !dst) {
2002 			ret = -EINVAL;
2003 			goto unlock;
2004 		}
2005 
2006 		dst->rfer = src->rfer - level_size;
2007 		dst->rfer_cmpr = src->rfer_cmpr - level_size;
2008 		i_qgroups += 2;
2009 	}
2010 	for (i = 0; i < inherit->num_excl_copies; ++i) {
2011 		struct btrfs_qgroup *src;
2012 		struct btrfs_qgroup *dst;
2013 
2014 		src = find_qgroup_rb(fs_info, i_qgroups[0]);
2015 		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
2016 
2017 		if (!src || !dst) {
2018 			ret = -EINVAL;
2019 			goto unlock;
2020 		}
2021 
2022 		dst->excl = src->excl + level_size;
2023 		dst->excl_cmpr = src->excl_cmpr + level_size;
2024 		i_qgroups += 2;
2025 	}
2026 
2027 unlock:
2028 	spin_unlock(&fs_info->qgroup_lock);
2029 out:
2030 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
2031 	return ret;
2032 }
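
/*
 * Layout sketch (editor's addition, not part of the original file): the
 * trailing u64 array of struct btrfs_qgroup_inherit, which the function
 * above parses via i_qgroups = (u64 *)(inherit + 1):
 *
 *	[num_qgroups entries]        qgroupids the new subvol becomes a
 *	                             member of
 *	[2 * num_ref_copies]         (src, dst) qgroupid pairs whose rfer
 *	                             counts are copied
 *	[2 * num_excl_copies]        (src, dst) qgroupid pairs whose excl
 *	                             counts are copied
 *
 * hence the validation loop walking
 * num_qgroups + 2 * num_ref_copies + 2 * num_excl_copies entries.
 */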
2033 
2034 static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
2035 {
2036 	struct btrfs_root *quota_root;
2037 	struct btrfs_qgroup *qgroup;
2038 	struct btrfs_fs_info *fs_info = root->fs_info;
2039 	u64 ref_root = root->root_key.objectid;
2040 	int ret = 0;
2041 	struct ulist_node *unode;
2042 	struct ulist_iterator uiter;
2043 
2044 	if (!is_fstree(ref_root))
2045 		return 0;
2046 
2047 	if (num_bytes == 0)
2048 		return 0;
2049 
2050 	spin_lock(&fs_info->qgroup_lock);
2051 	quota_root = fs_info->quota_root;
2052 	if (!quota_root)
2053 		goto out;
2054 
2055 	qgroup = find_qgroup_rb(fs_info, ref_root);
2056 	if (!qgroup)
2057 		goto out;
2058 
2059 	/*
2060 	 * In a first step, check all affected qgroups to see whether any
2061 	 * limit would be exceeded.
2062 	 */
2063 	ulist_reinit(fs_info->qgroup_ulist);
2064 	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
2065 			(uintptr_t)qgroup, GFP_ATOMIC);
2066 	if (ret < 0)
2067 		goto out;
2068 	ULIST_ITER_INIT(&uiter);
2069 	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
2070 		struct btrfs_qgroup *qg;
2071 		struct btrfs_qgroup_list *glist;
2072 
2073 		qg = u64_to_ptr(unode->aux);
2074 
2075 		if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
2076 		    qg->reserved + (s64)qg->rfer + num_bytes >
2077 		    qg->max_rfer) {
2078 			ret = -EDQUOT;
2079 			goto out;
2080 		}
2081 
2082 		if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
2083 		    qg->reserved + (s64)qg->excl + num_bytes >
2084 		    qg->max_excl) {
2085 			ret = -EDQUOT;
2086 			goto out;
2087 		}
2088 
2089 		list_for_each_entry(glist, &qg->groups, next_group) {
2090 			ret = ulist_add(fs_info->qgroup_ulist,
2091 					glist->group->qgroupid,
2092 					(uintptr_t)glist->group, GFP_ATOMIC);
2093 			if (ret < 0)
2094 				goto out;
2095 		}
2096 	}
2097 	ret = 0;
2098 	/*
2099 	 * no limits exceeded, now record the reservation into all qgroups
2100 	 */
2101 	ULIST_ITER_INIT(&uiter);
2102 	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
2103 		struct btrfs_qgroup *qg;
2104 
2105 		qg = u64_to_ptr(unode->aux);
2106 
2107 		qg->reserved += num_bytes;
2108 	}
2109 
2110 out:
2111 	spin_unlock(&fs_info->qgroup_lock);
2112 	return ret;
2113 }
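
/*
 * Worked example for qgroup_reserve() above (editor's addition): assume
 * the hypothetical qgroup 0/257 is a member of 1/100 and both set
 * BTRFS_QGROUP_LIMIT_MAX_RFER. Reserving num_bytes against 0/257 puts
 * both groups on the ulist, and for each one the first loop checks
 *
 *	qg->reserved + (s64)qg->rfer + num_bytes > qg->max_rfer
 *
 * failing with -EDQUOT if either level would exceed its limit. Only when
 * every group in the hierarchy passes does the second loop bump
 * qg->reserved, so a failed check never leaves a partial reservation
 * behind.
 */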
2114 
2115 void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
2116 			       u64 ref_root, u64 num_bytes)
2117 {
2118 	struct btrfs_root *quota_root;
2119 	struct btrfs_qgroup *qgroup;
2120 	struct ulist_node *unode;
2121 	struct ulist_iterator uiter;
2122 	int ret = 0;
2123 
2124 	if (!is_fstree(ref_root))
2125 		return;
2126 
2127 	if (num_bytes == 0)
2128 		return;
2129 
2130 	spin_lock(&fs_info->qgroup_lock);
2131 
2132 	quota_root = fs_info->quota_root;
2133 	if (!quota_root)
2134 		goto out;
2135 
2136 	qgroup = find_qgroup_rb(fs_info, ref_root);
2137 	if (!qgroup)
2138 		goto out;
2139 
2140 	ulist_reinit(fs_info->qgroup_ulist);
2141 	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
2142 			(uintptr_t)qgroup, GFP_ATOMIC);
2143 	if (ret < 0)
2144 		goto out;
2145 	ULIST_ITER_INIT(&uiter);
2146 	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
2147 		struct btrfs_qgroup *qg;
2148 		struct btrfs_qgroup_list *glist;
2149 
2150 		qg = u64_to_ptr(unode->aux);
2151 
2152 		qg->reserved -= num_bytes;
2153 
2154 		list_for_each_entry(glist, &qg->groups, next_group) {
2155 			ret = ulist_add(fs_info->qgroup_ulist,
2156 					glist->group->qgroupid,
2157 					(uintptr_t)glist->group, GFP_ATOMIC);
2158 			if (ret < 0)
2159 				goto out;
2160 		}
2161 	}
2162 
2163 out:
2164 	spin_unlock(&fs_info->qgroup_lock);
2165 }
2166 
2167 static inline void qgroup_free(struct btrfs_root *root, u64 num_bytes)
2168 {
2169 	btrfs_qgroup_free_refroot(root->fs_info, root->objectid,
2170 					 num_bytes);
2171 }

2172 void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
2173 {
2174 	if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
2175 		return;
2176 	btrfs_err(trans->root->fs_info,
2177 		"qgroups not uptodate in trans handle %p:  list is%s empty, "
2178 		"seq is %#x.%x",
2179 		trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
2180 		(u32)(trans->delayed_ref_elem.seq >> 32),
2181 		(u32)trans->delayed_ref_elem.seq);
2182 	BUG();
2183 }
2184 
2185 /*
2186  * returns < 0 on error, 0 when more leaves are to be scanned.
2187  * returns 1 when done.
2188  */
2189 static int
2190 qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
2191 		   struct btrfs_trans_handle *trans)
2192 {
2193 	struct btrfs_key found;
2194 	struct extent_buffer *scratch_leaf = NULL;
2195 	struct ulist *roots = NULL;
2196 	struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
2197 	u64 num_bytes;
2198 	int slot;
2199 	int ret;
2200 
2201 	mutex_lock(&fs_info->qgroup_rescan_lock);
2202 	ret = btrfs_search_slot_for_read(fs_info->extent_root,
2203 					 &fs_info->qgroup_rescan_progress,
2204 					 path, 1, 0);
2205 
2206 	pr_debug("current progress key (%llu %u %llu), search_slot ret %d\n",
2207 		 fs_info->qgroup_rescan_progress.objectid,
2208 		 fs_info->qgroup_rescan_progress.type,
2209 		 fs_info->qgroup_rescan_progress.offset, ret);
2210 
2211 	if (ret) {
2212 		/*
2213 		 * The rescan is about to end, we will not be scanning any
2214 		 * further blocks. We cannot unset the RESCAN flag here, because
2215 		 * we want to commit the transaction if everything went well.
2216 		 * To make the live accounting work in this phase, we set our
2217 		 * scan progress pointer such that every real extent objectid
2218 		 * will be smaller.
2219 		 */
2220 		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
2221 		btrfs_release_path(path);
2222 		mutex_unlock(&fs_info->qgroup_rescan_lock);
2223 		return ret;
2224 	}
2225 
2226 	btrfs_item_key_to_cpu(path->nodes[0], &found,
2227 			      btrfs_header_nritems(path->nodes[0]) - 1);
2228 	fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;
2229 
2230 	btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
2231 	scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]);
2232 	if (!scratch_leaf) {
2233 		ret = -ENOMEM;
2234 		mutex_unlock(&fs_info->qgroup_rescan_lock);
2235 		goto out;
2236 	}
2237 	extent_buffer_get(scratch_leaf);
2238 	btrfs_tree_read_lock(scratch_leaf);
2239 	btrfs_set_lock_blocking_rw(scratch_leaf, BTRFS_READ_LOCK);
2240 	slot = path->slots[0];
2241 	btrfs_release_path(path);
2242 	mutex_unlock(&fs_info->qgroup_rescan_lock);
2243 
2244 	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
2245 		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
2246 		if (found.type != BTRFS_EXTENT_ITEM_KEY &&
2247 		    found.type != BTRFS_METADATA_ITEM_KEY)
2248 			continue;
2249 		if (found.type == BTRFS_METADATA_ITEM_KEY)
2250 			num_bytes = fs_info->extent_root->nodesize;
2251 		else
2252 			num_bytes = found.offset;
2253 
2254 		ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
2255 					   &roots);
2256 		if (ret < 0)
2257 			goto out;
2258 		/* For rescan, just pass old_roots as NULL */
2259 		ret = btrfs_qgroup_account_extent(trans, fs_info,
2260 				found.objectid, num_bytes, NULL, roots);
2261 		if (ret < 0)
2262 			goto out;
2263 	}
2264 out:
2265 	if (scratch_leaf) {
2266 		btrfs_tree_read_unlock_blocking(scratch_leaf);
2267 		free_extent_buffer(scratch_leaf);
2268 	}
2269 	btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
2270 
2271 	return ret;
2272 }
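
/*
 * Note on the item loop above (editor's addition): for
 * BTRFS_METADATA_ITEM_KEY the key offset stores the tree level of the
 * node, not a length, so the extent size must be taken from the nodesize;
 * for BTRFS_EXTENT_ITEM_KEY the offset is the extent size in bytes:
 *
 *	num_bytes = (found.type == BTRFS_METADATA_ITEM_KEY) ?
 *			fs_info->extent_root->nodesize : found.offset;
 */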
2273 
2274 static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
2275 {
2276 	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
2277 						     qgroup_rescan_work);
2278 	struct btrfs_path *path;
2279 	struct btrfs_trans_handle *trans = NULL;
2280 	int err = -ENOMEM;
2281 	int ret = 0;
2282 
2283 	path = btrfs_alloc_path();
2284 	if (!path)
2285 		goto out;
2286 
2287 	err = 0;
2288 	while (!err && !btrfs_fs_closing(fs_info)) {
2289 		trans = btrfs_start_transaction(fs_info->fs_root, 0);
2290 		if (IS_ERR(trans)) {
2291 			err = PTR_ERR(trans);
2292 			break;
2293 		}
2294 		if (!fs_info->quota_enabled) {
2295 			err = -EINTR;
2296 		} else {
2297 			err = qgroup_rescan_leaf(fs_info, path, trans);
2298 		}
2299 		if (err > 0)
2300 			btrfs_commit_transaction(trans, fs_info->fs_root);
2301 		else
2302 			btrfs_end_transaction(trans, fs_info->fs_root);
2303 	}
2304 
2305 out:
2306 	btrfs_free_path(path);
2307 
2308 	mutex_lock(&fs_info->qgroup_rescan_lock);
2309 	if (!btrfs_fs_closing(fs_info))
2310 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2311 
2312 	if (err > 0 &&
2313 	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
2314 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2315 	} else if (err < 0) {
2316 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2317 	}
2318 	mutex_unlock(&fs_info->qgroup_rescan_lock);
2319 
2320 	/*
2321 	 * Only update the status item here, since the previous part has
2322 	 * already updated the qgroup info.
2323 	 */
2324 	trans = btrfs_start_transaction(fs_info->quota_root, 1);
2325 	if (IS_ERR(trans)) {
2326 		err = PTR_ERR(trans);
2327 		btrfs_err(fs_info,
2328 			  "fail to start transaction for status update: %d\n",
2329 			  err);
2330 		goto done;
2331 	}
2332 	ret = update_qgroup_status_item(trans, fs_info, fs_info->quota_root);
2333 	if (ret < 0) {
2334 		err = ret;
2335 		btrfs_err(fs_info, "fail to update qgroup status: %d\n", err);
2336 	}
2337 	btrfs_end_transaction(trans, fs_info->quota_root);
2338 
2339 	if (btrfs_fs_closing(fs_info)) {
2340 		btrfs_info(fs_info, "qgroup scan paused");
2341 	} else if (err >= 0) {
2342 		btrfs_info(fs_info, "qgroup scan completed%s",
2343 			err > 0 ? " (inconsistency flag cleared)" : "");
2344 	} else {
2345 		btrfs_err(fs_info, "qgroup scan failed with %d", err);
2346 	}
2347 
2348 done:
2349 	complete_all(&fs_info->qgroup_rescan_completion);
2350 }
2351 
2352 /*
2353  * Checks that (a) no rescan is running and (b) quota is enabled, and
2354  * initializes all state required for the rescan context.
2355  */
2356 static int
2357 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
2358 		   int init_flags)
2359 {
2360 	int ret = 0;
2361 
2362 	if (!init_flags &&
2363 	    (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) ||
2364 	     !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))) {
2365 		ret = -EINVAL;
2366 		goto err;
2367 	}
2368 
2369 	mutex_lock(&fs_info->qgroup_rescan_lock);
2370 	spin_lock(&fs_info->qgroup_lock);
2371 
2372 	if (init_flags) {
2373 		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
2374 			ret = -EINPROGRESS;
2375 		else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
2376 			ret = -EINVAL;
2377 
2378 		if (ret) {
2379 			spin_unlock(&fs_info->qgroup_lock);
2380 			mutex_unlock(&fs_info->qgroup_rescan_lock);
2381 			goto err;
2382 		}
2383 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2384 	}
2385 
2386 	memset(&fs_info->qgroup_rescan_progress, 0,
2387 		sizeof(fs_info->qgroup_rescan_progress));
2388 	fs_info->qgroup_rescan_progress.objectid = progress_objectid;
2389 	init_completion(&fs_info->qgroup_rescan_completion);
2390 
2391 	spin_unlock(&fs_info->qgroup_lock);
2392 	mutex_unlock(&fs_info->qgroup_rescan_lock);
2393 
2394 	memset(&fs_info->qgroup_rescan_work, 0,
2395 	       sizeof(fs_info->qgroup_rescan_work));
2396 	btrfs_init_work(&fs_info->qgroup_rescan_work,
2397 			btrfs_qgroup_rescan_helper,
2398 			btrfs_qgroup_rescan_worker, NULL, NULL);
2399 
2400 	if (ret) {
2401 err:
2402 		btrfs_info(fs_info, "qgroup_rescan_init failed with %d", ret);
2403 		return ret;
2404 	}
2405 
2406 	return 0;
2407 }
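
/*
 * Usage sketch (editor's addition, call sites assumed): with init_flags
 * set, a fresh rescan is requested and the RESCAN status flag is set here,
 * as in btrfs_qgroup_rescan() below:
 *
 *	ret = qgroup_rescan_init(fs_info, 0, 1);
 *
 * With init_flags == 0, the caller resumes a rescan whose RESCAN flag was
 * found already set on disk, passing the progress objectid read from the
 * qgroup status item:
 *
 *	ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
 */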
2408 
2409 static void
2410 qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
2411 {
2412 	struct rb_node *n;
2413 	struct btrfs_qgroup *qgroup;
2414 
2415 	spin_lock(&fs_info->qgroup_lock);
2416 	/* clear all current qgroup tracking information */
2417 	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
2418 		qgroup = rb_entry(n, struct btrfs_qgroup, node);
2419 		qgroup->rfer = 0;
2420 		qgroup->rfer_cmpr = 0;
2421 		qgroup->excl = 0;
2422 		qgroup->excl_cmpr = 0;
2423 	}
2424 	spin_unlock(&fs_info->qgroup_lock);
2425 }
2426 
2427 int
2428 btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
2429 {
2430 	int ret = 0;
2431 	struct btrfs_trans_handle *trans;
2432 
2433 	ret = qgroup_rescan_init(fs_info, 0, 1);
2434 	if (ret)
2435 		return ret;
2436 
2437 	/*
2438 	 * We have set the rescan_progress to 0, which means no more
2439 	 * delayed refs will be accounted by btrfs_qgroup_account_ref.
2440 	 * However, a racing btrfs_qgroup_account_ref may already be past
2441 	 * its call to btrfs_find_all_roots, in which case it would still
2442 	 * do the accounting.
2443 	 * To solve this, commit the transaction, which ensures all delayed
2444 	 * refs are run; only after that do we clear all tracking
2445 	 * information for a clean start.
2446 	 */
2447 
2448 	trans = btrfs_join_transaction(fs_info->fs_root);
2449 	if (IS_ERR(trans)) {
2450 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2451 		return PTR_ERR(trans);
2452 	}
2453 	ret = btrfs_commit_transaction(trans, fs_info->fs_root);
2454 	if (ret) {
2455 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2456 		return ret;
2457 	}
2458 
2459 	qgroup_rescan_zero_tracking(fs_info);
2460 
2461 	btrfs_queue_work(fs_info->qgroup_rescan_workers,
2462 			 &fs_info->qgroup_rescan_work);
2463 
2464 	return 0;
2465 }
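
/*
 * Hedged caller sketch (editor's addition, assumed call site in ioctl
 * handling): the quota rescan ioctl kicks this off roughly as
 *
 *	ret = btrfs_qgroup_rescan(fs_info);
 *
 * and returns as soon as the worker is queued; callers that need the
 * result can block on btrfs_qgroup_wait_for_completion() below.
 */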
2466 
2467 int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info)
2468 {
2469 	int running;
2470 	int ret = 0;
2471 
2472 	mutex_lock(&fs_info->qgroup_rescan_lock);
2473 	spin_lock(&fs_info->qgroup_lock);
2474 	running = fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2475 	spin_unlock(&fs_info->qgroup_lock);
2476 	mutex_unlock(&fs_info->qgroup_rescan_lock);
2477 
2478 	if (running)
2479 		ret = wait_for_completion_interruptible(
2480 					&fs_info->qgroup_rescan_completion);
2481 
2482 	return ret;
2483 }
2484 
2485 /*
2486  * this is only called from open_ctree where we're still single threaded, thus
2487  * locking is omitted here.
2488  */
2489 void
2490 btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
2491 {
2492 	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
2493 		btrfs_queue_work(fs_info->qgroup_rescan_workers,
2494 				 &fs_info->qgroup_rescan_work);
2495 }
2496 
2497 /*
2498  * Reserve qgroup space for range [start, start + len).
2499  *
2500  * This function will either reserve space from the related qgroups or do
2501  * nothing if the range is already reserved.
2502  *
2503  * Return 0 on successful reservation.
2504  * Return <0 on error (including -EDQUOT).
2505  *
2506  * NOTE: this function may sleep for memory allocation.
2507  */
2508 int btrfs_qgroup_reserve_data(struct inode *inode, u64 start, u64 len)
2509 {
2510 	struct btrfs_root *root = BTRFS_I(inode)->root;
2511 	struct extent_changeset changeset;
2512 	struct ulist_node *unode;
2513 	struct ulist_iterator uiter;
2514 	int ret;
2515 
2516 	if (!root->fs_info->quota_enabled || !is_fstree(root->objectid) ||
2517 	    len == 0)
2518 		return 0;
2519 
2520 	changeset.bytes_changed = 0;
2521 	changeset.range_changed = ulist_alloc(GFP_NOFS);
	if (!changeset.range_changed)
		return -ENOMEM;
2522 	ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
2523 			start + len - 1, EXTENT_QGROUP_RESERVED, GFP_NOFS,
2524 			&changeset);
2525 	trace_btrfs_qgroup_reserve_data(inode, start, len,
2526 					changeset.bytes_changed,
2527 					QGROUP_RESERVE);
2528 	if (ret < 0)
2529 		goto cleanup;
2530 	ret = qgroup_reserve(root, changeset.bytes_changed);
2531 	if (ret < 0)
2532 		goto cleanup;
2533 
2534 	ulist_free(changeset.range_changed);
2535 	return ret;
2536 
2537 cleanup:
2538 	/* cleanup already reserved ranges */
2539 	ULIST_ITER_INIT(&uiter);
2540 	while ((unode = ulist_next(changeset.range_changed, &uiter)))
2541 		clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val,
2542 				 unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL,
2543 				 GFP_NOFS);
2544 	ulist_free(changeset.range_changed);
2545 	return ret;
2546 }
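
/*
 * Note (editor's addition): set_record_extent_bits() records in
 * 'changeset' only the bytes that did not already carry
 * EXTENT_QGROUP_RESERVED, so re-reserving an already reserved range
 * charges nothing extra against the qgroups, and on failure the cleanup
 * loop clears exactly the ranges this call newly flagged.
 */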
2547 
2548 static int __btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len,
2549 				       int free)
2550 {
2551 	struct extent_changeset changeset;
2552 	int trace_op = QGROUP_RELEASE;
2553 	int ret;
2554 
2555 	changeset.bytes_changed = 0;
2556 	changeset.range_changed = ulist_alloc(GFP_NOFS);
2557 	if (!changeset.range_changed)
2558 		return -ENOMEM;
2559 
2560 	ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
2561 			start + len - 1, EXTENT_QGROUP_RESERVED, GFP_NOFS,
2562 			&changeset);
2563 	if (ret < 0)
2564 		goto out;
2565 
2566 	if (free) {
2567 		qgroup_free(BTRFS_I(inode)->root, changeset.bytes_changed);
2568 		trace_op = QGROUP_FREE;
2569 	}
2570 	trace_btrfs_qgroup_release_data(inode, start, len,
2571 					changeset.bytes_changed, trace_op);
2572 out:
2573 	ulist_free(changeset.range_changed);
2574 	return ret;
2575 }
2576 
2577 /*
2578  * Free a reserved space range from io_tree and related qgroups
2579  *
2580  * Should be called when a range of pages gets invalidated before reaching
2581  * disk, or for the error cleanup case.
2582  *
2583  * For data written to disk, use btrfs_qgroup_release_data().
2584  *
2585  * NOTE: This function may sleep for memory allocation.
2586  */
2587 int btrfs_qgroup_free_data(struct inode *inode, u64 start, u64 len)
2588 {
2589 	return __btrfs_qgroup_release_data(inode, start, len, 1);
2590 }
2591 
2592 /*
2593  * Release a reserved space range from io_tree only.
2594  *
2595  * Should be called when a range of pages gets written to disk and the
2596  * corresponding FILE_EXTENT item is inserted into the corresponding root.
2597  *
2598  * Since the new qgroup accounting framework only updates qgroup numbers at
2599  * commit_transaction() time, the reserved space shouldn't be freed from the
2600  * related qgroups yet.
2601  *
2602  * But we should release the range from the io_tree, to allow further writes
2603  * to be COWed.
2604  *
2605  * NOTE: This function may sleep for memory allocation.
2606  */
2607 int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len)
2608 {
2609 	return __btrfs_qgroup_release_data(inode, start, len, 0);
2610 }
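
/*
 * Illustrative lifecycle sketch (editor's addition; 'inode', 'start' and
 * 'len' are placeholders for a write path's values, error handling
 * abbreviated):
 *
 *	ret = btrfs_qgroup_reserve_data(inode, start, len);
 *	if (ret < 0)
 *		return ret;	(-EDQUOT if a qgroup limit would be hit)
 *	... write the data ...
 *	on success, once the file extent item is inserted:
 *		btrfs_qgroup_release_data(inode, start, len);
 *	on failure or page invalidation:
 *		btrfs_qgroup_free_data(inode, start, len);
 */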
2611 
2612 int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes)
2613 {
2614 	int ret;
2615 
2616 	if (!root->fs_info->quota_enabled || !is_fstree(root->objectid) ||
2617 	    num_bytes == 0)
2618 		return 0;
2619 
2620 	BUG_ON(num_bytes != round_down(num_bytes, root->nodesize));
2621 	ret = qgroup_reserve(root, num_bytes);
2622 	if (ret < 0)
2623 		return ret;
2624 	atomic_add(num_bytes, &root->qgroup_meta_rsv);
2625 	return ret;
2626 }
2627 
2628 void btrfs_qgroup_free_meta_all(struct btrfs_root *root)
2629 {
2630 	int reserved;
2631 
2632 	if (!root->fs_info->quota_enabled || !is_fstree(root->objectid))
2633 		return;
2634 
2635 	reserved = atomic_xchg(&root->qgroup_meta_rsv, 0);
2636 	if (reserved == 0)
2637 		return;
2638 	qgroup_free(root, reserved);
2639 }
2640 
2641 void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes)
2642 {
2643 	if (!root->fs_info->quota_enabled || !is_fstree(root->objectid))
2644 		return;
2645 
2646 	BUG_ON(num_bytes != round_down(num_bytes, root->nodesize));
2647 	WARN_ON(atomic_read(&root->qgroup_meta_rsv) < num_bytes);
2648 	atomic_sub(num_bytes, &root->qgroup_meta_rsv);
2649 	qgroup_free(root, num_bytes);
2650 }
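
/*
 * Illustrative sketch (editor's addition, assumed caller): the metadata
 * helpers above track reservations in root->qgroup_meta_rsv, and sizes
 * must be nodesize-aligned as the BUG_ON()s enforce:
 *
 *	int num_bytes = 3 * root->nodesize;
 *
 *	ret = btrfs_qgroup_reserve_meta(root, num_bytes);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	btrfs_qgroup_free_meta(root, num_bytes);
 *
 * while btrfs_qgroup_free_meta_all() drops whatever remains for the root,
 * e.g. when the whole reservation is torn down at once.
 */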
2651 
2652 /*
2653  * Check for leaked qgroup reserved space, normally at inode destroy
2654  * time.
2655  */
2656 void btrfs_qgroup_check_reserved_leak(struct inode *inode)
2657 {
2658 	struct extent_changeset changeset;
2659 	struct ulist_node *unode;
2660 	struct ulist_iterator iter;
2661 	int ret;
2662 
2663 	changeset.bytes_changed = 0;
2664 	changeset.range_changed = ulist_alloc(GFP_NOFS);
2665 	if (WARN_ON(!changeset.range_changed))
2666 		return;
2667 
2668 	ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
2669 			EXTENT_QGROUP_RESERVED, GFP_NOFS, &changeset);
2670 
2671 	WARN_ON(ret < 0);
2672 	if (WARN_ON(changeset.bytes_changed)) {
2673 		ULIST_ITER_INIT(&iter);
2674 		while ((unode = ulist_next(changeset.range_changed, &iter))) {
2675 			btrfs_warn(BTRFS_I(inode)->root->fs_info,
2676 				"leaking qgroup reserved space, ino: %lu, start: %llu, end: %llu",
2677 				inode->i_ino, unode->val, unode->aux);
2678 		}
2679 		qgroup_free(BTRFS_I(inode)->root, changeset.bytes_changed);
2680 	}
2681 	ulist_free(changeset.range_changed);
2682 }
2683