1 /* 2 * Copyright (C) 2011 STRATO. All rights reserved. 3 * 4 * This program is free software; you can redistribute it and/or 5 * modify it under the terms of the GNU General Public 6 * License v2 as published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope that it will be useful, 9 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 * General Public License for more details. 12 * 13 * You should have received a copy of the GNU General Public 14 * License along with this program; if not, write to the 15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 16 * Boston, MA 021110-1307, USA. 17 */ 18 19 #include <linux/sched.h> 20 #include <linux/pagemap.h> 21 #include <linux/writeback.h> 22 #include <linux/blkdev.h> 23 #include <linux/rbtree.h> 24 #include <linux/slab.h> 25 #include <linux/workqueue.h> 26 #include <linux/btrfs.h> 27 28 #include "ctree.h" 29 #include "transaction.h" 30 #include "disk-io.h" 31 #include "locking.h" 32 #include "ulist.h" 33 #include "backref.h" 34 #include "extent_io.h" 35 #include "qgroup.h" 36 37 38 /* TODO XXX FIXME 39 * - subvol delete -> delete when ref goes to 0? delete limits also? 40 * - reorganize keys 41 * - compressed 42 * - sync 43 * - copy also limits on subvol creation 44 * - limit 45 * - caches fuer ulists 46 * - performance benchmarks 47 * - check all ioctl parameters 48 */ 49 50 /* 51 * Helpers to access qgroup reservation 52 * 53 * Callers should ensure the lock context and type are valid 54 */ 55 56 static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup) 57 { 58 u64 ret = 0; 59 int i; 60 61 for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) 62 ret += qgroup->rsv.values[i]; 63 64 return ret; 65 } 66 67 #ifdef CONFIG_BTRFS_DEBUG 68 static const char *qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type) 69 { 70 if (type == BTRFS_QGROUP_RSV_DATA) 71 return "data"; 72 if (type == BTRFS_QGROUP_RSV_META_PERTRANS) 73 return "meta_pertrans"; 74 if (type == BTRFS_QGROUP_RSV_META_PREALLOC) 75 return "meta_prealloc"; 76 return NULL; 77 } 78 #endif 79 80 static void qgroup_rsv_add(struct btrfs_fs_info *fs_info, 81 struct btrfs_qgroup *qgroup, u64 num_bytes, 82 enum btrfs_qgroup_rsv_type type) 83 { 84 trace_qgroup_update_reserve(fs_info, qgroup, num_bytes, type); 85 qgroup->rsv.values[type] += num_bytes; 86 } 87 88 static void qgroup_rsv_release(struct btrfs_fs_info *fs_info, 89 struct btrfs_qgroup *qgroup, u64 num_bytes, 90 enum btrfs_qgroup_rsv_type type) 91 { 92 trace_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type); 93 if (qgroup->rsv.values[type] >= num_bytes) { 94 qgroup->rsv.values[type] -= num_bytes; 95 return; 96 } 97 #ifdef CONFIG_BTRFS_DEBUG 98 WARN_RATELIMIT(1, 99 "qgroup %llu %s reserved space underflow, have %llu to free %llu", 100 qgroup->qgroupid, qgroup_rsv_type_str(type), 101 qgroup->rsv.values[type], num_bytes); 102 #endif 103 qgroup->rsv.values[type] = 0; 104 } 105 106 static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info, 107 struct btrfs_qgroup *dest, 108 struct btrfs_qgroup *src) 109 { 110 int i; 111 112 for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) 113 qgroup_rsv_add(fs_info, dest, src->rsv.values[i], i); 114 } 115 116 static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info, 117 struct btrfs_qgroup *dest, 118 struct btrfs_qgroup *src) 119 { 120 int i; 121 122 for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) 123 qgroup_rsv_release(fs_info, dest, src->rsv.values[i], i); 124 
} 125 126 static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq, 127 int mod) 128 { 129 if (qg->old_refcnt < seq) 130 qg->old_refcnt = seq; 131 qg->old_refcnt += mod; 132 } 133 134 static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq, 135 int mod) 136 { 137 if (qg->new_refcnt < seq) 138 qg->new_refcnt = seq; 139 qg->new_refcnt += mod; 140 } 141 142 static inline u64 btrfs_qgroup_get_old_refcnt(struct btrfs_qgroup *qg, u64 seq) 143 { 144 if (qg->old_refcnt < seq) 145 return 0; 146 return qg->old_refcnt - seq; 147 } 148 149 static inline u64 btrfs_qgroup_get_new_refcnt(struct btrfs_qgroup *qg, u64 seq) 150 { 151 if (qg->new_refcnt < seq) 152 return 0; 153 return qg->new_refcnt - seq; 154 } 155 156 /* 157 * glue structure to represent the relations between qgroups. 158 */ 159 struct btrfs_qgroup_list { 160 struct list_head next_group; 161 struct list_head next_member; 162 struct btrfs_qgroup *group; 163 struct btrfs_qgroup *member; 164 }; 165 166 static inline u64 qgroup_to_aux(struct btrfs_qgroup *qg) 167 { 168 return (u64)(uintptr_t)qg; 169 } 170 171 static inline struct btrfs_qgroup* unode_aux_to_qgroup(struct ulist_node *n) 172 { 173 return (struct btrfs_qgroup *)(uintptr_t)n->aux; 174 } 175 176 static int 177 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid, 178 int init_flags); 179 static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info); 180 181 /* must be called with qgroup_ioctl_lock held */ 182 static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info, 183 u64 qgroupid) 184 { 185 struct rb_node *n = fs_info->qgroup_tree.rb_node; 186 struct btrfs_qgroup *qgroup; 187 188 while (n) { 189 qgroup = rb_entry(n, struct btrfs_qgroup, node); 190 if (qgroup->qgroupid < qgroupid) 191 n = n->rb_left; 192 else if (qgroup->qgroupid > qgroupid) 193 n = n->rb_right; 194 else 195 return qgroup; 196 } 197 return NULL; 198 } 199 200 /* must be called with qgroup_lock held */ 201 static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info, 202 u64 qgroupid) 203 { 204 struct rb_node **p = &fs_info->qgroup_tree.rb_node; 205 struct rb_node *parent = NULL; 206 struct btrfs_qgroup *qgroup; 207 208 while (*p) { 209 parent = *p; 210 qgroup = rb_entry(parent, struct btrfs_qgroup, node); 211 212 if (qgroup->qgroupid < qgroupid) 213 p = &(*p)->rb_left; 214 else if (qgroup->qgroupid > qgroupid) 215 p = &(*p)->rb_right; 216 else 217 return qgroup; 218 } 219 220 qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC); 221 if (!qgroup) 222 return ERR_PTR(-ENOMEM); 223 224 qgroup->qgroupid = qgroupid; 225 INIT_LIST_HEAD(&qgroup->groups); 226 INIT_LIST_HEAD(&qgroup->members); 227 INIT_LIST_HEAD(&qgroup->dirty); 228 229 rb_link_node(&qgroup->node, parent, p); 230 rb_insert_color(&qgroup->node, &fs_info->qgroup_tree); 231 232 return qgroup; 233 } 234 235 static void __del_qgroup_rb(struct btrfs_qgroup *qgroup) 236 { 237 struct btrfs_qgroup_list *list; 238 239 list_del(&qgroup->dirty); 240 while (!list_empty(&qgroup->groups)) { 241 list = list_first_entry(&qgroup->groups, 242 struct btrfs_qgroup_list, next_group); 243 list_del(&list->next_group); 244 list_del(&list->next_member); 245 kfree(list); 246 } 247 248 while (!list_empty(&qgroup->members)) { 249 list = list_first_entry(&qgroup->members, 250 struct btrfs_qgroup_list, next_member); 251 list_del(&list->next_group); 252 list_del(&list->next_member); 253 kfree(list); 254 } 255 kfree(qgroup); 256 } 257 258 /* must be called with qgroup_lock held */ 259 static int 
del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid) 260 { 261 struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid); 262 263 if (!qgroup) 264 return -ENOENT; 265 266 rb_erase(&qgroup->node, &fs_info->qgroup_tree); 267 __del_qgroup_rb(qgroup); 268 return 0; 269 } 270 271 /* must be called with qgroup_lock held */ 272 static int add_relation_rb(struct btrfs_fs_info *fs_info, 273 u64 memberid, u64 parentid) 274 { 275 struct btrfs_qgroup *member; 276 struct btrfs_qgroup *parent; 277 struct btrfs_qgroup_list *list; 278 279 member = find_qgroup_rb(fs_info, memberid); 280 parent = find_qgroup_rb(fs_info, parentid); 281 if (!member || !parent) 282 return -ENOENT; 283 284 list = kzalloc(sizeof(*list), GFP_ATOMIC); 285 if (!list) 286 return -ENOMEM; 287 288 list->group = parent; 289 list->member = member; 290 list_add_tail(&list->next_group, &member->groups); 291 list_add_tail(&list->next_member, &parent->members); 292 293 return 0; 294 } 295 296 /* must be called with qgroup_lock held */ 297 static int del_relation_rb(struct btrfs_fs_info *fs_info, 298 u64 memberid, u64 parentid) 299 { 300 struct btrfs_qgroup *member; 301 struct btrfs_qgroup *parent; 302 struct btrfs_qgroup_list *list; 303 304 member = find_qgroup_rb(fs_info, memberid); 305 parent = find_qgroup_rb(fs_info, parentid); 306 if (!member || !parent) 307 return -ENOENT; 308 309 list_for_each_entry(list, &member->groups, next_group) { 310 if (list->group == parent) { 311 list_del(&list->next_group); 312 list_del(&list->next_member); 313 kfree(list); 314 return 0; 315 } 316 } 317 return -ENOENT; 318 } 319 320 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 321 int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid, 322 u64 rfer, u64 excl) 323 { 324 struct btrfs_qgroup *qgroup; 325 326 qgroup = find_qgroup_rb(fs_info, qgroupid); 327 if (!qgroup) 328 return -EINVAL; 329 if (qgroup->rfer != rfer || qgroup->excl != excl) 330 return -EINVAL; 331 return 0; 332 } 333 #endif 334 335 /* 336 * The full config is read in one go, only called from open_ctree() 337 * It doesn't use any locking, as at this point we're still single-threaded 338 */ 339 int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info) 340 { 341 struct btrfs_key key; 342 struct btrfs_key found_key; 343 struct btrfs_root *quota_root = fs_info->quota_root; 344 struct btrfs_path *path = NULL; 345 struct extent_buffer *l; 346 int slot; 347 int ret = 0; 348 u64 flags = 0; 349 u64 rescan_progress = 0; 350 351 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) 352 return 0; 353 354 fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL); 355 if (!fs_info->qgroup_ulist) { 356 ret = -ENOMEM; 357 goto out; 358 } 359 360 path = btrfs_alloc_path(); 361 if (!path) { 362 ret = -ENOMEM; 363 goto out; 364 } 365 366 /* default this to quota off, in case no status key is found */ 367 fs_info->qgroup_flags = 0; 368 369 /* 370 * pass 1: read status, all qgroup infos and limits 371 */ 372 key.objectid = 0; 373 key.type = 0; 374 key.offset = 0; 375 ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1); 376 if (ret) 377 goto out; 378 379 while (1) { 380 struct btrfs_qgroup *qgroup; 381 382 slot = path->slots[0]; 383 l = path->nodes[0]; 384 btrfs_item_key_to_cpu(l, &found_key, slot); 385 386 if (found_key.type == BTRFS_QGROUP_STATUS_KEY) { 387 struct btrfs_qgroup_status_item *ptr; 388 389 ptr = btrfs_item_ptr(l, slot, 390 struct btrfs_qgroup_status_item); 391 392 if (btrfs_qgroup_status_version(l, ptr) != 393 BTRFS_QGROUP_STATUS_VERSION) { 394 btrfs_err(fs_info, 395 
"old qgroup version, quota disabled"); 396 goto out; 397 } 398 if (btrfs_qgroup_status_generation(l, ptr) != 399 fs_info->generation) { 400 flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 401 btrfs_err(fs_info, 402 "qgroup generation mismatch, marked as inconsistent"); 403 } 404 fs_info->qgroup_flags = btrfs_qgroup_status_flags(l, 405 ptr); 406 rescan_progress = btrfs_qgroup_status_rescan(l, ptr); 407 goto next1; 408 } 409 410 if (found_key.type != BTRFS_QGROUP_INFO_KEY && 411 found_key.type != BTRFS_QGROUP_LIMIT_KEY) 412 goto next1; 413 414 qgroup = find_qgroup_rb(fs_info, found_key.offset); 415 if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) || 416 (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) { 417 btrfs_err(fs_info, "inconsistent qgroup config"); 418 flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 419 } 420 if (!qgroup) { 421 qgroup = add_qgroup_rb(fs_info, found_key.offset); 422 if (IS_ERR(qgroup)) { 423 ret = PTR_ERR(qgroup); 424 goto out; 425 } 426 } 427 switch (found_key.type) { 428 case BTRFS_QGROUP_INFO_KEY: { 429 struct btrfs_qgroup_info_item *ptr; 430 431 ptr = btrfs_item_ptr(l, slot, 432 struct btrfs_qgroup_info_item); 433 qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr); 434 qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr); 435 qgroup->excl = btrfs_qgroup_info_excl(l, ptr); 436 qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr); 437 /* generation currently unused */ 438 break; 439 } 440 case BTRFS_QGROUP_LIMIT_KEY: { 441 struct btrfs_qgroup_limit_item *ptr; 442 443 ptr = btrfs_item_ptr(l, slot, 444 struct btrfs_qgroup_limit_item); 445 qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr); 446 qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr); 447 qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr); 448 qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr); 449 qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr); 450 break; 451 } 452 } 453 next1: 454 ret = btrfs_next_item(quota_root, path); 455 if (ret < 0) 456 goto out; 457 if (ret) 458 break; 459 } 460 btrfs_release_path(path); 461 462 /* 463 * pass 2: read all qgroup relations 464 */ 465 key.objectid = 0; 466 key.type = BTRFS_QGROUP_RELATION_KEY; 467 key.offset = 0; 468 ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0); 469 if (ret) 470 goto out; 471 while (1) { 472 slot = path->slots[0]; 473 l = path->nodes[0]; 474 btrfs_item_key_to_cpu(l, &found_key, slot); 475 476 if (found_key.type != BTRFS_QGROUP_RELATION_KEY) 477 goto next2; 478 479 if (found_key.objectid > found_key.offset) { 480 /* parent <- member, not needed to build config */ 481 /* FIXME should we omit the key completely? 
*/ 482 goto next2; 483 } 484 485 ret = add_relation_rb(fs_info, found_key.objectid, 486 found_key.offset); 487 if (ret == -ENOENT) { 488 btrfs_warn(fs_info, 489 "orphan qgroup relation 0x%llx->0x%llx", 490 found_key.objectid, found_key.offset); 491 ret = 0; /* ignore the error */ 492 } 493 if (ret) 494 goto out; 495 next2: 496 ret = btrfs_next_item(quota_root, path); 497 if (ret < 0) 498 goto out; 499 if (ret) 500 break; 501 } 502 out: 503 fs_info->qgroup_flags |= flags; 504 if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) 505 clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); 506 else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN && 507 ret >= 0) 508 ret = qgroup_rescan_init(fs_info, rescan_progress, 0); 509 btrfs_free_path(path); 510 511 if (ret < 0) { 512 ulist_free(fs_info->qgroup_ulist); 513 fs_info->qgroup_ulist = NULL; 514 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; 515 } 516 517 return ret < 0 ? ret : 0; 518 } 519 520 /* 521 * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(), 522 * first two are in single-threaded paths.And for the third one, we have set 523 * quota_root to be null with qgroup_lock held before, so it is safe to clean 524 * up the in-memory structures without qgroup_lock held. 525 */ 526 void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info) 527 { 528 struct rb_node *n; 529 struct btrfs_qgroup *qgroup; 530 531 while ((n = rb_first(&fs_info->qgroup_tree))) { 532 qgroup = rb_entry(n, struct btrfs_qgroup, node); 533 rb_erase(n, &fs_info->qgroup_tree); 534 __del_qgroup_rb(qgroup); 535 } 536 /* 537 * we call btrfs_free_qgroup_config() when umounting 538 * filesystem and disabling quota, so we set qgroup_ulist 539 * to be null here to avoid double free. 540 */ 541 ulist_free(fs_info->qgroup_ulist); 542 fs_info->qgroup_ulist = NULL; 543 } 544 545 static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, 546 struct btrfs_root *quota_root, 547 u64 src, u64 dst) 548 { 549 int ret; 550 struct btrfs_path *path; 551 struct btrfs_key key; 552 553 path = btrfs_alloc_path(); 554 if (!path) 555 return -ENOMEM; 556 557 key.objectid = src; 558 key.type = BTRFS_QGROUP_RELATION_KEY; 559 key.offset = dst; 560 561 ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0); 562 563 btrfs_mark_buffer_dirty(path->nodes[0]); 564 565 btrfs_free_path(path); 566 return ret; 567 } 568 569 static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, 570 struct btrfs_root *quota_root, 571 u64 src, u64 dst) 572 { 573 int ret; 574 struct btrfs_path *path; 575 struct btrfs_key key; 576 577 path = btrfs_alloc_path(); 578 if (!path) 579 return -ENOMEM; 580 581 key.objectid = src; 582 key.type = BTRFS_QGROUP_RELATION_KEY; 583 key.offset = dst; 584 585 ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1); 586 if (ret < 0) 587 goto out; 588 589 if (ret > 0) { 590 ret = -ENOENT; 591 goto out; 592 } 593 594 ret = btrfs_del_item(trans, quota_root, path); 595 out: 596 btrfs_free_path(path); 597 return ret; 598 } 599 600 static int add_qgroup_item(struct btrfs_trans_handle *trans, 601 struct btrfs_root *quota_root, u64 qgroupid) 602 { 603 int ret; 604 struct btrfs_path *path; 605 struct btrfs_qgroup_info_item *qgroup_info; 606 struct btrfs_qgroup_limit_item *qgroup_limit; 607 struct extent_buffer *leaf; 608 struct btrfs_key key; 609 610 if (btrfs_is_testing(quota_root->fs_info)) 611 return 0; 612 613 path = btrfs_alloc_path(); 614 if (!path) 615 return -ENOMEM; 616 617 key.objectid = 0; 618 key.type = 
BTRFS_QGROUP_INFO_KEY; 619 key.offset = qgroupid; 620 621 /* 622 * Avoid a transaction abort by catching -EEXIST here. In that 623 * case, we proceed by re-initializing the existing structure 624 * on disk. 625 */ 626 627 ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 628 sizeof(*qgroup_info)); 629 if (ret && ret != -EEXIST) 630 goto out; 631 632 leaf = path->nodes[0]; 633 qgroup_info = btrfs_item_ptr(leaf, path->slots[0], 634 struct btrfs_qgroup_info_item); 635 btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid); 636 btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0); 637 btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0); 638 btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0); 639 btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0); 640 641 btrfs_mark_buffer_dirty(leaf); 642 643 btrfs_release_path(path); 644 645 key.type = BTRFS_QGROUP_LIMIT_KEY; 646 ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 647 sizeof(*qgroup_limit)); 648 if (ret && ret != -EEXIST) 649 goto out; 650 651 leaf = path->nodes[0]; 652 qgroup_limit = btrfs_item_ptr(leaf, path->slots[0], 653 struct btrfs_qgroup_limit_item); 654 btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0); 655 btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0); 656 btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0); 657 btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0); 658 btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0); 659 660 btrfs_mark_buffer_dirty(leaf); 661 662 ret = 0; 663 out: 664 btrfs_free_path(path); 665 return ret; 666 } 667 668 static int del_qgroup_item(struct btrfs_trans_handle *trans, 669 struct btrfs_root *quota_root, u64 qgroupid) 670 { 671 int ret; 672 struct btrfs_path *path; 673 struct btrfs_key key; 674 675 path = btrfs_alloc_path(); 676 if (!path) 677 return -ENOMEM; 678 679 key.objectid = 0; 680 key.type = BTRFS_QGROUP_INFO_KEY; 681 key.offset = qgroupid; 682 ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1); 683 if (ret < 0) 684 goto out; 685 686 if (ret > 0) { 687 ret = -ENOENT; 688 goto out; 689 } 690 691 ret = btrfs_del_item(trans, quota_root, path); 692 if (ret) 693 goto out; 694 695 btrfs_release_path(path); 696 697 key.type = BTRFS_QGROUP_LIMIT_KEY; 698 ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1); 699 if (ret < 0) 700 goto out; 701 702 if (ret > 0) { 703 ret = -ENOENT; 704 goto out; 705 } 706 707 ret = btrfs_del_item(trans, quota_root, path); 708 709 out: 710 btrfs_free_path(path); 711 return ret; 712 } 713 714 static int update_qgroup_limit_item(struct btrfs_trans_handle *trans, 715 struct btrfs_root *root, 716 struct btrfs_qgroup *qgroup) 717 { 718 struct btrfs_path *path; 719 struct btrfs_key key; 720 struct extent_buffer *l; 721 struct btrfs_qgroup_limit_item *qgroup_limit; 722 int ret; 723 int slot; 724 725 key.objectid = 0; 726 key.type = BTRFS_QGROUP_LIMIT_KEY; 727 key.offset = qgroup->qgroupid; 728 729 path = btrfs_alloc_path(); 730 if (!path) 731 return -ENOMEM; 732 733 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 734 if (ret > 0) 735 ret = -ENOENT; 736 737 if (ret) 738 goto out; 739 740 l = path->nodes[0]; 741 slot = path->slots[0]; 742 qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item); 743 btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags); 744 btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer); 745 btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl); 746 btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer); 747 
btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl); 748 749 btrfs_mark_buffer_dirty(l); 750 751 out: 752 btrfs_free_path(path); 753 return ret; 754 } 755 756 static int update_qgroup_info_item(struct btrfs_trans_handle *trans, 757 struct btrfs_root *root, 758 struct btrfs_qgroup *qgroup) 759 { 760 struct btrfs_path *path; 761 struct btrfs_key key; 762 struct extent_buffer *l; 763 struct btrfs_qgroup_info_item *qgroup_info; 764 int ret; 765 int slot; 766 767 if (btrfs_is_testing(root->fs_info)) 768 return 0; 769 770 key.objectid = 0; 771 key.type = BTRFS_QGROUP_INFO_KEY; 772 key.offset = qgroup->qgroupid; 773 774 path = btrfs_alloc_path(); 775 if (!path) 776 return -ENOMEM; 777 778 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 779 if (ret > 0) 780 ret = -ENOENT; 781 782 if (ret) 783 goto out; 784 785 l = path->nodes[0]; 786 slot = path->slots[0]; 787 qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item); 788 btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid); 789 btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer); 790 btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr); 791 btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl); 792 btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr); 793 794 btrfs_mark_buffer_dirty(l); 795 796 out: 797 btrfs_free_path(path); 798 return ret; 799 } 800 801 static int update_qgroup_status_item(struct btrfs_trans_handle *trans, 802 struct btrfs_fs_info *fs_info, 803 struct btrfs_root *root) 804 { 805 struct btrfs_path *path; 806 struct btrfs_key key; 807 struct extent_buffer *l; 808 struct btrfs_qgroup_status_item *ptr; 809 int ret; 810 int slot; 811 812 key.objectid = 0; 813 key.type = BTRFS_QGROUP_STATUS_KEY; 814 key.offset = 0; 815 816 path = btrfs_alloc_path(); 817 if (!path) 818 return -ENOMEM; 819 820 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 821 if (ret > 0) 822 ret = -ENOENT; 823 824 if (ret) 825 goto out; 826 827 l = path->nodes[0]; 828 slot = path->slots[0]; 829 ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item); 830 btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags); 831 btrfs_set_qgroup_status_generation(l, ptr, trans->transid); 832 btrfs_set_qgroup_status_rescan(l, ptr, 833 fs_info->qgroup_rescan_progress.objectid); 834 835 btrfs_mark_buffer_dirty(l); 836 837 out: 838 btrfs_free_path(path); 839 return ret; 840 } 841 842 /* 843 * called with qgroup_lock held 844 */ 845 static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans, 846 struct btrfs_root *root) 847 { 848 struct btrfs_path *path; 849 struct btrfs_key key; 850 struct extent_buffer *leaf = NULL; 851 int ret; 852 int nr = 0; 853 854 path = btrfs_alloc_path(); 855 if (!path) 856 return -ENOMEM; 857 858 path->leave_spinning = 1; 859 860 key.objectid = 0; 861 key.offset = 0; 862 key.type = 0; 863 864 while (1) { 865 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 866 if (ret < 0) 867 goto out; 868 leaf = path->nodes[0]; 869 nr = btrfs_header_nritems(leaf); 870 if (!nr) 871 break; 872 /* 873 * delete the leaf one by one 874 * since the whole tree is going 875 * to be deleted. 
876 */ 877 path->slots[0] = 0; 878 ret = btrfs_del_items(trans, root, path, 0, nr); 879 if (ret) 880 goto out; 881 882 btrfs_release_path(path); 883 } 884 ret = 0; 885 out: 886 btrfs_free_path(path); 887 return ret; 888 } 889 890 int btrfs_quota_enable(struct btrfs_trans_handle *trans, 891 struct btrfs_fs_info *fs_info) 892 { 893 struct btrfs_root *quota_root; 894 struct btrfs_root *tree_root = fs_info->tree_root; 895 struct btrfs_path *path = NULL; 896 struct btrfs_qgroup_status_item *ptr; 897 struct extent_buffer *leaf; 898 struct btrfs_key key; 899 struct btrfs_key found_key; 900 struct btrfs_qgroup *qgroup = NULL; 901 int ret = 0; 902 int slot; 903 904 mutex_lock(&fs_info->qgroup_ioctl_lock); 905 if (fs_info->quota_root) 906 goto out; 907 908 fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL); 909 if (!fs_info->qgroup_ulist) { 910 ret = -ENOMEM; 911 goto out; 912 } 913 914 /* 915 * initially create the quota tree 916 */ 917 quota_root = btrfs_create_tree(trans, fs_info, 918 BTRFS_QUOTA_TREE_OBJECTID); 919 if (IS_ERR(quota_root)) { 920 ret = PTR_ERR(quota_root); 921 goto out; 922 } 923 924 path = btrfs_alloc_path(); 925 if (!path) { 926 ret = -ENOMEM; 927 goto out_free_root; 928 } 929 930 key.objectid = 0; 931 key.type = BTRFS_QGROUP_STATUS_KEY; 932 key.offset = 0; 933 934 ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 935 sizeof(*ptr)); 936 if (ret) 937 goto out_free_path; 938 939 leaf = path->nodes[0]; 940 ptr = btrfs_item_ptr(leaf, path->slots[0], 941 struct btrfs_qgroup_status_item); 942 btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid); 943 btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION); 944 fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON | 945 BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 946 btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags); 947 btrfs_set_qgroup_status_rescan(leaf, ptr, 0); 948 949 btrfs_mark_buffer_dirty(leaf); 950 951 key.objectid = 0; 952 key.type = BTRFS_ROOT_REF_KEY; 953 key.offset = 0; 954 955 btrfs_release_path(path); 956 ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0); 957 if (ret > 0) 958 goto out_add_root; 959 if (ret < 0) 960 goto out_free_path; 961 962 963 while (1) { 964 slot = path->slots[0]; 965 leaf = path->nodes[0]; 966 btrfs_item_key_to_cpu(leaf, &found_key, slot); 967 968 if (found_key.type == BTRFS_ROOT_REF_KEY) { 969 ret = add_qgroup_item(trans, quota_root, 970 found_key.offset); 971 if (ret) 972 goto out_free_path; 973 974 qgroup = add_qgroup_rb(fs_info, found_key.offset); 975 if (IS_ERR(qgroup)) { 976 ret = PTR_ERR(qgroup); 977 goto out_free_path; 978 } 979 } 980 ret = btrfs_next_item(tree_root, path); 981 if (ret < 0) 982 goto out_free_path; 983 if (ret) 984 break; 985 } 986 987 out_add_root: 988 btrfs_release_path(path); 989 ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID); 990 if (ret) 991 goto out_free_path; 992 993 qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID); 994 if (IS_ERR(qgroup)) { 995 ret = PTR_ERR(qgroup); 996 goto out_free_path; 997 } 998 spin_lock(&fs_info->qgroup_lock); 999 fs_info->quota_root = quota_root; 1000 set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); 1001 spin_unlock(&fs_info->qgroup_lock); 1002 ret = qgroup_rescan_init(fs_info, 0, 1); 1003 if (!ret) { 1004 qgroup_rescan_zero_tracking(fs_info); 1005 btrfs_queue_work(fs_info->qgroup_rescan_workers, 1006 &fs_info->qgroup_rescan_work); 1007 } 1008 1009 out_free_path: 1010 btrfs_free_path(path); 1011 out_free_root: 1012 if (ret) { 1013 
free_extent_buffer(quota_root->node); 1014 free_extent_buffer(quota_root->commit_root); 1015 kfree(quota_root); 1016 } 1017 out: 1018 if (ret) { 1019 ulist_free(fs_info->qgroup_ulist); 1020 fs_info->qgroup_ulist = NULL; 1021 } 1022 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1023 return ret; 1024 } 1025 1026 int btrfs_quota_disable(struct btrfs_trans_handle *trans, 1027 struct btrfs_fs_info *fs_info) 1028 { 1029 struct btrfs_root *quota_root; 1030 int ret = 0; 1031 1032 mutex_lock(&fs_info->qgroup_ioctl_lock); 1033 if (!fs_info->quota_root) 1034 goto out; 1035 clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); 1036 btrfs_qgroup_wait_for_completion(fs_info, false); 1037 spin_lock(&fs_info->qgroup_lock); 1038 quota_root = fs_info->quota_root; 1039 fs_info->quota_root = NULL; 1040 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON; 1041 spin_unlock(&fs_info->qgroup_lock); 1042 1043 btrfs_free_qgroup_config(fs_info); 1044 1045 ret = btrfs_clean_quota_tree(trans, quota_root); 1046 if (ret) 1047 goto out; 1048 1049 ret = btrfs_del_root(trans, fs_info, "a_root->root_key); 1050 if (ret) 1051 goto out; 1052 1053 list_del("a_root->dirty_list); 1054 1055 btrfs_tree_lock(quota_root->node); 1056 clean_tree_block(fs_info, quota_root->node); 1057 btrfs_tree_unlock(quota_root->node); 1058 btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1); 1059 1060 free_extent_buffer(quota_root->node); 1061 free_extent_buffer(quota_root->commit_root); 1062 kfree(quota_root); 1063 out: 1064 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1065 return ret; 1066 } 1067 1068 static void qgroup_dirty(struct btrfs_fs_info *fs_info, 1069 struct btrfs_qgroup *qgroup) 1070 { 1071 if (list_empty(&qgroup->dirty)) 1072 list_add(&qgroup->dirty, &fs_info->dirty_qgroups); 1073 } 1074 1075 /* 1076 * The easy accounting, we're updating qgroup relationship whose child qgroup 1077 * only has exclusive extents. 1078 * 1079 * In this case, all exclsuive extents will also be exlusive for parent, so 1080 * excl/rfer just get added/removed. 1081 * 1082 * So is qgroup reservation space, which should also be added/removed to 1083 * parent. 1084 * Or when child tries to release reservation space, parent will underflow its 1085 * reservation (for relationship adding case). 1086 * 1087 * Caller should hold fs_info->qgroup_lock. 
1088 */ 1089 static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, 1090 struct ulist *tmp, u64 ref_root, 1091 struct btrfs_qgroup *src, int sign) 1092 { 1093 struct btrfs_qgroup *qgroup; 1094 struct btrfs_qgroup_list *glist; 1095 struct ulist_node *unode; 1096 struct ulist_iterator uiter; 1097 u64 num_bytes = src->excl; 1098 int ret = 0; 1099 1100 qgroup = find_qgroup_rb(fs_info, ref_root); 1101 if (!qgroup) 1102 goto out; 1103 1104 qgroup->rfer += sign * num_bytes; 1105 qgroup->rfer_cmpr += sign * num_bytes; 1106 1107 WARN_ON(sign < 0 && qgroup->excl < num_bytes); 1108 qgroup->excl += sign * num_bytes; 1109 qgroup->excl_cmpr += sign * num_bytes; 1110 1111 if (sign > 0) 1112 qgroup_rsv_add_by_qgroup(fs_info, qgroup, src); 1113 else 1114 qgroup_rsv_release_by_qgroup(fs_info, qgroup, src); 1115 1116 qgroup_dirty(fs_info, qgroup); 1117 1118 /* Get all of the parent groups that contain this qgroup */ 1119 list_for_each_entry(glist, &qgroup->groups, next_group) { 1120 ret = ulist_add(tmp, glist->group->qgroupid, 1121 qgroup_to_aux(glist->group), GFP_ATOMIC); 1122 if (ret < 0) 1123 goto out; 1124 } 1125 1126 /* Iterate all of the parents and adjust their reference counts */ 1127 ULIST_ITER_INIT(&uiter); 1128 while ((unode = ulist_next(tmp, &uiter))) { 1129 qgroup = unode_aux_to_qgroup(unode); 1130 qgroup->rfer += sign * num_bytes; 1131 qgroup->rfer_cmpr += sign * num_bytes; 1132 WARN_ON(sign < 0 && qgroup->excl < num_bytes); 1133 qgroup->excl += sign * num_bytes; 1134 if (sign > 0) 1135 qgroup_rsv_add_by_qgroup(fs_info, qgroup, src); 1136 else 1137 qgroup_rsv_release_by_qgroup(fs_info, qgroup, src); 1138 qgroup->excl_cmpr += sign * num_bytes; 1139 qgroup_dirty(fs_info, qgroup); 1140 1141 /* Add any parents of the parents */ 1142 list_for_each_entry(glist, &qgroup->groups, next_group) { 1143 ret = ulist_add(tmp, glist->group->qgroupid, 1144 qgroup_to_aux(glist->group), GFP_ATOMIC); 1145 if (ret < 0) 1146 goto out; 1147 } 1148 } 1149 ret = 0; 1150 out: 1151 return ret; 1152 } 1153 1154 1155 /* 1156 * Quick path for updating qgroup with only excl refs. 1157 * 1158 * In that case, just update all parent will be enough. 1159 * Or we needs to do a full rescan. 1160 * Caller should also hold fs_info->qgroup_lock. 1161 * 1162 * Return 0 for quick update, return >0 for need to full rescan 1163 * and mark INCONSISTENT flag. 1164 * Return < 0 for other error. 
1165 */ 1166 static int quick_update_accounting(struct btrfs_fs_info *fs_info, 1167 struct ulist *tmp, u64 src, u64 dst, 1168 int sign) 1169 { 1170 struct btrfs_qgroup *qgroup; 1171 int ret = 1; 1172 int err = 0; 1173 1174 qgroup = find_qgroup_rb(fs_info, src); 1175 if (!qgroup) 1176 goto out; 1177 if (qgroup->excl == qgroup->rfer) { 1178 ret = 0; 1179 err = __qgroup_excl_accounting(fs_info, tmp, dst, 1180 qgroup, sign); 1181 if (err < 0) { 1182 ret = err; 1183 goto out; 1184 } 1185 } 1186 out: 1187 if (ret) 1188 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 1189 return ret; 1190 } 1191 1192 int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, 1193 struct btrfs_fs_info *fs_info, u64 src, u64 dst) 1194 { 1195 struct btrfs_root *quota_root; 1196 struct btrfs_qgroup *parent; 1197 struct btrfs_qgroup *member; 1198 struct btrfs_qgroup_list *list; 1199 struct ulist *tmp; 1200 int ret = 0; 1201 1202 /* Check the level of src and dst first */ 1203 if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst)) 1204 return -EINVAL; 1205 1206 tmp = ulist_alloc(GFP_KERNEL); 1207 if (!tmp) 1208 return -ENOMEM; 1209 1210 mutex_lock(&fs_info->qgroup_ioctl_lock); 1211 quota_root = fs_info->quota_root; 1212 if (!quota_root) { 1213 ret = -EINVAL; 1214 goto out; 1215 } 1216 member = find_qgroup_rb(fs_info, src); 1217 parent = find_qgroup_rb(fs_info, dst); 1218 if (!member || !parent) { 1219 ret = -EINVAL; 1220 goto out; 1221 } 1222 1223 /* check if such qgroup relation exist firstly */ 1224 list_for_each_entry(list, &member->groups, next_group) { 1225 if (list->group == parent) { 1226 ret = -EEXIST; 1227 goto out; 1228 } 1229 } 1230 1231 ret = add_qgroup_relation_item(trans, quota_root, src, dst); 1232 if (ret) 1233 goto out; 1234 1235 ret = add_qgroup_relation_item(trans, quota_root, dst, src); 1236 if (ret) { 1237 del_qgroup_relation_item(trans, quota_root, src, dst); 1238 goto out; 1239 } 1240 1241 spin_lock(&fs_info->qgroup_lock); 1242 ret = add_relation_rb(fs_info, src, dst); 1243 if (ret < 0) { 1244 spin_unlock(&fs_info->qgroup_lock); 1245 goto out; 1246 } 1247 ret = quick_update_accounting(fs_info, tmp, src, dst, 1); 1248 spin_unlock(&fs_info->qgroup_lock); 1249 out: 1250 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1251 ulist_free(tmp); 1252 return ret; 1253 } 1254 1255 static int __del_qgroup_relation(struct btrfs_trans_handle *trans, 1256 struct btrfs_fs_info *fs_info, u64 src, u64 dst) 1257 { 1258 struct btrfs_root *quota_root; 1259 struct btrfs_qgroup *parent; 1260 struct btrfs_qgroup *member; 1261 struct btrfs_qgroup_list *list; 1262 struct ulist *tmp; 1263 int ret = 0; 1264 int err; 1265 1266 tmp = ulist_alloc(GFP_KERNEL); 1267 if (!tmp) 1268 return -ENOMEM; 1269 1270 quota_root = fs_info->quota_root; 1271 if (!quota_root) { 1272 ret = -EINVAL; 1273 goto out; 1274 } 1275 1276 member = find_qgroup_rb(fs_info, src); 1277 parent = find_qgroup_rb(fs_info, dst); 1278 if (!member || !parent) { 1279 ret = -EINVAL; 1280 goto out; 1281 } 1282 1283 /* check if such qgroup relation exist firstly */ 1284 list_for_each_entry(list, &member->groups, next_group) { 1285 if (list->group == parent) 1286 goto exist; 1287 } 1288 ret = -ENOENT; 1289 goto out; 1290 exist: 1291 ret = del_qgroup_relation_item(trans, quota_root, src, dst); 1292 err = del_qgroup_relation_item(trans, quota_root, dst, src); 1293 if (err && !ret) 1294 ret = err; 1295 1296 spin_lock(&fs_info->qgroup_lock); 1297 del_relation_rb(fs_info, src, dst); 1298 ret = quick_update_accounting(fs_info, tmp, src, dst, -1); 1299 
spin_unlock(&fs_info->qgroup_lock); 1300 out: 1301 ulist_free(tmp); 1302 return ret; 1303 } 1304 1305 int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, 1306 struct btrfs_fs_info *fs_info, u64 src, u64 dst) 1307 { 1308 int ret = 0; 1309 1310 mutex_lock(&fs_info->qgroup_ioctl_lock); 1311 ret = __del_qgroup_relation(trans, fs_info, src, dst); 1312 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1313 1314 return ret; 1315 } 1316 1317 int btrfs_create_qgroup(struct btrfs_trans_handle *trans, 1318 struct btrfs_fs_info *fs_info, u64 qgroupid) 1319 { 1320 struct btrfs_root *quota_root; 1321 struct btrfs_qgroup *qgroup; 1322 int ret = 0; 1323 1324 mutex_lock(&fs_info->qgroup_ioctl_lock); 1325 quota_root = fs_info->quota_root; 1326 if (!quota_root) { 1327 ret = -EINVAL; 1328 goto out; 1329 } 1330 qgroup = find_qgroup_rb(fs_info, qgroupid); 1331 if (qgroup) { 1332 ret = -EEXIST; 1333 goto out; 1334 } 1335 1336 ret = add_qgroup_item(trans, quota_root, qgroupid); 1337 if (ret) 1338 goto out; 1339 1340 spin_lock(&fs_info->qgroup_lock); 1341 qgroup = add_qgroup_rb(fs_info, qgroupid); 1342 spin_unlock(&fs_info->qgroup_lock); 1343 1344 if (IS_ERR(qgroup)) 1345 ret = PTR_ERR(qgroup); 1346 out: 1347 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1348 return ret; 1349 } 1350 1351 int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, 1352 struct btrfs_fs_info *fs_info, u64 qgroupid) 1353 { 1354 struct btrfs_root *quota_root; 1355 struct btrfs_qgroup *qgroup; 1356 struct btrfs_qgroup_list *list; 1357 int ret = 0; 1358 1359 mutex_lock(&fs_info->qgroup_ioctl_lock); 1360 quota_root = fs_info->quota_root; 1361 if (!quota_root) { 1362 ret = -EINVAL; 1363 goto out; 1364 } 1365 1366 qgroup = find_qgroup_rb(fs_info, qgroupid); 1367 if (!qgroup) { 1368 ret = -ENOENT; 1369 goto out; 1370 } else { 1371 /* check if there are no children of this qgroup */ 1372 if (!list_empty(&qgroup->members)) { 1373 ret = -EBUSY; 1374 goto out; 1375 } 1376 } 1377 ret = del_qgroup_item(trans, quota_root, qgroupid); 1378 if (ret && ret != -ENOENT) 1379 goto out; 1380 1381 while (!list_empty(&qgroup->groups)) { 1382 list = list_first_entry(&qgroup->groups, 1383 struct btrfs_qgroup_list, next_group); 1384 ret = __del_qgroup_relation(trans, fs_info, 1385 qgroupid, 1386 list->group->qgroupid); 1387 if (ret) 1388 goto out; 1389 } 1390 1391 spin_lock(&fs_info->qgroup_lock); 1392 del_qgroup_rb(fs_info, qgroupid); 1393 spin_unlock(&fs_info->qgroup_lock); 1394 out: 1395 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1396 return ret; 1397 } 1398 1399 int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, 1400 struct btrfs_fs_info *fs_info, u64 qgroupid, 1401 struct btrfs_qgroup_limit *limit) 1402 { 1403 struct btrfs_root *quota_root; 1404 struct btrfs_qgroup *qgroup; 1405 int ret = 0; 1406 /* Sometimes we would want to clear the limit on this qgroup. 1407 * To meet this requirement, we treat the -1 as a special value 1408 * which tell kernel to clear the limit on this qgroup. 
1409 */ 1410 const u64 CLEAR_VALUE = -1; 1411 1412 mutex_lock(&fs_info->qgroup_ioctl_lock); 1413 quota_root = fs_info->quota_root; 1414 if (!quota_root) { 1415 ret = -EINVAL; 1416 goto out; 1417 } 1418 1419 qgroup = find_qgroup_rb(fs_info, qgroupid); 1420 if (!qgroup) { 1421 ret = -ENOENT; 1422 goto out; 1423 } 1424 1425 spin_lock(&fs_info->qgroup_lock); 1426 if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) { 1427 if (limit->max_rfer == CLEAR_VALUE) { 1428 qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER; 1429 limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER; 1430 qgroup->max_rfer = 0; 1431 } else { 1432 qgroup->max_rfer = limit->max_rfer; 1433 } 1434 } 1435 if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) { 1436 if (limit->max_excl == CLEAR_VALUE) { 1437 qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL; 1438 limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL; 1439 qgroup->max_excl = 0; 1440 } else { 1441 qgroup->max_excl = limit->max_excl; 1442 } 1443 } 1444 if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) { 1445 if (limit->rsv_rfer == CLEAR_VALUE) { 1446 qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER; 1447 limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER; 1448 qgroup->rsv_rfer = 0; 1449 } else { 1450 qgroup->rsv_rfer = limit->rsv_rfer; 1451 } 1452 } 1453 if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) { 1454 if (limit->rsv_excl == CLEAR_VALUE) { 1455 qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL; 1456 limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL; 1457 qgroup->rsv_excl = 0; 1458 } else { 1459 qgroup->rsv_excl = limit->rsv_excl; 1460 } 1461 } 1462 qgroup->lim_flags |= limit->flags; 1463 1464 spin_unlock(&fs_info->qgroup_lock); 1465 1466 ret = update_qgroup_limit_item(trans, quota_root, qgroup); 1467 if (ret) { 1468 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 1469 btrfs_info(fs_info, "unable to update quota limit for %llu", 1470 qgroupid); 1471 } 1472 1473 out: 1474 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1475 return ret; 1476 } 1477 1478 int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info, 1479 struct btrfs_delayed_ref_root *delayed_refs, 1480 struct btrfs_qgroup_extent_record *record) 1481 { 1482 struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node; 1483 struct rb_node *parent_node = NULL; 1484 struct btrfs_qgroup_extent_record *entry; 1485 u64 bytenr = record->bytenr; 1486 1487 lockdep_assert_held(&delayed_refs->lock); 1488 trace_btrfs_qgroup_trace_extent(fs_info, record); 1489 1490 while (*p) { 1491 parent_node = *p; 1492 entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record, 1493 node); 1494 if (bytenr < entry->bytenr) 1495 p = &(*p)->rb_left; 1496 else if (bytenr > entry->bytenr) 1497 p = &(*p)->rb_right; 1498 else 1499 return 1; 1500 } 1501 1502 rb_link_node(&record->node, parent_node, p); 1503 rb_insert_color(&record->node, &delayed_refs->dirty_extent_root); 1504 return 0; 1505 } 1506 1507 int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info, 1508 struct btrfs_qgroup_extent_record *qrecord) 1509 { 1510 struct ulist *old_root; 1511 u64 bytenr = qrecord->bytenr; 1512 int ret; 1513 1514 ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &old_root, false); 1515 if (ret < 0) { 1516 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 1517 btrfs_warn(fs_info, 1518 "error accounting new delayed refs extent (err code: %d), quota inconsistent", 1519 ret); 1520 return 0; 1521 } 1522 1523 /* 1524 * Here we don't need to get the lock of 1525 * trans->transaction->delayed_refs, since inserted qrecord won't 1526 * be 
deleted, only qrecord->node may be modified (new qrecord insert) 1527 * 1528 * So modifying qrecord->old_roots is safe here 1529 */ 1530 qrecord->old_roots = old_root; 1531 return 0; 1532 } 1533 1534 int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, 1535 struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes, 1536 gfp_t gfp_flag) 1537 { 1538 struct btrfs_qgroup_extent_record *record; 1539 struct btrfs_delayed_ref_root *delayed_refs; 1540 int ret; 1541 1542 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) 1543 || bytenr == 0 || num_bytes == 0) 1544 return 0; 1545 if (WARN_ON(trans == NULL)) 1546 return -EINVAL; 1547 record = kmalloc(sizeof(*record), gfp_flag); 1548 if (!record) 1549 return -ENOMEM; 1550 1551 delayed_refs = &trans->transaction->delayed_refs; 1552 record->bytenr = bytenr; 1553 record->num_bytes = num_bytes; 1554 record->old_roots = NULL; 1555 1556 spin_lock(&delayed_refs->lock); 1557 ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record); 1558 spin_unlock(&delayed_refs->lock); 1559 if (ret > 0) { 1560 kfree(record); 1561 return 0; 1562 } 1563 return btrfs_qgroup_trace_extent_post(fs_info, record); 1564 } 1565 1566 int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans, 1567 struct btrfs_fs_info *fs_info, 1568 struct extent_buffer *eb) 1569 { 1570 int nr = btrfs_header_nritems(eb); 1571 int i, extent_type, ret; 1572 struct btrfs_key key; 1573 struct btrfs_file_extent_item *fi; 1574 u64 bytenr, num_bytes; 1575 1576 /* We can be called directly from walk_up_proc() */ 1577 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) 1578 return 0; 1579 1580 for (i = 0; i < nr; i++) { 1581 btrfs_item_key_to_cpu(eb, &key, i); 1582 1583 if (key.type != BTRFS_EXTENT_DATA_KEY) 1584 continue; 1585 1586 fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item); 1587 /* filter out non qgroup-accountable extents */ 1588 extent_type = btrfs_file_extent_type(eb, fi); 1589 1590 if (extent_type == BTRFS_FILE_EXTENT_INLINE) 1591 continue; 1592 1593 bytenr = btrfs_file_extent_disk_bytenr(eb, fi); 1594 if (!bytenr) 1595 continue; 1596 1597 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi); 1598 1599 ret = btrfs_qgroup_trace_extent(trans, fs_info, bytenr, 1600 num_bytes, GFP_NOFS); 1601 if (ret) 1602 return ret; 1603 } 1604 cond_resched(); 1605 return 0; 1606 } 1607 1608 /* 1609 * Walk up the tree from the bottom, freeing leaves and any interior 1610 * nodes which have had all slots visited. If a node (leaf or 1611 * interior) is freed, the node above it will have it's slot 1612 * incremented. The root node will never be freed. 1613 * 1614 * At the end of this function, we should have a path which has all 1615 * slots incremented to the next position for a search. If we need to 1616 * read a new node it will be NULL and the node above it will have the 1617 * correct slot selected for a later read. 1618 * 1619 * If we increment the root nodes slot counter past the number of 1620 * elements, 1 is returned to signal completion of the search. 
1621 */ 1622 static int adjust_slots_upwards(struct btrfs_path *path, int root_level) 1623 { 1624 int level = 0; 1625 int nr, slot; 1626 struct extent_buffer *eb; 1627 1628 if (root_level == 0) 1629 return 1; 1630 1631 while (level <= root_level) { 1632 eb = path->nodes[level]; 1633 nr = btrfs_header_nritems(eb); 1634 path->slots[level]++; 1635 slot = path->slots[level]; 1636 if (slot >= nr || level == 0) { 1637 /* 1638 * Don't free the root - we will detect this 1639 * condition after our loop and return a 1640 * positive value for caller to stop walking the tree. 1641 */ 1642 if (level != root_level) { 1643 btrfs_tree_unlock_rw(eb, path->locks[level]); 1644 path->locks[level] = 0; 1645 1646 free_extent_buffer(eb); 1647 path->nodes[level] = NULL; 1648 path->slots[level] = 0; 1649 } 1650 } else { 1651 /* 1652 * We have a valid slot to walk back down 1653 * from. Stop here so caller can process these 1654 * new nodes. 1655 */ 1656 break; 1657 } 1658 1659 level++; 1660 } 1661 1662 eb = path->nodes[root_level]; 1663 if (path->slots[root_level] >= btrfs_header_nritems(eb)) 1664 return 1; 1665 1666 return 0; 1667 } 1668 1669 int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans, 1670 struct btrfs_root *root, 1671 struct extent_buffer *root_eb, 1672 u64 root_gen, int root_level) 1673 { 1674 struct btrfs_fs_info *fs_info = root->fs_info; 1675 int ret = 0; 1676 int level; 1677 struct extent_buffer *eb = root_eb; 1678 struct btrfs_path *path = NULL; 1679 1680 BUG_ON(root_level < 0 || root_level >= BTRFS_MAX_LEVEL); 1681 BUG_ON(root_eb == NULL); 1682 1683 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) 1684 return 0; 1685 1686 if (!extent_buffer_uptodate(root_eb)) { 1687 ret = btrfs_read_buffer(root_eb, root_gen, root_level, NULL); 1688 if (ret) 1689 goto out; 1690 } 1691 1692 if (root_level == 0) { 1693 ret = btrfs_qgroup_trace_leaf_items(trans, fs_info, root_eb); 1694 goto out; 1695 } 1696 1697 path = btrfs_alloc_path(); 1698 if (!path) 1699 return -ENOMEM; 1700 1701 /* 1702 * Walk down the tree. Missing extent blocks are filled in as 1703 * we go. Metadata is accounted every time we read a new 1704 * extent block. 1705 * 1706 * When we reach a leaf, we account for file extent items in it, 1707 * walk back up the tree (adjusting slot pointers as we go) 1708 * and restart the search process. 1709 */ 1710 extent_buffer_get(root_eb); /* For path */ 1711 path->nodes[root_level] = root_eb; 1712 path->slots[root_level] = 0; 1713 path->locks[root_level] = 0; /* so release_path doesn't try to unlock */ 1714 walk_down: 1715 level = root_level; 1716 while (level >= 0) { 1717 if (path->nodes[level] == NULL) { 1718 struct btrfs_key first_key; 1719 int parent_slot; 1720 u64 child_gen; 1721 u64 child_bytenr; 1722 1723 /* 1724 * We need to get child blockptr/gen from parent before 1725 * we can read it. 
1726 */ 1727 eb = path->nodes[level + 1]; 1728 parent_slot = path->slots[level + 1]; 1729 child_bytenr = btrfs_node_blockptr(eb, parent_slot); 1730 child_gen = btrfs_node_ptr_generation(eb, parent_slot); 1731 btrfs_node_key_to_cpu(eb, &first_key, parent_slot); 1732 1733 eb = read_tree_block(fs_info, child_bytenr, child_gen, 1734 level, &first_key); 1735 if (IS_ERR(eb)) { 1736 ret = PTR_ERR(eb); 1737 goto out; 1738 } else if (!extent_buffer_uptodate(eb)) { 1739 free_extent_buffer(eb); 1740 ret = -EIO; 1741 goto out; 1742 } 1743 1744 path->nodes[level] = eb; 1745 path->slots[level] = 0; 1746 1747 btrfs_tree_read_lock(eb); 1748 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); 1749 path->locks[level] = BTRFS_READ_LOCK_BLOCKING; 1750 1751 ret = btrfs_qgroup_trace_extent(trans, fs_info, 1752 child_bytenr, 1753 fs_info->nodesize, 1754 GFP_NOFS); 1755 if (ret) 1756 goto out; 1757 } 1758 1759 if (level == 0) { 1760 ret = btrfs_qgroup_trace_leaf_items(trans,fs_info, 1761 path->nodes[level]); 1762 if (ret) 1763 goto out; 1764 1765 /* Nonzero return here means we completed our search */ 1766 ret = adjust_slots_upwards(path, root_level); 1767 if (ret) 1768 break; 1769 1770 /* Restart search with new slots */ 1771 goto walk_down; 1772 } 1773 1774 level--; 1775 } 1776 1777 ret = 0; 1778 out: 1779 btrfs_free_path(path); 1780 1781 return ret; 1782 } 1783 1784 #define UPDATE_NEW 0 1785 #define UPDATE_OLD 1 1786 /* 1787 * Walk all of the roots that points to the bytenr and adjust their refcnts. 1788 */ 1789 static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info, 1790 struct ulist *roots, struct ulist *tmp, 1791 struct ulist *qgroups, u64 seq, int update_old) 1792 { 1793 struct ulist_node *unode; 1794 struct ulist_iterator uiter; 1795 struct ulist_node *tmp_unode; 1796 struct ulist_iterator tmp_uiter; 1797 struct btrfs_qgroup *qg; 1798 int ret = 0; 1799 1800 if (!roots) 1801 return 0; 1802 ULIST_ITER_INIT(&uiter); 1803 while ((unode = ulist_next(roots, &uiter))) { 1804 qg = find_qgroup_rb(fs_info, unode->val); 1805 if (!qg) 1806 continue; 1807 1808 ulist_reinit(tmp); 1809 ret = ulist_add(qgroups, qg->qgroupid, qgroup_to_aux(qg), 1810 GFP_ATOMIC); 1811 if (ret < 0) 1812 return ret; 1813 ret = ulist_add(tmp, qg->qgroupid, qgroup_to_aux(qg), GFP_ATOMIC); 1814 if (ret < 0) 1815 return ret; 1816 ULIST_ITER_INIT(&tmp_uiter); 1817 while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) { 1818 struct btrfs_qgroup_list *glist; 1819 1820 qg = unode_aux_to_qgroup(tmp_unode); 1821 if (update_old) 1822 btrfs_qgroup_update_old_refcnt(qg, seq, 1); 1823 else 1824 btrfs_qgroup_update_new_refcnt(qg, seq, 1); 1825 list_for_each_entry(glist, &qg->groups, next_group) { 1826 ret = ulist_add(qgroups, glist->group->qgroupid, 1827 qgroup_to_aux(glist->group), 1828 GFP_ATOMIC); 1829 if (ret < 0) 1830 return ret; 1831 ret = ulist_add(tmp, glist->group->qgroupid, 1832 qgroup_to_aux(glist->group), 1833 GFP_ATOMIC); 1834 if (ret < 0) 1835 return ret; 1836 } 1837 } 1838 } 1839 return 0; 1840 } 1841 1842 /* 1843 * Update qgroup rfer/excl counters. 1844 * Rfer update is easy, codes can explain themselves. 1845 * 1846 * Excl update is tricky, the update is split into 2 part. 
1847 * Part 1: Possible exclusive <-> sharing detect: 1848 * | A | !A | 1849 * ------------------------------------- 1850 * B | * | - | 1851 * ------------------------------------- 1852 * !B | + | ** | 1853 * ------------------------------------- 1854 * 1855 * Conditions: 1856 * A: cur_old_roots < nr_old_roots (not exclusive before) 1857 * !A: cur_old_roots == nr_old_roots (possible exclusive before) 1858 * B: cur_new_roots < nr_new_roots (not exclusive now) 1859 * !B: cur_new_roots == nr_new_roots (possible exclusive now) 1860 * 1861 * Results: 1862 * +: Possible sharing -> exclusive -: Possible exclusive -> sharing 1863 * *: Definitely not changed. **: Possible unchanged. 1864 * 1865 * For !A and !B condition, the exception is cur_old/new_roots == 0 case. 1866 * 1867 * To make the logic clear, we first use condition A and B to split 1868 * combination into 4 results. 1869 * 1870 * Then, for result "+" and "-", check old/new_roots == 0 case, as in them 1871 * only on variant maybe 0. 1872 * 1873 * Lastly, check result **, since there are 2 variants maybe 0, split them 1874 * again(2x2). 1875 * But this time we don't need to consider other things, the codes and logic 1876 * is easy to understand now. 1877 */ 1878 static int qgroup_update_counters(struct btrfs_fs_info *fs_info, 1879 struct ulist *qgroups, 1880 u64 nr_old_roots, 1881 u64 nr_new_roots, 1882 u64 num_bytes, u64 seq) 1883 { 1884 struct ulist_node *unode; 1885 struct ulist_iterator uiter; 1886 struct btrfs_qgroup *qg; 1887 u64 cur_new_count, cur_old_count; 1888 1889 ULIST_ITER_INIT(&uiter); 1890 while ((unode = ulist_next(qgroups, &uiter))) { 1891 bool dirty = false; 1892 1893 qg = unode_aux_to_qgroup(unode); 1894 cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq); 1895 cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq); 1896 1897 trace_qgroup_update_counters(fs_info, qg->qgroupid, 1898 cur_old_count, cur_new_count); 1899 1900 /* Rfer update part */ 1901 if (cur_old_count == 0 && cur_new_count > 0) { 1902 qg->rfer += num_bytes; 1903 qg->rfer_cmpr += num_bytes; 1904 dirty = true; 1905 } 1906 if (cur_old_count > 0 && cur_new_count == 0) { 1907 qg->rfer -= num_bytes; 1908 qg->rfer_cmpr -= num_bytes; 1909 dirty = true; 1910 } 1911 1912 /* Excl update part */ 1913 /* Exclusive/none -> shared case */ 1914 if (cur_old_count == nr_old_roots && 1915 cur_new_count < nr_new_roots) { 1916 /* Exclusive -> shared */ 1917 if (cur_old_count != 0) { 1918 qg->excl -= num_bytes; 1919 qg->excl_cmpr -= num_bytes; 1920 dirty = true; 1921 } 1922 } 1923 1924 /* Shared -> exclusive/none case */ 1925 if (cur_old_count < nr_old_roots && 1926 cur_new_count == nr_new_roots) { 1927 /* Shared->exclusive */ 1928 if (cur_new_count != 0) { 1929 qg->excl += num_bytes; 1930 qg->excl_cmpr += num_bytes; 1931 dirty = true; 1932 } 1933 } 1934 1935 /* Exclusive/none -> exclusive/none case */ 1936 if (cur_old_count == nr_old_roots && 1937 cur_new_count == nr_new_roots) { 1938 if (cur_old_count == 0) { 1939 /* None -> exclusive/none */ 1940 1941 if (cur_new_count != 0) { 1942 /* None -> exclusive */ 1943 qg->excl += num_bytes; 1944 qg->excl_cmpr += num_bytes; 1945 dirty = true; 1946 } 1947 /* None -> none, nothing changed */ 1948 } else { 1949 /* Exclusive -> exclusive/none */ 1950 1951 if (cur_new_count == 0) { 1952 /* Exclusive -> none */ 1953 qg->excl -= num_bytes; 1954 qg->excl_cmpr -= num_bytes; 1955 dirty = true; 1956 } 1957 /* Exclusive -> exclusive, nothing changed */ 1958 } 1959 } 1960 1961 if (dirty) 1962 qgroup_dirty(fs_info, qg); 1963 } 1964 return 0; 1965 
} 1966 1967 /* 1968 * Check if the @roots potentially is a list of fs tree roots 1969 * 1970 * Return 0 for definitely not a fs/subvol tree roots ulist 1971 * Return 1 for possible fs/subvol tree roots in the list (considering an empty 1972 * one as well) 1973 */ 1974 static int maybe_fs_roots(struct ulist *roots) 1975 { 1976 struct ulist_node *unode; 1977 struct ulist_iterator uiter; 1978 1979 /* Empty one, still possible for fs roots */ 1980 if (!roots || roots->nnodes == 0) 1981 return 1; 1982 1983 ULIST_ITER_INIT(&uiter); 1984 unode = ulist_next(roots, &uiter); 1985 if (!unode) 1986 return 1; 1987 1988 /* 1989 * If it contains fs tree roots, then it must belong to fs/subvol 1990 * trees. 1991 * If it contains a non-fs tree, it won't be shared with fs/subvol trees. 1992 */ 1993 return is_fstree(unode->val); 1994 } 1995 1996 int 1997 btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, 1998 struct btrfs_fs_info *fs_info, 1999 u64 bytenr, u64 num_bytes, 2000 struct ulist *old_roots, struct ulist *new_roots) 2001 { 2002 struct ulist *qgroups = NULL; 2003 struct ulist *tmp = NULL; 2004 u64 seq; 2005 u64 nr_new_roots = 0; 2006 u64 nr_old_roots = 0; 2007 int ret = 0; 2008 2009 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) 2010 return 0; 2011 2012 if (new_roots) { 2013 if (!maybe_fs_roots(new_roots)) 2014 goto out_free; 2015 nr_new_roots = new_roots->nnodes; 2016 } 2017 if (old_roots) { 2018 if (!maybe_fs_roots(old_roots)) 2019 goto out_free; 2020 nr_old_roots = old_roots->nnodes; 2021 } 2022 2023 /* Quick exit, either not fs tree roots, or won't affect any qgroup */ 2024 if (nr_old_roots == 0 && nr_new_roots == 0) 2025 goto out_free; 2026 2027 BUG_ON(!fs_info->quota_root); 2028 2029 trace_btrfs_qgroup_account_extent(fs_info, bytenr, num_bytes, 2030 nr_old_roots, nr_new_roots); 2031 2032 qgroups = ulist_alloc(GFP_NOFS); 2033 if (!qgroups) { 2034 ret = -ENOMEM; 2035 goto out_free; 2036 } 2037 tmp = ulist_alloc(GFP_NOFS); 2038 if (!tmp) { 2039 ret = -ENOMEM; 2040 goto out_free; 2041 } 2042 2043 mutex_lock(&fs_info->qgroup_rescan_lock); 2044 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) { 2045 if (fs_info->qgroup_rescan_progress.objectid <= bytenr) { 2046 mutex_unlock(&fs_info->qgroup_rescan_lock); 2047 ret = 0; 2048 goto out_free; 2049 } 2050 } 2051 mutex_unlock(&fs_info->qgroup_rescan_lock); 2052 2053 spin_lock(&fs_info->qgroup_lock); 2054 seq = fs_info->qgroup_seq; 2055 2056 /* Update old refcnts using old_roots */ 2057 ret = qgroup_update_refcnt(fs_info, old_roots, tmp, qgroups, seq, 2058 UPDATE_OLD); 2059 if (ret < 0) 2060 goto out; 2061 2062 /* Update new refcnts using new_roots */ 2063 ret = qgroup_update_refcnt(fs_info, new_roots, tmp, qgroups, seq, 2064 UPDATE_NEW); 2065 if (ret < 0) 2066 goto out; 2067 2068 qgroup_update_counters(fs_info, qgroups, nr_old_roots, nr_new_roots, 2069 num_bytes, seq); 2070 2071 /* 2072 * Bump qgroup_seq to avoid seq overlap 2073 */ 2074 fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1; 2075 out: 2076 spin_unlock(&fs_info->qgroup_lock); 2077 out_free: 2078 ulist_free(tmp); 2079 ulist_free(qgroups); 2080 ulist_free(old_roots); 2081 ulist_free(new_roots); 2082 return ret; 2083 } 2084 2085 int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans) 2086 { 2087 struct btrfs_fs_info *fs_info = trans->fs_info; 2088 struct btrfs_qgroup_extent_record *record; 2089 struct btrfs_delayed_ref_root *delayed_refs; 2090 struct ulist *new_roots = NULL; 2091 struct rb_node *node; 2092 u64 qgroup_to_skip; 2093 int ret = 0; 
2094 2095 delayed_refs = &trans->transaction->delayed_refs; 2096 qgroup_to_skip = delayed_refs->qgroup_to_skip; 2097 while ((node = rb_first(&delayed_refs->dirty_extent_root))) { 2098 record = rb_entry(node, struct btrfs_qgroup_extent_record, 2099 node); 2100 2101 trace_btrfs_qgroup_account_extents(fs_info, record); 2102 2103 if (!ret) { 2104 /* 2105 * Old roots should be searched when inserting qgroup 2106 * extent record 2107 */ 2108 if (WARN_ON(!record->old_roots)) { 2109 /* Search commit root to find old_roots */ 2110 ret = btrfs_find_all_roots(NULL, fs_info, 2111 record->bytenr, 0, 2112 &record->old_roots, false); 2113 if (ret < 0) 2114 goto cleanup; 2115 } 2116 2117 /* 2118 * Use SEQ_LAST as time_seq to do special search, which 2119 * doesn't lock tree or delayed_refs and search current 2120 * root. It's safe inside commit_transaction(). 2121 */ 2122 ret = btrfs_find_all_roots(trans, fs_info, 2123 record->bytenr, SEQ_LAST, &new_roots, false); 2124 if (ret < 0) 2125 goto cleanup; 2126 if (qgroup_to_skip) { 2127 ulist_del(new_roots, qgroup_to_skip, 0); 2128 ulist_del(record->old_roots, qgroup_to_skip, 2129 0); 2130 } 2131 ret = btrfs_qgroup_account_extent(trans, fs_info, 2132 record->bytenr, record->num_bytes, 2133 record->old_roots, new_roots); 2134 record->old_roots = NULL; 2135 new_roots = NULL; 2136 } 2137 cleanup: 2138 ulist_free(record->old_roots); 2139 ulist_free(new_roots); 2140 new_roots = NULL; 2141 rb_erase(node, &delayed_refs->dirty_extent_root); 2142 kfree(record); 2143 2144 } 2145 return ret; 2146 } 2147 2148 /* 2149 * called from commit_transaction. Writes all changed qgroups to disk. 2150 */ 2151 int btrfs_run_qgroups(struct btrfs_trans_handle *trans, 2152 struct btrfs_fs_info *fs_info) 2153 { 2154 struct btrfs_root *quota_root = fs_info->quota_root; 2155 int ret = 0; 2156 2157 if (!quota_root) 2158 return ret; 2159 2160 spin_lock(&fs_info->qgroup_lock); 2161 while (!list_empty(&fs_info->dirty_qgroups)) { 2162 struct btrfs_qgroup *qgroup; 2163 qgroup = list_first_entry(&fs_info->dirty_qgroups, 2164 struct btrfs_qgroup, dirty); 2165 list_del_init(&qgroup->dirty); 2166 spin_unlock(&fs_info->qgroup_lock); 2167 ret = update_qgroup_info_item(trans, quota_root, qgroup); 2168 if (ret) 2169 fs_info->qgroup_flags |= 2170 BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 2171 ret = update_qgroup_limit_item(trans, quota_root, qgroup); 2172 if (ret) 2173 fs_info->qgroup_flags |= 2174 BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 2175 spin_lock(&fs_info->qgroup_lock); 2176 } 2177 if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) 2178 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON; 2179 else 2180 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON; 2181 spin_unlock(&fs_info->qgroup_lock); 2182 2183 ret = update_qgroup_status_item(trans, fs_info, quota_root); 2184 if (ret) 2185 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 2186 2187 return ret; 2188 } 2189 2190 /* 2191 * Copy the accounting information between qgroups. This is necessary 2192 * when a snapshot or a subvolume is created. Throwing an error will 2193 * cause a transaction abort so we take extra care here to only error 2194 * when a readonly fs is a reasonable outcome. 
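 *
 * As a rough layout sketch (derived from the parsing code below, not from a
 * separate specification), the variable-length payload that follows the
 * btrfs_qgroup_inherit structure is a flat array of u64 qgroup ids:
 *
 *	u64 *i_qgroups = (u64 *)(inherit + 1);
 *	// first inherit->num_qgroups ids the new subvol is added to,
 *	// then inherit->num_ref_copies (src, dst) pairs,
 *	// then inherit->num_excl_copies (src, dst) pairs
 *
 * which is why the validation loop below walks
 * num_qgroups + 2 * num_ref_copies + 2 * num_excl_copies entries.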
2195 */ 2196 int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, 2197 struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid, 2198 struct btrfs_qgroup_inherit *inherit) 2199 { 2200 int ret = 0; 2201 int i; 2202 u64 *i_qgroups; 2203 struct btrfs_root *quota_root = fs_info->quota_root; 2204 struct btrfs_qgroup *srcgroup; 2205 struct btrfs_qgroup *dstgroup; 2206 u32 level_size = 0; 2207 u64 nums; 2208 2209 mutex_lock(&fs_info->qgroup_ioctl_lock); 2210 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) 2211 goto out; 2212 2213 if (!quota_root) { 2214 ret = -EINVAL; 2215 goto out; 2216 } 2217 2218 if (inherit) { 2219 i_qgroups = (u64 *)(inherit + 1); 2220 nums = inherit->num_qgroups + 2 * inherit->num_ref_copies + 2221 2 * inherit->num_excl_copies; 2222 for (i = 0; i < nums; ++i) { 2223 srcgroup = find_qgroup_rb(fs_info, *i_qgroups); 2224 2225 /* 2226 * Zero out invalid groups so we can ignore 2227 * them later. 2228 */ 2229 if (!srcgroup || 2230 ((srcgroup->qgroupid >> 48) <= (objectid >> 48))) 2231 *i_qgroups = 0ULL; 2232 2233 ++i_qgroups; 2234 } 2235 } 2236 2237 /* 2238 * create a tracking group for the subvol itself 2239 */ 2240 ret = add_qgroup_item(trans, quota_root, objectid); 2241 if (ret) 2242 goto out; 2243 2244 if (srcid) { 2245 struct btrfs_root *srcroot; 2246 struct btrfs_key srckey; 2247 2248 srckey.objectid = srcid; 2249 srckey.type = BTRFS_ROOT_ITEM_KEY; 2250 srckey.offset = (u64)-1; 2251 srcroot = btrfs_read_fs_root_no_name(fs_info, &srckey); 2252 if (IS_ERR(srcroot)) { 2253 ret = PTR_ERR(srcroot); 2254 goto out; 2255 } 2256 2257 level_size = fs_info->nodesize; 2258 } 2259 2260 /* 2261 * add qgroup to all inherited groups 2262 */ 2263 if (inherit) { 2264 i_qgroups = (u64 *)(inherit + 1); 2265 for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) { 2266 if (*i_qgroups == 0) 2267 continue; 2268 ret = add_qgroup_relation_item(trans, quota_root, 2269 objectid, *i_qgroups); 2270 if (ret && ret != -EEXIST) 2271 goto out; 2272 ret = add_qgroup_relation_item(trans, quota_root, 2273 *i_qgroups, objectid); 2274 if (ret && ret != -EEXIST) 2275 goto out; 2276 } 2277 ret = 0; 2278 } 2279 2280 2281 spin_lock(&fs_info->qgroup_lock); 2282 2283 dstgroup = add_qgroup_rb(fs_info, objectid); 2284 if (IS_ERR(dstgroup)) { 2285 ret = PTR_ERR(dstgroup); 2286 goto unlock; 2287 } 2288 2289 if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) { 2290 dstgroup->lim_flags = inherit->lim.flags; 2291 dstgroup->max_rfer = inherit->lim.max_rfer; 2292 dstgroup->max_excl = inherit->lim.max_excl; 2293 dstgroup->rsv_rfer = inherit->lim.rsv_rfer; 2294 dstgroup->rsv_excl = inherit->lim.rsv_excl; 2295 2296 ret = update_qgroup_limit_item(trans, quota_root, dstgroup); 2297 if (ret) { 2298 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 2299 btrfs_info(fs_info, 2300 "unable to update quota limit for %llu", 2301 dstgroup->qgroupid); 2302 goto unlock; 2303 } 2304 } 2305 2306 if (srcid) { 2307 srcgroup = find_qgroup_rb(fs_info, srcid); 2308 if (!srcgroup) 2309 goto unlock; 2310 2311 /* 2312 * We call inherit after we clone the root in order to make sure 2313 * our counts don't go crazy, so at this point the only 2314 * difference between the two roots should be the root node. 
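	 *
	 * A made-up example to illustrate the assignments below: if srcgroup
	 * had rfer == 1 GiB before the snapshot, both qgroups now report
	 * rfer == 1 GiB (everything is shared), while excl on both sides
	 * drops to level_size (the nodesize), since the only unshared extent
	 * either root owns is its own root node.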
2315 */ 2316 dstgroup->rfer = srcgroup->rfer; 2317 dstgroup->rfer_cmpr = srcgroup->rfer_cmpr; 2318 dstgroup->excl = level_size; 2319 dstgroup->excl_cmpr = level_size; 2320 srcgroup->excl = level_size; 2321 srcgroup->excl_cmpr = level_size; 2322 2323 /* inherit the limit info */ 2324 dstgroup->lim_flags = srcgroup->lim_flags; 2325 dstgroup->max_rfer = srcgroup->max_rfer; 2326 dstgroup->max_excl = srcgroup->max_excl; 2327 dstgroup->rsv_rfer = srcgroup->rsv_rfer; 2328 dstgroup->rsv_excl = srcgroup->rsv_excl; 2329 2330 qgroup_dirty(fs_info, dstgroup); 2331 qgroup_dirty(fs_info, srcgroup); 2332 } 2333 2334 if (!inherit) 2335 goto unlock; 2336 2337 i_qgroups = (u64 *)(inherit + 1); 2338 for (i = 0; i < inherit->num_qgroups; ++i) { 2339 if (*i_qgroups) { 2340 ret = add_relation_rb(fs_info, objectid, *i_qgroups); 2341 if (ret) 2342 goto unlock; 2343 } 2344 ++i_qgroups; 2345 } 2346 2347 for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) { 2348 struct btrfs_qgroup *src; 2349 struct btrfs_qgroup *dst; 2350 2351 if (!i_qgroups[0] || !i_qgroups[1]) 2352 continue; 2353 2354 src = find_qgroup_rb(fs_info, i_qgroups[0]); 2355 dst = find_qgroup_rb(fs_info, i_qgroups[1]); 2356 2357 if (!src || !dst) { 2358 ret = -EINVAL; 2359 goto unlock; 2360 } 2361 2362 dst->rfer = src->rfer - level_size; 2363 dst->rfer_cmpr = src->rfer_cmpr - level_size; 2364 } 2365 for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) { 2366 struct btrfs_qgroup *src; 2367 struct btrfs_qgroup *dst; 2368 2369 if (!i_qgroups[0] || !i_qgroups[1]) 2370 continue; 2371 2372 src = find_qgroup_rb(fs_info, i_qgroups[0]); 2373 dst = find_qgroup_rb(fs_info, i_qgroups[1]); 2374 2375 if (!src || !dst) { 2376 ret = -EINVAL; 2377 goto unlock; 2378 } 2379 2380 dst->excl = src->excl + level_size; 2381 dst->excl_cmpr = src->excl_cmpr + level_size; 2382 } 2383 2384 unlock: 2385 spin_unlock(&fs_info->qgroup_lock); 2386 out: 2387 mutex_unlock(&fs_info->qgroup_ioctl_lock); 2388 return ret; 2389 } 2390 2391 static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes) 2392 { 2393 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) && 2394 qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer) 2395 return false; 2396 2397 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) && 2398 qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl) 2399 return false; 2400 2401 return true; 2402 } 2403 2404 static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce, 2405 enum btrfs_qgroup_rsv_type type) 2406 { 2407 struct btrfs_root *quota_root; 2408 struct btrfs_qgroup *qgroup; 2409 struct btrfs_fs_info *fs_info = root->fs_info; 2410 u64 ref_root = root->root_key.objectid; 2411 int ret = 0; 2412 struct ulist_node *unode; 2413 struct ulist_iterator uiter; 2414 2415 if (!is_fstree(ref_root)) 2416 return 0; 2417 2418 if (num_bytes == 0) 2419 return 0; 2420 2421 if (test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags) && 2422 capable(CAP_SYS_RESOURCE)) 2423 enforce = false; 2424 2425 spin_lock(&fs_info->qgroup_lock); 2426 quota_root = fs_info->quota_root; 2427 if (!quota_root) 2428 goto out; 2429 2430 qgroup = find_qgroup_rb(fs_info, ref_root); 2431 if (!qgroup) 2432 goto out; 2433 2434 /* 2435 * in a first step, we check all affected qgroups if any limits would 2436 * be exceeded 2437 */ 2438 ulist_reinit(fs_info->qgroup_ulist); 2439 ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid, 2440 qgroup_to_aux(qgroup), GFP_ATOMIC); 2441 if (ret < 0) 2442 goto out; 2443 ULIST_ITER_INIT(&uiter); 2444 while ((unode 
= ulist_next(fs_info->qgroup_ulist, &uiter))) { 2445 struct btrfs_qgroup *qg; 2446 struct btrfs_qgroup_list *glist; 2447 2448 qg = unode_aux_to_qgroup(unode); 2449 2450 if (enforce && !qgroup_check_limits(qg, num_bytes)) { 2451 ret = -EDQUOT; 2452 goto out; 2453 } 2454 2455 list_for_each_entry(glist, &qg->groups, next_group) { 2456 ret = ulist_add(fs_info->qgroup_ulist, 2457 glist->group->qgroupid, 2458 qgroup_to_aux(glist->group), GFP_ATOMIC); 2459 if (ret < 0) 2460 goto out; 2461 } 2462 } 2463 ret = 0; 2464 /* 2465 * No limits exceeded, now record the reservation into all qgroups 2466 */ 2467 ULIST_ITER_INIT(&uiter); 2468 while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) { 2469 struct btrfs_qgroup *qg; 2470 2471 qg = unode_aux_to_qgroup(unode); 2472 2473 trace_qgroup_update_reserve(fs_info, qg, num_bytes, type); 2474 qgroup_rsv_add(fs_info, qg, num_bytes, type); 2475 } 2476 2477 out: 2478 spin_unlock(&fs_info->qgroup_lock); 2479 return ret; 2480 } 2481 2482 /* 2483 * Free @num_bytes of reserved space with @type for the qgroup (normally the 2484 * level 0 qgroup). 2485 * 2486 * Will handle all higher level qgroups too. 2487 * 2488 * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup. 2489 * This special case is only used for the META_PERTRANS type. 2490 */ 2491 void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info, 2492 u64 ref_root, u64 num_bytes, 2493 enum btrfs_qgroup_rsv_type type) 2494 { 2495 struct btrfs_root *quota_root; 2496 struct btrfs_qgroup *qgroup; 2497 struct ulist_node *unode; 2498 struct ulist_iterator uiter; 2499 int ret = 0; 2500 2501 if (!is_fstree(ref_root)) 2502 return; 2503 2504 if (num_bytes == 0) 2505 return; 2506 2507 if (num_bytes == (u64)-1 && type != BTRFS_QGROUP_RSV_META_PERTRANS) { 2508 WARN(1, "%s: Invalid type to free", __func__); 2509 return; 2510 } 2511 spin_lock(&fs_info->qgroup_lock); 2512 2513 quota_root = fs_info->quota_root; 2514 if (!quota_root) 2515 goto out; 2516 2517 qgroup = find_qgroup_rb(fs_info, ref_root); 2518 if (!qgroup) 2519 goto out; 2520 2521 if (num_bytes == (u64)-1) 2522 /* 2523 * We're freeing all pertrans rsv, get the reserved value from 2524 * the level 0 qgroup as the real num_bytes to free. 2525 */ 2526 num_bytes = qgroup->rsv.values[type]; 2527 2528 ulist_reinit(fs_info->qgroup_ulist); 2529 ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid, 2530 qgroup_to_aux(qgroup), GFP_ATOMIC); 2531 if (ret < 0) 2532 goto out; 2533 ULIST_ITER_INIT(&uiter); 2534 while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) { 2535 struct btrfs_qgroup *qg; 2536 struct btrfs_qgroup_list *glist; 2537 2538 qg = unode_aux_to_qgroup(unode); 2539 2540 trace_qgroup_update_reserve(fs_info, qg, -(s64)num_bytes, type); 2541 qgroup_rsv_release(fs_info, qg, num_bytes, type); 2542 2543 list_for_each_entry(glist, &qg->groups, next_group) { 2544 ret = ulist_add(fs_info->qgroup_ulist, 2545 glist->group->qgroupid, 2546 qgroup_to_aux(glist->group), GFP_ATOMIC); 2547 if (ret < 0) 2548 goto out; 2549 } 2550 } 2551 2552 out: 2553 spin_unlock(&fs_info->qgroup_lock); 2554 } 2555 2556 /* 2557 * Returns < 0 on error, 0 when more leaves are to be scanned. 2558 * Returns 1 when done.
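 *
 * A minimal sketch of the expected calling pattern (the real loop lives in
 * btrfs_qgroup_rescan_worker() below; transaction handling is elided here):
 *
 *	int err = 0;
 *
 *	while (!err && !btrfs_fs_closing(fs_info))
 *		err = qgroup_rescan_leaf(fs_info, path, trans);
 *	// err > 0: scan finished, commit the transaction
 *	// err < 0: error, the INCONSISTENT flag gets set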
2559 */ 2560 static int 2561 qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path, 2562 struct btrfs_trans_handle *trans) 2563 { 2564 struct btrfs_key found; 2565 struct extent_buffer *scratch_leaf = NULL; 2566 struct ulist *roots = NULL; 2567 struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem); 2568 u64 num_bytes; 2569 int slot; 2570 int ret; 2571 2572 mutex_lock(&fs_info->qgroup_rescan_lock); 2573 ret = btrfs_search_slot_for_read(fs_info->extent_root, 2574 &fs_info->qgroup_rescan_progress, 2575 path, 1, 0); 2576 2577 btrfs_debug(fs_info, 2578 "current progress key (%llu %u %llu), search_slot ret %d", 2579 fs_info->qgroup_rescan_progress.objectid, 2580 fs_info->qgroup_rescan_progress.type, 2581 fs_info->qgroup_rescan_progress.offset, ret); 2582 2583 if (ret) { 2584 /* 2585 * The rescan is about to end, we will not be scanning any 2586 * further blocks. We cannot unset the RESCAN flag here, because 2587 * we want to commit the transaction if everything went well. 2588 * To make the live accounting work in this phase, we set our 2589 * scan progress pointer such that every real extent objectid 2590 * will be smaller. 2591 */ 2592 fs_info->qgroup_rescan_progress.objectid = (u64)-1; 2593 btrfs_release_path(path); 2594 mutex_unlock(&fs_info->qgroup_rescan_lock); 2595 return ret; 2596 } 2597 2598 btrfs_item_key_to_cpu(path->nodes[0], &found, 2599 btrfs_header_nritems(path->nodes[0]) - 1); 2600 fs_info->qgroup_rescan_progress.objectid = found.objectid + 1; 2601 2602 btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem); 2603 scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]); 2604 if (!scratch_leaf) { 2605 ret = -ENOMEM; 2606 mutex_unlock(&fs_info->qgroup_rescan_lock); 2607 goto out; 2608 } 2609 extent_buffer_get(scratch_leaf); 2610 btrfs_tree_read_lock(scratch_leaf); 2611 btrfs_set_lock_blocking_rw(scratch_leaf, BTRFS_READ_LOCK); 2612 slot = path->slots[0]; 2613 btrfs_release_path(path); 2614 mutex_unlock(&fs_info->qgroup_rescan_lock); 2615 2616 for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) { 2617 btrfs_item_key_to_cpu(scratch_leaf, &found, slot); 2618 if (found.type != BTRFS_EXTENT_ITEM_KEY && 2619 found.type != BTRFS_METADATA_ITEM_KEY) 2620 continue; 2621 if (found.type == BTRFS_METADATA_ITEM_KEY) 2622 num_bytes = fs_info->nodesize; 2623 else 2624 num_bytes = found.offset; 2625 2626 ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0, 2627 &roots, false); 2628 if (ret < 0) 2629 goto out; 2630 /* For rescan, just pass old_roots as NULL */ 2631 ret = btrfs_qgroup_account_extent(trans, fs_info, 2632 found.objectid, num_bytes, NULL, roots); 2633 if (ret < 0) 2634 goto out; 2635 } 2636 out: 2637 if (scratch_leaf) { 2638 btrfs_tree_read_unlock_blocking(scratch_leaf); 2639 free_extent_buffer(scratch_leaf); 2640 } 2641 btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem); 2642 2643 return ret; 2644 } 2645 2646 static void btrfs_qgroup_rescan_worker(struct btrfs_work *work) 2647 { 2648 struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info, 2649 qgroup_rescan_work); 2650 struct btrfs_path *path; 2651 struct btrfs_trans_handle *trans = NULL; 2652 int err = -ENOMEM; 2653 int ret = 0; 2654 2655 path = btrfs_alloc_path(); 2656 if (!path) 2657 goto out; 2658 2659 err = 0; 2660 while (!err && !btrfs_fs_closing(fs_info)) { 2661 trans = btrfs_start_transaction(fs_info->fs_root, 0); 2662 if (IS_ERR(trans)) { 2663 err = PTR_ERR(trans); 2664 break; 2665 } 2666 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) { 2667 err = -EINTR; 
2668 } else { 2669 err = qgroup_rescan_leaf(fs_info, path, trans); 2670 } 2671 if (err > 0) 2672 btrfs_commit_transaction(trans); 2673 else 2674 btrfs_end_transaction(trans); 2675 } 2676 2677 out: 2678 btrfs_free_path(path); 2679 2680 mutex_lock(&fs_info->qgroup_rescan_lock); 2681 if (!btrfs_fs_closing(fs_info)) 2682 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; 2683 2684 if (err > 0 && 2685 fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) { 2686 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 2687 } else if (err < 0) { 2688 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 2689 } 2690 mutex_unlock(&fs_info->qgroup_rescan_lock); 2691 2692 /* 2693 * only update status, since the previous part has already updated the 2694 * qgroup info. 2695 */ 2696 trans = btrfs_start_transaction(fs_info->quota_root, 1); 2697 if (IS_ERR(trans)) { 2698 err = PTR_ERR(trans); 2699 btrfs_err(fs_info, 2700 "fail to start transaction for status update: %d", 2701 err); 2702 goto done; 2703 } 2704 ret = update_qgroup_status_item(trans, fs_info, fs_info->quota_root); 2705 if (ret < 0) { 2706 err = ret; 2707 btrfs_err(fs_info, "fail to update qgroup status: %d", err); 2708 } 2709 btrfs_end_transaction(trans); 2710 2711 if (btrfs_fs_closing(fs_info)) { 2712 btrfs_info(fs_info, "qgroup scan paused"); 2713 } else if (err >= 0) { 2714 btrfs_info(fs_info, "qgroup scan completed%s", 2715 err > 0 ? " (inconsistency flag cleared)" : ""); 2716 } else { 2717 btrfs_err(fs_info, "qgroup scan failed with %d", err); 2718 } 2719 2720 done: 2721 mutex_lock(&fs_info->qgroup_rescan_lock); 2722 fs_info->qgroup_rescan_running = false; 2723 mutex_unlock(&fs_info->qgroup_rescan_lock); 2724 complete_all(&fs_info->qgroup_rescan_completion); 2725 } 2726 2727 /* 2728 * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all 2729 * memory required for the rescan context. 
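 *
 * Sketch of the two call patterns this function is written for (inferred from
 * the flag checks below rather than from separate documentation):
 *
 *	// user-requested rescan: start from objectid 0 and set the
 *	// RESCAN status flag ourselves
 *	ret = qgroup_rescan_init(fs_info, 0, 1);
 *
 *	// resume after mount: progress comes from the on-disk status item
 *	// and the RESCAN flag must already be set, so init_flags == 0
 *	ret = qgroup_rescan_init(fs_info, progress_objectid, 0);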
2730 */ 2731 static int 2732 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid, 2733 int init_flags) 2734 { 2735 int ret = 0; 2736 2737 if (!init_flags && 2738 (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) || 2739 !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))) { 2740 ret = -EINVAL; 2741 goto err; 2742 } 2743 2744 mutex_lock(&fs_info->qgroup_rescan_lock); 2745 spin_lock(&fs_info->qgroup_lock); 2746 2747 if (init_flags) { 2748 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) 2749 ret = -EINPROGRESS; 2750 else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) 2751 ret = -EINVAL; 2752 2753 if (ret) { 2754 spin_unlock(&fs_info->qgroup_lock); 2755 mutex_unlock(&fs_info->qgroup_rescan_lock); 2756 goto err; 2757 } 2758 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN; 2759 } 2760 2761 memset(&fs_info->qgroup_rescan_progress, 0, 2762 sizeof(fs_info->qgroup_rescan_progress)); 2763 fs_info->qgroup_rescan_progress.objectid = progress_objectid; 2764 init_completion(&fs_info->qgroup_rescan_completion); 2765 fs_info->qgroup_rescan_running = true; 2766 2767 spin_unlock(&fs_info->qgroup_lock); 2768 mutex_unlock(&fs_info->qgroup_rescan_lock); 2769 2770 memset(&fs_info->qgroup_rescan_work, 0, 2771 sizeof(fs_info->qgroup_rescan_work)); 2772 btrfs_init_work(&fs_info->qgroup_rescan_work, 2773 btrfs_qgroup_rescan_helper, 2774 btrfs_qgroup_rescan_worker, NULL, NULL); 2775 2776 if (ret) { 2777 err: 2778 btrfs_info(fs_info, "qgroup_rescan_init failed with %d", ret); 2779 return ret; 2780 } 2781 2782 return 0; 2783 } 2784 2785 static void 2786 qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info) 2787 { 2788 struct rb_node *n; 2789 struct btrfs_qgroup *qgroup; 2790 2791 spin_lock(&fs_info->qgroup_lock); 2792 /* clear all current qgroup tracking information */ 2793 for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) { 2794 qgroup = rb_entry(n, struct btrfs_qgroup, node); 2795 qgroup->rfer = 0; 2796 qgroup->rfer_cmpr = 0; 2797 qgroup->excl = 0; 2798 qgroup->excl_cmpr = 0; 2799 } 2800 spin_unlock(&fs_info->qgroup_lock); 2801 } 2802 2803 int 2804 btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info) 2805 { 2806 int ret = 0; 2807 struct btrfs_trans_handle *trans; 2808 2809 ret = qgroup_rescan_init(fs_info, 0, 1); 2810 if (ret) 2811 return ret; 2812 2813 /* 2814 * We have set the rescan_progress to 0, which means no more 2815 * delayed refs will be accounted by btrfs_qgroup_account_ref. 2816 * However, btrfs_qgroup_account_ref may be right after its call 2817 * to btrfs_find_all_roots, in which case it would still do the 2818 * accounting. 2819 * To solve this, we're committing the transaction, which will 2820 * ensure we run all delayed refs and only after that, we are 2821 * going to clear all tracking information for a clean start. 
2822 */ 2823 2824 trans = btrfs_join_transaction(fs_info->fs_root); 2825 if (IS_ERR(trans)) { 2826 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; 2827 return PTR_ERR(trans); 2828 } 2829 ret = btrfs_commit_transaction(trans); 2830 if (ret) { 2831 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; 2832 return ret; 2833 } 2834 2835 qgroup_rescan_zero_tracking(fs_info); 2836 2837 btrfs_queue_work(fs_info->qgroup_rescan_workers, 2838 &fs_info->qgroup_rescan_work); 2839 2840 return 0; 2841 } 2842 2843 int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info, 2844 bool interruptible) 2845 { 2846 int running; 2847 int ret = 0; 2848 2849 mutex_lock(&fs_info->qgroup_rescan_lock); 2850 spin_lock(&fs_info->qgroup_lock); 2851 running = fs_info->qgroup_rescan_running; 2852 spin_unlock(&fs_info->qgroup_lock); 2853 mutex_unlock(&fs_info->qgroup_rescan_lock); 2854 2855 if (!running) 2856 return 0; 2857 2858 if (interruptible) 2859 ret = wait_for_completion_interruptible( 2860 &fs_info->qgroup_rescan_completion); 2861 else 2862 wait_for_completion(&fs_info->qgroup_rescan_completion); 2863 2864 return ret; 2865 } 2866 2867 /* 2868 * This is only called from open_ctree where we're still single threaded, thus 2869 * locking is omitted here. 2870 */ 2871 void 2872 btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info) 2873 { 2874 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) 2875 btrfs_queue_work(fs_info->qgroup_rescan_workers, 2876 &fs_info->qgroup_rescan_work); 2877 } 2878 2879 /* 2880 * Reserve qgroup space for the range [start, start + len). 2881 * 2882 * This function will either reserve space from related qgroups or do 2883 * nothing if the range is already reserved. 2884 * 2885 * Return 0 for a successful reservation 2886 * Return <0 for error (including -EDQUOT) 2887 * 2888 * NOTE: This function may sleep for memory allocation. 2889 * If btrfs_qgroup_reserve_data() is called multiple times with the 2890 * same @reserved, the caller must ensure that when an error happens it's OK 2891 * to free *ALL* reserved space.
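 *
 * A hedged usage sketch of the data reservation life cycle (the caller, the
 * write_failed condition and the error handling are illustrative, not copied
 * from a specific call site; extent_changeset_free() is assumed from ctree.h):
 *
 *	struct extent_changeset *reserved = NULL;
 *	int ret;
 *
 *	ret = btrfs_qgroup_reserve_data(inode, &reserved, start, len);
 *	if (ret < 0)
 *		return ret;	// reservation failed, already cleaned up
 *	// ... dirty the pages ...
 *	if (write_failed)
 *		btrfs_qgroup_free_data(inode, reserved, start, len);
 *	else
 *		btrfs_qgroup_release_data(inode, start, len);
 *	extent_changeset_free(reserved);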
2892 */ 2893 int btrfs_qgroup_reserve_data(struct inode *inode, 2894 struct extent_changeset **reserved_ret, u64 start, 2895 u64 len) 2896 { 2897 struct btrfs_root *root = BTRFS_I(inode)->root; 2898 struct ulist_node *unode; 2899 struct ulist_iterator uiter; 2900 struct extent_changeset *reserved; 2901 u64 orig_reserved; 2902 u64 to_reserve; 2903 int ret; 2904 2905 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) || 2906 !is_fstree(root->objectid) || len == 0) 2907 return 0; 2908 2909 /* The @reserved parameter is mandatory for qgroup */ 2910 if (WARN_ON(!reserved_ret)) 2911 return -EINVAL; 2912 if (!*reserved_ret) { 2913 *reserved_ret = extent_changeset_alloc(); 2914 if (!*reserved_ret) 2915 return -ENOMEM; 2916 } 2917 reserved = *reserved_ret; 2918 /* Record already reserved space */ 2919 orig_reserved = reserved->bytes_changed; 2920 ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start, 2921 start + len - 1, EXTENT_QGROUP_RESERVED, reserved); 2922 2923 /* Newly reserved space */ 2924 to_reserve = reserved->bytes_changed - orig_reserved; 2925 trace_btrfs_qgroup_reserve_data(inode, start, len, 2926 to_reserve, QGROUP_RESERVE); 2927 if (ret < 0) 2928 goto cleanup; 2929 ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA); 2930 if (ret < 0) 2931 goto cleanup; 2932 2933 return ret; 2934 2935 cleanup: 2936 /* Clean up *ALL* already reserved ranges */ 2937 ULIST_ITER_INIT(&uiter); 2938 while ((unode = ulist_next(&reserved->range_changed, &uiter))) 2939 clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val, 2940 unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL); 2941 extent_changeset_release(reserved); 2942 return ret; 2943 } 2944 2945 /* Free the ranges specified by @reserved, normally in the error path */ 2946 static int qgroup_free_reserved_data(struct inode *inode, 2947 struct extent_changeset *reserved, u64 start, u64 len) 2948 { 2949 struct btrfs_root *root = BTRFS_I(inode)->root; 2950 struct ulist_node *unode; 2951 struct ulist_iterator uiter; 2952 struct extent_changeset changeset; 2953 int freed = 0; 2954 int ret; 2955 2956 extent_changeset_init(&changeset); 2957 len = round_up(start + len, root->fs_info->sectorsize); 2958 start = round_down(start, root->fs_info->sectorsize); 2959 2960 ULIST_ITER_INIT(&uiter); 2961 while ((unode = ulist_next(&reserved->range_changed, &uiter))) { 2962 u64 range_start = unode->val; 2963 /* unode->aux is the inclusive end */ 2964 u64 range_len = unode->aux - range_start + 1; 2965 u64 free_start; 2966 u64 free_len; 2967 2968 extent_changeset_release(&changeset); 2969 2970 /* Only free the part of the range within [start, start + len) */ 2971 if (range_start >= start + len || 2972 range_start + range_len <= start) 2973 continue; 2974 free_start = max(range_start, start); 2975 free_len = min(start + len, range_start + range_len) - 2976 free_start; 2977 /* 2978 * TODO: Also modify reserved->ranges_reserved to reflect 2979 * the modification. 2980 * 2981 * However, as long as we free qgroup reserved space according to 2982 * EXTENT_QGROUP_RESERVED, we won't double free. 2983 * So no need to rush.
2984 */ 2985 ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 2986 free_start, free_start + free_len - 1, 2987 EXTENT_QGROUP_RESERVED, &changeset); 2988 if (ret < 0) 2989 goto out; 2990 freed += changeset.bytes_changed; 2991 } 2992 btrfs_qgroup_free_refroot(root->fs_info, root->objectid, freed, 2993 BTRFS_QGROUP_RSV_DATA); 2994 ret = freed; 2995 out: 2996 extent_changeset_release(&changeset); 2997 return ret; 2998 } 2999 3000 static int __btrfs_qgroup_release_data(struct inode *inode, 3001 struct extent_changeset *reserved, u64 start, u64 len, 3002 int free) 3003 { 3004 struct extent_changeset changeset; 3005 int trace_op = QGROUP_RELEASE; 3006 int ret; 3007 3008 /* In the release case, we shouldn't have @reserved */ 3009 WARN_ON(!free && reserved); 3010 if (free && reserved) 3011 return qgroup_free_reserved_data(inode, reserved, start, len); 3012 extent_changeset_init(&changeset); 3013 ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, start, 3014 start + len - 1, EXTENT_QGROUP_RESERVED, &changeset); 3015 if (ret < 0) 3016 goto out; 3017 3018 if (free) 3019 trace_op = QGROUP_FREE; 3020 trace_btrfs_qgroup_release_data(inode, start, len, 3021 changeset.bytes_changed, trace_op); 3022 if (free) 3023 btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info, 3024 BTRFS_I(inode)->root->objectid, 3025 changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA); 3026 ret = changeset.bytes_changed; 3027 out: 3028 extent_changeset_release(&changeset); 3029 return ret; 3030 } 3031 3032 /* 3033 * Free a reserved space range from the io_tree and related qgroups. 3034 * 3035 * Should be called when a range of pages gets invalidated before reaching 3036 * disk, or for the error cleanup case. 3037 * If @reserved is given, only the reserved ranges in [@start, @start + @len) 3038 * will be freed. 3039 * 3040 * For data written to disk, use btrfs_qgroup_release_data(). 3041 * 3042 * NOTE: This function may sleep for memory allocation. 3043 */ 3044 int btrfs_qgroup_free_data(struct inode *inode, 3045 struct extent_changeset *reserved, u64 start, u64 len) 3046 { 3047 return __btrfs_qgroup_release_data(inode, reserved, start, len, 1); 3048 } 3049 3050 /* 3051 * Release a reserved space range from the io_tree only. 3052 * 3053 * Should be called when a range of pages gets written to disk and the 3054 * corresponding FILE_EXTENT item is inserted into the corresponding root. 3055 * 3056 * Since the new qgroup accounting framework only updates qgroup numbers at 3057 * commit_transaction() time, the reserved space shouldn't be freed from 3058 * related qgroups. 3059 * 3060 * But we should release the range from the io_tree, to allow further writes 3061 * to be COWed. 3062 * 3063 * NOTE: This function may sleep for memory allocation.
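 *
 * How the two thin wrappers around __btrfs_qgroup_release_data() divide the
 * work (restating the code above and below, not adding new behaviour):
 *
 *	btrfs_qgroup_free_data(inode, reserved, start, len)
 *		// free == 1: return the reservation to the qgroups
 *	btrfs_qgroup_release_data(inode, start, len)
 *		// free == 0: only clear EXTENT_QGROUP_RESERVED; the
 *		// reservation is accounted at transaction commit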
3064 */ 3065 int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len) 3066 { 3067 return __btrfs_qgroup_release_data(inode, NULL, start, len, 0); 3068 } 3069 3070 static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes, 3071 enum btrfs_qgroup_rsv_type type) 3072 { 3073 if (type != BTRFS_QGROUP_RSV_META_PREALLOC && 3074 type != BTRFS_QGROUP_RSV_META_PERTRANS) 3075 return; 3076 if (num_bytes == 0) 3077 return; 3078 3079 spin_lock(&root->qgroup_meta_rsv_lock); 3080 if (type == BTRFS_QGROUP_RSV_META_PREALLOC) 3081 root->qgroup_meta_rsv_prealloc += num_bytes; 3082 else 3083 root->qgroup_meta_rsv_pertrans += num_bytes; 3084 spin_unlock(&root->qgroup_meta_rsv_lock); 3085 } 3086 3087 static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes, 3088 enum btrfs_qgroup_rsv_type type) 3089 { 3090 if (type != BTRFS_QGROUP_RSV_META_PREALLOC && 3091 type != BTRFS_QGROUP_RSV_META_PERTRANS) 3092 return 0; 3093 if (num_bytes == 0) 3094 return 0; 3095 3096 spin_lock(&root->qgroup_meta_rsv_lock); 3097 if (type == BTRFS_QGROUP_RSV_META_PREALLOC) { 3098 num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc, 3099 num_bytes); 3100 root->qgroup_meta_rsv_prealloc -= num_bytes; 3101 } else { 3102 num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans, 3103 num_bytes); 3104 root->qgroup_meta_rsv_pertrans -= num_bytes; 3105 } 3106 spin_unlock(&root->qgroup_meta_rsv_lock); 3107 return num_bytes; 3108 } 3109 3110 int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes, 3111 enum btrfs_qgroup_rsv_type type, bool enforce) 3112 { 3113 struct btrfs_fs_info *fs_info = root->fs_info; 3114 int ret; 3115 3116 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) || 3117 !is_fstree(root->objectid) || num_bytes == 0) 3118 return 0; 3119 3120 BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize)); 3121 trace_qgroup_meta_reserve(root, type, (s64)num_bytes); 3122 ret = qgroup_reserve(root, num_bytes, enforce, type); 3123 if (ret < 0) 3124 return ret; 3125 /* 3126 * Record what we have reserved into root. 3127 * 3128 * To avoid quota disabled->enabled underflow. 3129 * In that case, we may try to free space we haven't reserved 3130 * (since quota was disabled), so record what we reserved into root. 3131 * And ensure later release won't underflow this number. 3132 */ 3133 add_root_meta_rsv(root, num_bytes, type); 3134 return ret; 3135 } 3136 3137 void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root) 3138 { 3139 struct btrfs_fs_info *fs_info = root->fs_info; 3140 3141 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) || 3142 !is_fstree(root->objectid)) 3143 return; 3144 3145 /* TODO: Update trace point to handle such free */ 3146 trace_qgroup_meta_free_all_pertrans(root); 3147 /* Special value -1 means to free all reserved space */ 3148 btrfs_qgroup_free_refroot(fs_info, root->objectid, (u64)-1, 3149 BTRFS_QGROUP_RSV_META_PERTRANS); 3150 } 3151 3152 void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes, 3153 enum btrfs_qgroup_rsv_type type) 3154 { 3155 struct btrfs_fs_info *fs_info = root->fs_info; 3156 3157 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) || 3158 !is_fstree(root->objectid)) 3159 return; 3160 3161 /* 3162 * reservation for META_PREALLOC can happen before quota is enabled, 3163 * which can lead to underflow. 3164 * Here ensure we will only free what we really have reserved. 
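	 *
	 * Invented numbers for illustration: if a 16K META_PREALLOC
	 * reservation was taken while quota was still disabled, nothing was
	 * added to qgroup_meta_rsv_prealloc at that time, so a later free of
	 * those 16K gets clamped to 0 by sub_root_meta_rsv() below and the
	 * qgroup counters never underflow.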
3165 */ 3166 num_bytes = sub_root_meta_rsv(root, num_bytes, type); 3167 BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize)); 3168 trace_qgroup_meta_reserve(root, type, -(s64)num_bytes); 3169 btrfs_qgroup_free_refroot(fs_info, root->objectid, num_bytes, type); 3170 } 3171 3172 static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root, 3173 int num_bytes) 3174 { 3175 struct btrfs_root *quota_root = fs_info->quota_root; 3176 struct btrfs_qgroup *qgroup; 3177 struct ulist_node *unode; 3178 struct ulist_iterator uiter; 3179 int ret = 0; 3180 3181 if (num_bytes == 0) 3182 return; 3183 if (!quota_root) 3184 return; 3185 3186 spin_lock(&fs_info->qgroup_lock); 3187 qgroup = find_qgroup_rb(fs_info, ref_root); 3188 if (!qgroup) 3189 goto out; 3190 ulist_reinit(fs_info->qgroup_ulist); 3191 ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid, 3192 qgroup_to_aux(qgroup), GFP_ATOMIC); 3193 if (ret < 0) 3194 goto out; 3195 ULIST_ITER_INIT(&uiter); 3196 while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) { 3197 struct btrfs_qgroup *qg; 3198 struct btrfs_qgroup_list *glist; 3199 3200 qg = unode_aux_to_qgroup(unode); 3201 3202 qgroup_rsv_release(fs_info, qg, num_bytes, 3203 BTRFS_QGROUP_RSV_META_PREALLOC); 3204 qgroup_rsv_add(fs_info, qg, num_bytes, 3205 BTRFS_QGROUP_RSV_META_PERTRANS); 3206 list_for_each_entry(glist, &qg->groups, next_group) { 3207 ret = ulist_add(fs_info->qgroup_ulist, 3208 glist->group->qgroupid, 3209 qgroup_to_aux(glist->group), GFP_ATOMIC); 3210 if (ret < 0) 3211 goto out; 3212 } 3213 } 3214 out: 3215 spin_unlock(&fs_info->qgroup_lock); 3216 } 3217 3218 void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes) 3219 { 3220 struct btrfs_fs_info *fs_info = root->fs_info; 3221 3222 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) || 3223 !is_fstree(root->objectid)) 3224 return; 3225 /* Same as btrfs_qgroup_free_meta_prealloc() */ 3226 num_bytes = sub_root_meta_rsv(root, num_bytes, 3227 BTRFS_QGROUP_RSV_META_PREALLOC); 3228 trace_qgroup_meta_convert(root, num_bytes); 3229 qgroup_convert_meta(fs_info, root->objectid, num_bytes); 3230 } 3231 3232 /* 3233 * Check qgroup reserved space leaking, normally at destroy inode 3234 * time 3235 */ 3236 void btrfs_qgroup_check_reserved_leak(struct inode *inode) 3237 { 3238 struct extent_changeset changeset; 3239 struct ulist_node *unode; 3240 struct ulist_iterator iter; 3241 int ret; 3242 3243 extent_changeset_init(&changeset); 3244 ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1, 3245 EXTENT_QGROUP_RESERVED, &changeset); 3246 3247 WARN_ON(ret < 0); 3248 if (WARN_ON(changeset.bytes_changed)) { 3249 ULIST_ITER_INIT(&iter); 3250 while ((unode = ulist_next(&changeset.range_changed, &iter))) { 3251 btrfs_warn(BTRFS_I(inode)->root->fs_info, 3252 "leaking qgroup reserved space, ino: %lu, start: %llu, end: %llu", 3253 inode->i_ino, unode->val, unode->aux); 3254 } 3255 btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info, 3256 BTRFS_I(inode)->root->objectid, 3257 changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA); 3258 3259 } 3260 extent_changeset_release(&changeset); 3261 } 3262