// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "backref.h"
#include "extent_io.h"
#include "qgroup.h"
#include "block-group.h"
#include "sysfs.h"

/* TODO XXX FIXME
 *  - subvol delete -> delete when ref goes to 0? delete limits also?
 *  - reorganize keys
 *  - compressed
 *  - sync
 *  - copy also limits on subvol creation
 *  - limit
 *  - caches for ulists
 *  - performance benchmarks
 *  - check all ioctl parameters
 */

/*
 * Helpers to access qgroup reservation
 *
 * Callers should ensure the lock context and type are valid
 */

static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup)
{
	u64 ret = 0;
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		ret += qgroup->rsv.values[i];

	return ret;
}

#ifdef CONFIG_BTRFS_DEBUG
static const char *qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type)
{
	if (type == BTRFS_QGROUP_RSV_DATA)
		return "data";
	if (type == BTRFS_QGROUP_RSV_META_PERTRANS)
		return "meta_pertrans";
	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
		return "meta_prealloc";
	return NULL;
}
#endif

static void qgroup_rsv_add(struct btrfs_fs_info *fs_info,
			   struct btrfs_qgroup *qgroup, u64 num_bytes,
			   enum btrfs_qgroup_rsv_type type)
{
	trace_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);
	qgroup->rsv.values[type] += num_bytes;
}

static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,
			       struct btrfs_qgroup *qgroup, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type)
{
	trace_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type);
	if (qgroup->rsv.values[type] >= num_bytes) {
		qgroup->rsv.values[type] -= num_bytes;
		return;
	}
#ifdef CONFIG_BTRFS_DEBUG
	WARN_RATELIMIT(1,
		"qgroup %llu %s reserved space underflow, have %llu to free %llu",
		qgroup->qgroupid, qgroup_rsv_type_str(type),
		qgroup->rsv.values[type], num_bytes);
#endif
	qgroup->rsv.values[type] = 0;
}

static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info,
				     struct btrfs_qgroup *dest,
				     struct btrfs_qgroup *src)
{
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		qgroup_rsv_add(fs_info, dest, src->rsv.values[i], i);
}

static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info,
					 struct btrfs_qgroup *dest,
					 struct btrfs_qgroup *src)
{
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		qgroup_rsv_release(fs_info, dest, src->rsv.values[i], i);
}

static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->old_refcnt < seq)
		qg->old_refcnt = seq;
	qg->old_refcnt += mod;
}

static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->new_refcnt < seq)
		qg->new_refcnt = seq;
	qg->new_refcnt += mod;
}

static inline u64 btrfs_qgroup_get_old_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->old_refcnt < seq)
		return 0;
	return qg->old_refcnt - seq;
}
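/*
 * Example of the seq-based scheme used by the refcnt helpers above: the
 * counters are stored relative to a sequence number that is bumped for each
 * accounting run.  With seq == 100, two update calls with mod == 1 leave
 * old_refcnt == 102, and btrfs_qgroup_get_old_refcnt() then reports
 * 102 - 100 = 2.  A qgroup whose counter is still below seq was not touched
 * in this run at all, so the getters return 0 for it instead of a stale
 * count from an earlier run.
 */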
static inline u64 btrfs_qgroup_get_new_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->new_refcnt < seq)
		return 0;
	return qg->new_refcnt - seq;
}

/*
 * glue structure to represent the relations between qgroups.
 */
struct btrfs_qgroup_list {
	struct list_head next_group;
	struct list_head next_member;
	struct btrfs_qgroup *group;
	struct btrfs_qgroup *member;
};

static inline u64 qgroup_to_aux(struct btrfs_qgroup *qg)
{
	return (u64)(uintptr_t)qg;
}

static inline struct btrfs_qgroup *unode_aux_to_qgroup(struct ulist_node *n)
{
	return (struct btrfs_qgroup *)(uintptr_t)n->aux;
}

static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags);
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);

/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
					   u64 qgroupid)
{
	struct rb_node *n = fs_info->qgroup_tree.rb_node;
	struct btrfs_qgroup *qgroup;

	while (n) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		if (qgroup->qgroupid < qgroupid)
			n = n->rb_right;
		else if (qgroup->qgroupid > qgroupid)
			n = n->rb_left;
		else
			return qgroup;
	}
	return NULL;
}

/* must be called with qgroup_lock held */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
					  u64 qgroupid)
{
	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_qgroup *qgroup;

	while (*p) {
		parent = *p;
		qgroup = rb_entry(parent, struct btrfs_qgroup, node);

		if (qgroup->qgroupid < qgroupid)
			p = &(*p)->rb_right;
		else if (qgroup->qgroupid > qgroupid)
			p = &(*p)->rb_left;
		else
			return qgroup;
	}

	qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
	if (!qgroup)
		return ERR_PTR(-ENOMEM);

	qgroup->qgroupid = qgroupid;
	INIT_LIST_HEAD(&qgroup->groups);
	INIT_LIST_HEAD(&qgroup->members);
	INIT_LIST_HEAD(&qgroup->dirty);

	rb_link_node(&qgroup->node, parent, p);
	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);

	return qgroup;
}

static void __del_qgroup_rb(struct btrfs_fs_info *fs_info,
			    struct btrfs_qgroup *qgroup)
{
	struct btrfs_qgroup_list *list;

	btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
	list_del(&qgroup->dirty);
	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}

	while (!list_empty(&qgroup->members)) {
		list = list_first_entry(&qgroup->members,
					struct btrfs_qgroup_list, next_member);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}
	kfree(qgroup);
}

/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);

	if (!qgroup)
		return -ENOENT;

	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
	__del_qgroup_rb(fs_info, qgroup);
	return 0;
}
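/*
 * Sketch of how one btrfs_qgroup_list node glues a member to a parent,
 * following the struct defined above:
 *
 *   member->groups  --> list->next_group   (one entry per parent of member)
 *   parent->members --> list->next_member  (one entry per child of parent)
 *   list->group == parent, list->member == member
 *
 * Each relation is a single allocation linked into both lists, so deleting
 * it only needs the node unlinked from both lists and freed, as
 * add_relation_rb()/del_relation_rb() below do.
 */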
/* must be called with qgroup_lock held */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list = kzalloc(sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	list->group = parent;
	list->member = member;
	list_add_tail(&list->next_group, &member->groups);
	list_add_tail(&list->next_member, &parent->members);

	return 0;
}

/* must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			list_del(&list->next_group);
			list_del(&list->next_member);
			kfree(list);
			return 0;
		}
	}
	return -ENOENT;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
			       u64 rfer, u64 excl)
{
	struct btrfs_qgroup *qgroup;

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup)
		return -EINVAL;
	if (qgroup->rfer != rfer || qgroup->excl != excl)
		return -EINVAL;
	return 0;
}
#endif

/*
 * The full config is read in one go, only called from open_ctree()
 * It doesn't use any locking, as at this point we're still single-threaded
 */
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path = NULL;
	struct extent_buffer *l;
	int slot;
	int ret = 0;
	u64 flags = 0;
	u64 rescan_progress = 0;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_sysfs_add_qgroups(fs_info);
	if (ret < 0)
		goto out;
	/* default this to quota off, in case no status key is found */
	fs_info->qgroup_flags = 0;

	/*
	 * pass 1: read status, all qgroup infos and limits
	 */
	key.objectid = 0;
	key.type = 0;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
	if (ret)
		goto out;

	while (1) {
		struct btrfs_qgroup *qgroup;

		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
			struct btrfs_qgroup_status_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_status_item);

			if (btrfs_qgroup_status_version(l, ptr) !=
			    BTRFS_QGROUP_STATUS_VERSION) {
				btrfs_err(fs_info,
					"old qgroup version, quota disabled");
				goto out;
			}
			if (btrfs_qgroup_status_generation(l, ptr) !=
			    fs_info->generation) {
				flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
				btrfs_err(fs_info,
					"qgroup generation mismatch, marked as inconsistent");
			}
			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
									  ptr);
			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
			goto next1;
		}
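		/*
		 * Note on ordering: items come back in key order, and for a
		 * given qgroup the INFO key type sorts before the LIMIT key
		 * type, so each qgroup is first seen through its INFO item.
		 * A LIMIT item for an unknown qgroup, or a second INFO item,
		 * therefore indicates an inconsistent config, which is what
		 * the check below flags.
		 */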
		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
			goto next1;

		qgroup = find_qgroup_rb(fs_info, found_key.offset);
		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
			btrfs_err(fs_info, "inconsistent qgroup config");
			flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		}
		if (!qgroup) {
			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				goto out;
			}
		}
		ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
		if (ret < 0)
			goto out;

		switch (found_key.type) {
		case BTRFS_QGROUP_INFO_KEY: {
			struct btrfs_qgroup_info_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_info_item);
			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
			/* generation currently unused */
			break;
		}
		case BTRFS_QGROUP_LIMIT_KEY: {
			struct btrfs_qgroup_limit_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_limit_item);
			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
			break;
		}
		}
next1:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
	btrfs_release_path(path);

	/*
	 * pass 2: read all qgroup relations
	 */
	key.objectid = 0;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
	if (ret)
		goto out;
	while (1) {
		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
			goto next2;

		if (found_key.objectid > found_key.offset) {
			/* parent <- member, not needed to build config */
			/* FIXME should we omit the key completely? */
			goto next2;
		}

		ret = add_relation_rb(fs_info, found_key.objectid,
				      found_key.offset);
		if (ret == -ENOENT) {
			btrfs_warn(fs_info,
				"orphan qgroup relation 0x%llx->0x%llx",
				found_key.objectid, found_key.offset);
			ret = 0;	/* ignore the error */
		}
		if (ret)
			goto out;
next2:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
out:
	fs_info->qgroup_flags |= flags;
	if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
		clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
		 ret >= 0)
		ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
	btrfs_free_path(path);

	if (ret < 0) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		btrfs_sysfs_del_qgroups(fs_info);
	}

	return ret < 0 ? ret : 0;
}
/*
 * Called in close_ctree() when quota is still enabled.  This verifies we don't
 * leak some reserved space.
 *
 * Return false if no reserved space is left.
 * Return true if some reserved space is leaked.
 */
bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info)
{
	struct rb_node *node;
	bool ret = false;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return ret;
	/*
	 * Since we're unmounting, there is no race and no need to grab qgroup
	 * lock.  And here we don't go post-order to provide a more user
	 * friendly sorted result.
	 */
	for (node = rb_first(&fs_info->qgroup_tree); node; node = rb_next(node)) {
		struct btrfs_qgroup *qgroup;
		int i;

		qgroup = rb_entry(node, struct btrfs_qgroup, node);
		for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) {
			if (qgroup->rsv.values[i]) {
				ret = true;
				btrfs_warn(fs_info,
		"qgroup %hu/%llu has unreleased space, type %d rsv %llu",
					btrfs_qgroup_level(qgroup->qgroupid),
					btrfs_qgroup_subvolid(qgroup->qgroupid),
					i, qgroup->rsv.values[i]);
			}
		}
	}
	return ret;
}

/*
 * This is called from close_ctree() or open_ctree() or btrfs_quota_disable().
 * The first two run in single-threaded paths.  For the third one, we have
 * already set quota_root to NULL with qgroup_lock held, so it is safe to
 * clean up the in-memory structures without taking qgroup_lock.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	while ((n = rb_first(&fs_info->qgroup_tree))) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		rb_erase(n, &fs_info->qgroup_tree);
		__del_qgroup_rb(fs_info, qgroup);
	}
	/*
	 * We call btrfs_free_qgroup_config() when unmounting
	 * filesystem and disabling quota, so we set qgroup_ulist
	 * to be null here to avoid double free.
	 */
	ulist_free(fs_info->qgroup_ulist);
	fs_info->qgroup_ulist = NULL;
	btrfs_sysfs_del_qgroups(fs_info);
}

static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);

	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
out:
	btrfs_free_path(path);
	return ret;
}
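/*
 * On-disk layout of the quota tree items handled in this file, as the keys
 * are set up by the helpers around here (summary for reference):
 *
 *   (0,   BTRFS_QGROUP_STATUS_KEY,   0)        - global status item
 *   (0,   BTRFS_QGROUP_INFO_KEY,     qgroupid) - usage counters
 *   (0,   BTRFS_QGROUP_LIMIT_KEY,    qgroupid) - limits
 *   (src, BTRFS_QGROUP_RELATION_KEY, dst)      - relation, stored twice,
 *                                                once per direction
 */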
static int add_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_qgroup_info_item *qgroup_info;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	if (btrfs_is_testing(quota_root->fs_info))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;

	/*
	 * Avoid a transaction abort by catching -EEXIST here. In that
	 * case, we proceed by re-initializing the existing structure
	 * on disk.
	 */

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_info));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);

	btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_limit));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);

	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
	if (ret)
		goto out;

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);

out:
	btrfs_free_path(path);
	return ret;
}
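/*
 * The three update_qgroup_*_item() helpers below push in-memory qgroup (or
 * global status) state back into the quota tree.  Each searches for the
 * existing item with COW enabled and turns a not-found result into -ENOENT,
 * so callers typically mark the qgroup config inconsistent on failure
 * instead of aborting the transaction (see btrfs_limit_qgroup() below).
 */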
static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
				    struct btrfs_qgroup *qgroup)
{
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_LIMIT_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
				   struct btrfs_qgroup *qgroup)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_info_item *qgroup_info;
	int ret;
	int slot;

	if (btrfs_is_testing(fs_info))
		return 0;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_status_item *ptr;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
	btrfs_set_qgroup_status_rescan(l, ptr,
				fs_info->qgroup_rescan_progress.objectid);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}
/*
 * called with qgroup_lock held
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf = NULL;
	int ret;
	int nr = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	key.objectid = 0;
	key.offset = 0;
	key.type = 0;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			goto out;
		leaf = path->nodes[0];
		nr = btrfs_header_nritems(leaf);
		if (!nr)
			break;
		/*
		 * delete the leaf one by one
		 * since the whole tree is going
		 * to be deleted.
		 */
		path->slots[0] = 0;
		ret = btrfs_del_items(trans, root, path, 0, nr);
		if (ret)
			goto out;

		btrfs_release_path(path);
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path = NULL;
	struct btrfs_qgroup_status_item *ptr;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_qgroup *qgroup = NULL;
	struct btrfs_trans_handle *trans = NULL;
	int ret = 0;
	int slot;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (fs_info->quota_root)
		goto out;

	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_sysfs_add_qgroups(fs_info);
	if (ret < 0)
		goto out;
	/*
	 * 1 for quota root item
	 * 1 for BTRFS_QGROUP_STATUS item
	 *
	 * Yet we also need 2*n items for the QGROUP_INFO/QGROUP_LIMIT items
	 * of each subvolume.  However those are not currently reserved since
	 * it would be a lot of overkill.
	 */
	trans = btrfs_start_transaction(tree_root, 2);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	/*
	 * initially create the quota tree
	 */
	quota_root = btrfs_create_tree(trans, BTRFS_QUOTA_TREE_OBJECTID);
	if (IS_ERR(quota_root)) {
		ret = PTR_ERR(quota_root);
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		btrfs_abort_transaction(trans, ret);
		goto out_free_root;
	}

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*ptr));
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	leaf = path->nodes[0];
	ptr = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
				BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);

	btrfs_mark_buffer_dirty(leaf);
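	/*
	 * Scan the root tree for ROOT_REF items: each one names an existing
	 * subvolume, and the loop below creates a level-0 qgroup item for
	 * every one of them before quotas are switched on.
	 */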
	key.objectid = 0;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = 0;

	btrfs_release_path(path);
	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
	if (ret > 0)
		goto out_add_root;
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.type == BTRFS_ROOT_REF_KEY) {

			/* Release locks on tree_root before we access quota_root */
			btrfs_release_path(path);

			ret = add_qgroup_item(trans, quota_root,
					      found_key.offset);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}

			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
			ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
			ret = btrfs_search_slot_for_read(tree_root, &found_key,
							 path, 1, 0);
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
			if (ret > 0) {
				/*
				 * Shouldn't happen, but in case it does we
				 * don't need to do the btrfs_next_item, just
				 * continue.
				 */
				continue;
			}
		}
		ret = btrfs_next_item(tree_root, path);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out_free_path;
		}
		if (ret)
			break;
	}

out_add_root:
	btrfs_release_path(path);
	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
	if (IS_ERR(qgroup)) {
		ret = PTR_ERR(qgroup);
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}
	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	ret = btrfs_commit_transaction(trans);
	trans = NULL;
	if (ret)
		goto out_free_path;

	/*
	 * Set quota enabled flag after committing the transaction, to avoid
	 * deadlocks on fs_info->qgroup_ioctl_lock with concurrent snapshot
	 * creation.
	 */
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_root = quota_root;
	set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	spin_unlock(&fs_info->qgroup_lock);

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (!ret) {
		qgroup_rescan_zero_tracking(fs_info);
		fs_info->qgroup_rescan_running = true;
		btrfs_queue_work(fs_info->qgroup_rescan_workers,
				 &fs_info->qgroup_rescan_work);
	}

out_free_path:
	btrfs_free_path(path);
out_free_root:
	if (ret)
		btrfs_put_root(quota_root);
out:
	if (ret) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		if (trans)
			btrfs_end_transaction(trans);
		btrfs_sysfs_del_qgroups(fs_info);
	}
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_trans_handle *trans = NULL;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root)
		goto out;

	/*
	 * 1 For the root item
	 *
	 * We should also reserve enough items for the quota tree deletion in
	 * btrfs_clean_quota_tree but this is not done.
	 */
	trans = btrfs_start_transaction(fs_info->tree_root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	btrfs_qgroup_wait_for_completion(fs_info, false);
	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	fs_info->quota_root = NULL;
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);

	btrfs_free_qgroup_config(fs_info);

	ret = btrfs_clean_quota_tree(trans, quota_root);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto end_trans;
	}

	ret = btrfs_del_root(trans, &quota_root->root_key);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto end_trans;
	}

	list_del(&quota_root->dirty_list);

	btrfs_tree_lock(quota_root->node);
	btrfs_clean_tree_block(quota_root->node);
	btrfs_tree_unlock(quota_root->node);
	btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);

	btrfs_put_root(quota_root);

end_trans:
	ret = btrfs_end_transaction(trans);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

static void qgroup_dirty(struct btrfs_fs_info *fs_info,
			 struct btrfs_qgroup *qgroup)
{
	if (list_empty(&qgroup->dirty))
		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}
/*
 * The easy accounting: we are updating a qgroup relationship where the child
 * qgroup only has exclusive extents.
 *
 * In this case, all exclusive extents will also be exclusive for the parent,
 * so excl/rfer just get added/removed.
 *
 * The same applies to qgroup reservation space, which must also be added to
 * or removed from the parent.  Otherwise, when the child later releases its
 * reservation, the parent would underflow its reservation (in the
 * relation-adding case).
 *
 * Caller should hold fs_info->qgroup_lock.
 */
static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
				    struct ulist *tmp, u64 ref_root,
				    struct btrfs_qgroup *src, int sign)
{
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *glist;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	u64 num_bytes = src->excl;
	int ret = 0;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	qgroup->rfer += sign * num_bytes;
	qgroup->rfer_cmpr += sign * num_bytes;

	WARN_ON(sign < 0 && qgroup->excl < num_bytes);
	qgroup->excl += sign * num_bytes;
	qgroup->excl_cmpr += sign * num_bytes;

	if (sign > 0)
		qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
	else
		qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);

	qgroup_dirty(fs_info, qgroup);

	/* Get all of the parent groups that contain this qgroup */
	list_for_each_entry(glist, &qgroup->groups, next_group) {
		ret = ulist_add(tmp, glist->group->qgroupid,
				qgroup_to_aux(glist->group), GFP_ATOMIC);
		if (ret < 0)
			goto out;
	}

	/* Iterate all of the parents and adjust their reference counts */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(tmp, &uiter))) {
		qgroup = unode_aux_to_qgroup(unode);
		qgroup->rfer += sign * num_bytes;
		qgroup->rfer_cmpr += sign * num_bytes;
		WARN_ON(sign < 0 && qgroup->excl < num_bytes);
		qgroup->excl += sign * num_bytes;
		if (sign > 0)
			qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
		else
			qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
		qgroup->excl_cmpr += sign * num_bytes;
		qgroup_dirty(fs_info, qgroup);

		/* Add any parents of the parents */
		list_for_each_entry(glist, &qgroup->groups, next_group) {
			ret = ulist_add(tmp, glist->group->qgroupid,
					qgroup_to_aux(glist->group), GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;
out:
	return ret;
}
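/*
 * Worked example of the exclusive-only fast path above: a child qgroup with
 * rfer == excl == 1 MiB that is attached to a parent (sign == +1) bumps the
 * parent's rfer and excl by 1 MiB each, copies the child's per-type
 * reservations up, and then repeats the same adjustment for every ancestor
 * of the parent via the ulist walk.  Detaching (sign == -1) undoes exactly
 * that.
 */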
/*
 * Quick path for updating qgroup with only excl refs.
 *
 * In that case, just updating all parents is enough.
 * Otherwise we need to do a full rescan.
 * Caller should also hold fs_info->qgroup_lock.
 *
 * Return 0 for quick update, return >0 for need to full rescan
 * and mark INCONSISTENT flag.
 * Return < 0 for other error.
 */
static int quick_update_accounting(struct btrfs_fs_info *fs_info,
				   struct ulist *tmp, u64 src, u64 dst,
				   int sign)
{
	struct btrfs_qgroup *qgroup;
	int ret = 1;
	int err = 0;

	qgroup = find_qgroup_rb(fs_info, src);
	if (!qgroup)
		goto out;
	if (qgroup->excl == qgroup->rfer) {
		ret = 0;
		err = __qgroup_excl_accounting(fs_info, tmp, dst,
					       qgroup, sign);
		if (err < 0) {
			ret = err;
			goto out;
		}
	}
out:
	if (ret)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	return ret;
}

int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
			      u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	struct ulist *tmp;
	int ret = 0;

	/* Check the level of src and dst first */
	if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
		return -EINVAL;

	tmp = ulist_alloc(GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}
	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* check if such a qgroup relation exists first */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			ret = -EEXIST;
			goto out;
		}
	}

	ret = add_qgroup_relation_item(trans, src, dst);
	if (ret)
		goto out;

	ret = add_qgroup_relation_item(trans, dst, src);
	if (ret) {
		del_qgroup_relation_item(trans, src, dst);
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	ret = add_relation_rb(fs_info, src, dst);
	if (ret < 0) {
		spin_unlock(&fs_info->qgroup_lock);
		goto out;
	}
	ret = quick_update_accounting(fs_info, tmp, src, dst, 1);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	ulist_free(tmp);
	return ret;
}
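/*
 * Note that the relation item is written twice above, as
 * (src, RELATION, dst) and (dst, RELATION, src), so the relation can be
 * found from either end.  __del_qgroup_relation() below mirrors this and
 * treats the deletion as successful if at least one of the two items could
 * be removed.
 */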
static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
				 u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	struct ulist *tmp;
	bool found = false;
	int ret = 0;
	int ret2;

	tmp = ulist_alloc(GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}

	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	/*
	 * If the parent/member pair doesn't exist, only try to delete the
	 * dead relation items.
	 */
	if (!member || !parent)
		goto delete_item;

	/* check if such a qgroup relation exists first */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			found = true;
			break;
		}
	}

delete_item:
	ret = del_qgroup_relation_item(trans, src, dst);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	ret2 = del_qgroup_relation_item(trans, dst, src);
	if (ret2 < 0 && ret2 != -ENOENT)
		goto out;

	/* At least one deletion succeeded, return 0 */
	if (!ret || !ret2)
		ret = 0;

	if (found) {
		spin_lock(&fs_info->qgroup_lock);
		del_relation_rb(fs_info, src, dst);
		ret = quick_update_accounting(fs_info, tmp, src, dst, -1);
		spin_unlock(&fs_info->qgroup_lock);
	}
out:
	ulist_free(tmp);
	return ret;
}

int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
			      u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	ret = __del_qgroup_relation(trans, src, dst);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	return ret;
}

int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}
	quota_root = fs_info->quota_root;
	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (qgroup) {
		ret = -EEXIST;
		goto out;
	}

	ret = add_qgroup_item(trans, quota_root, qgroupid);
	if (ret)
		goto out;

	spin_lock(&fs_info->qgroup_lock);
	qgroup = add_qgroup_rb(fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);

	if (IS_ERR(qgroup)) {
		ret = PTR_ERR(qgroup);
		goto out;
	}
	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *list;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	}

	/* Check if there are no children of this qgroup */
	if (!list_empty(&qgroup->members)) {
		ret = -EBUSY;
		goto out;
	}

	ret = del_qgroup_item(trans, qgroupid);
	if (ret && ret != -ENOENT)
		goto out;

	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		ret = __del_qgroup_relation(trans, qgroupid,
					    list->group->qgroupid);
		if (ret)
			goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	del_qgroup_rb(fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
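/*
 * Usage sketch for the limit interface below: to clear only the max_rfer
 * limit, userspace passes a btrfs_qgroup_limit with
 * flags == BTRFS_QGROUP_LIMIT_MAX_RFER and max_rfer == (u64)-1; any limit
 * field whose flag bit is not set is left untouched.
 */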
int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
		       struct btrfs_qgroup_limit *limit)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *qgroup;
	int ret = 0;
	/* Sometimes we would want to clear the limit on this qgroup.
	 * To meet this requirement, we treat the -1 as a special value
	 * which tells the kernel to clear the limit on this qgroup.
	 */
	const u64 CLEAR_VALUE = -1;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
		if (limit->max_rfer == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
			qgroup->max_rfer = 0;
		} else {
			qgroup->max_rfer = limit->max_rfer;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
		if (limit->max_excl == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
			qgroup->max_excl = 0;
		} else {
			qgroup->max_excl = limit->max_excl;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
		if (limit->rsv_rfer == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
			qgroup->rsv_rfer = 0;
		} else {
			qgroup->rsv_rfer = limit->rsv_rfer;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
		if (limit->rsv_excl == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
			qgroup->rsv_excl = 0;
		} else {
			qgroup->rsv_excl = limit->rsv_excl;
		}
	}
	qgroup->lim_flags |= limit->flags;

	spin_unlock(&fs_info->qgroup_lock);

	ret = update_qgroup_limit_item(trans, qgroup);
	if (ret) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		btrfs_info(fs_info, "unable to update quota limit for %llu",
			   qgroupid);
	}

out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
				struct btrfs_delayed_ref_root *delayed_refs,
				struct btrfs_qgroup_extent_record *record)
{
	struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_qgroup_extent_record *entry;
	u64 bytenr = record->bytenr;

	lockdep_assert_held(&delayed_refs->lock);
	trace_btrfs_qgroup_trace_extent(fs_info, record);

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
				 node);
		if (bytenr < entry->bytenr) {
			p = &(*p)->rb_left;
		} else if (bytenr > entry->bytenr) {
			p = &(*p)->rb_right;
		} else {
			if (record->data_rsv && !entry->data_rsv) {
				entry->data_rsv = record->data_rsv;
				entry->data_rsv_refroot =
					record->data_rsv_refroot;
			}
			return 1;
		}
	}

	rb_link_node(&record->node, parent_node, p);
	rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
	return 0;
}
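/*
 * A note on the return convention above: 1 means an extent record for this
 * bytenr already sits in the dirty extent rb-tree, so the caller still owns
 * (and must free) the record it tried to insert; btrfs_qgroup_trace_extent()
 * below does exactly that by kfree()ing the duplicate.
 */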
int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
				   struct btrfs_qgroup_extent_record *qrecord)
{
	struct ulist *old_root;
	u64 bytenr = qrecord->bytenr;
	int ret;

	ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &old_root, false);
	if (ret < 0) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		btrfs_warn(fs_info,
"error accounting new delayed refs extent (err code: %d), quota inconsistent",
			ret);
		return 0;
	}

	/*
	 * Here we don't need to get the lock of
	 * trans->transaction->delayed_refs, since inserted qrecord won't
	 * be deleted, only qrecord->node may be modified (new qrecord insert)
	 *
	 * So modifying qrecord->old_roots is safe here
	 */
	qrecord->old_roots = old_root;
	return 0;
}

int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
			      u64 num_bytes, gfp_t gfp_flag)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup_extent_record *record;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)
	    || bytenr == 0 || num_bytes == 0)
		return 0;
	record = kzalloc(sizeof(*record), gfp_flag);
	if (!record)
		return -ENOMEM;

	delayed_refs = &trans->transaction->delayed_refs;
	record->bytenr = bytenr;
	record->num_bytes = num_bytes;
	record->old_roots = NULL;

	spin_lock(&delayed_refs->lock);
	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
	spin_unlock(&delayed_refs->lock);
	if (ret > 0) {
		kfree(record);
		return 0;
	}
	return btrfs_qgroup_trace_extent_post(fs_info, record);
}

int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
				  struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int nr = btrfs_header_nritems(eb);
	int i, extent_type, ret;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	u64 bytenr, num_bytes;

	/* We can be called directly from walk_up_proc() */
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	for (i = 0; i < nr; i++) {
		btrfs_item_key_to_cpu(eb, &key, i);

		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;

		fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
		/* filter out non qgroup-accountable extents  */
		extent_type = btrfs_file_extent_type(eb, fi);

		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;

		bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
		if (!bytenr)
			continue;

		num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);

		ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes,
						GFP_NOFS);
		if (ret)
			return ret;
	}
	cond_resched();
	return 0;
}
/*
 * Walk up the tree from the bottom, freeing leaves and any interior
 * nodes which have had all slots visited. If a node (leaf or
 * interior) is freed, the node above it will have its slot
 * incremented. The root node will never be freed.
 *
 * At the end of this function, we should have a path which has all
 * slots incremented to the next position for a search. If we need to
 * read a new node it will be NULL and the node above it will have the
 * correct slot selected for a later read.
 *
 * If we increment the root nodes slot counter past the number of
 * elements, 1 is returned to signal completion of the search.
 */
static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
{
	int level = 0;
	int nr, slot;
	struct extent_buffer *eb;

	if (root_level == 0)
		return 1;

	while (level <= root_level) {
		eb = path->nodes[level];
		nr = btrfs_header_nritems(eb);
		path->slots[level]++;
		slot = path->slots[level];
		if (slot >= nr || level == 0) {
			/*
			 * Don't free the root - we will detect this
			 * condition after our loop and return a
			 * positive value for caller to stop walking the tree.
			 */
			if (level != root_level) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				path->locks[level] = 0;

				free_extent_buffer(eb);
				path->nodes[level] = NULL;
				path->slots[level] = 0;
			}
		} else {
			/*
			 * We have a valid slot to walk back down
			 * from. Stop here so caller can process these
			 * new nodes.
			 */
			break;
		}

		level++;
	}

	eb = path->nodes[root_level];
	if (path->slots[root_level] >= btrfs_header_nritems(eb))
		return 1;

	return 0;
}
/*
 * Helper function to trace a subtree tree block swap.
 *
 * The swap will happen in highest tree block, but there may be a lot of
 * tree blocks involved.
 *
 * For example:
 *  OO = Old tree blocks
 *  NN = New tree blocks allocated during balance
 *
 *           File tree (257)                  Reloc tree for 257
 * L2              OO                                NN
 *               /    \                            /    \
 * L1          OO      OO (a)                    OO      NN (a)
 *            / \     / \                       / \     / \
 * L0       OO   OO OO   OO                   OO   OO NN   NN
 *                  (b)  (c)                          (b)  (c)
 *
 * When calling qgroup_trace_extent_swap(), we will pass:
 * @src_eb = OO(a)
 * @dst_path = [ nodes[1] = NN(a), nodes[0] = NN(c) ]
 * @dst_level = 0
 * @root_level = 1
 *
 * In that case, qgroup_trace_extent_swap() will search from OO(a) to
 * reach OO(c), then mark both OO(c) and NN(c) as qgroup dirty.
 *
 * The main work of qgroup_trace_extent_swap() can be split into 3 parts:
 *
 * 1) Tree search from @src_eb
 *    It acts as a simplified btrfs_search_slot().
 *    The key for search can be extracted from @dst_path->nodes[dst_level]
 *    (first key).
 *
 * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty
 *    NOTE: In above case, OO(a) and NN(a) won't be marked qgroup dirty.
 *    They should be marked during previous (@dst_level = 1) iteration.
 *
 * 3) Mark file extents in leaves dirty
 *    We don't have a good way to pick out new file extents only.
 *    So we still follow the old method by scanning all file extents in
 *    the leaf.
 *
 * This function can free us from keeping two paths, thus later we only need
 * to care about how to iterate all new tree blocks in reloc tree.
 */
static int qgroup_trace_extent_swap(struct btrfs_trans_handle *trans,
				    struct extent_buffer *src_eb,
				    struct btrfs_path *dst_path,
				    int dst_level, int root_level,
				    bool trace_leaf)
{
	struct btrfs_key key;
	struct btrfs_path *src_path;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	u32 nodesize = fs_info->nodesize;
	int cur_level = root_level;
	int ret;

	BUG_ON(dst_level > root_level);
	/* Level mismatch */
	if (btrfs_header_level(src_eb) != root_level)
		return -EINVAL;

	src_path = btrfs_alloc_path();
	if (!src_path) {
		ret = -ENOMEM;
		goto out;
	}

	if (dst_level)
		btrfs_node_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
	else
		btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0);

	/* For src_path */
	atomic_inc(&src_eb->refs);
	src_path->nodes[root_level] = src_eb;
	src_path->slots[root_level] = dst_path->slots[root_level];
	src_path->locks[root_level] = 0;

	/* A simplified version of btrfs_search_slot() */
	while (cur_level >= dst_level) {
		struct btrfs_key src_key;
		struct btrfs_key dst_key;

		if (src_path->nodes[cur_level] == NULL) {
			struct btrfs_key first_key;
			struct extent_buffer *eb;
			int parent_slot;
			u64 child_gen;
			u64 child_bytenr;

			eb = src_path->nodes[cur_level + 1];
			parent_slot = src_path->slots[cur_level + 1];
			child_bytenr = btrfs_node_blockptr(eb, parent_slot);
			child_gen = btrfs_node_ptr_generation(eb, parent_slot);
			btrfs_node_key_to_cpu(eb, &first_key, parent_slot);

			eb = read_tree_block(fs_info, child_bytenr, child_gen,
					     cur_level, &first_key);
			if (IS_ERR(eb)) {
				ret = PTR_ERR(eb);
				goto out;
			} else if (!extent_buffer_uptodate(eb)) {
				free_extent_buffer(eb);
				ret = -EIO;
				goto out;
			}

			src_path->nodes[cur_level] = eb;

			btrfs_tree_read_lock(eb);
			btrfs_set_lock_blocking_read(eb);
			src_path->locks[cur_level] = BTRFS_READ_LOCK_BLOCKING;
		}

		src_path->slots[cur_level] = dst_path->slots[cur_level];
		if (cur_level) {
			btrfs_node_key_to_cpu(dst_path->nodes[cur_level],
					&dst_key, dst_path->slots[cur_level]);
			btrfs_node_key_to_cpu(src_path->nodes[cur_level],
					&src_key, src_path->slots[cur_level]);
		} else {
			btrfs_item_key_to_cpu(dst_path->nodes[cur_level],
					&dst_key, dst_path->slots[cur_level]);
			btrfs_item_key_to_cpu(src_path->nodes[cur_level],
					&src_key, src_path->slots[cur_level]);
		}
		/* Content mismatch, something went wrong */
		if (btrfs_comp_cpu_keys(&dst_key, &src_key)) {
			ret = -ENOENT;
			goto out;
		}
		cur_level--;
	}
	/*
	 * Now both @dst_path and @src_path have been populated, record the
	 * tree blocks for qgroup accounting.
	 */
	ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start,
					nodesize, GFP_NOFS);
	if (ret < 0)
		goto out;
	ret = btrfs_qgroup_trace_extent(trans,
					dst_path->nodes[dst_level]->start,
					nodesize, GFP_NOFS);
	if (ret < 0)
		goto out;

	/* Record leaf file extents */
	if (dst_level == 0 && trace_leaf) {
		ret = btrfs_qgroup_trace_leaf_items(trans, src_path->nodes[0]);
		if (ret < 0)
			goto out;
		ret = btrfs_qgroup_trace_leaf_items(trans, dst_path->nodes[0]);
	}
out:
	btrfs_free_path(src_path);
	return ret;
}

/*
 * Helper function to do recursive generation-aware depth-first search, to
 * locate all new tree blocks in a subtree of reloc tree.
 *
 * E.g. (OO = Old tree blocks, NN = New tree blocks, whose gen == last_snapshot)
 *         reloc tree
 * L2         NN (a)
 *          /    \
 * L1    OO      NN (b)
 *      /  \    /  \
 * L0  OO  OO OO  NN
 *            (c) (d)
 * If we pass:
 * @dst_path = [ nodes[1] = NN(b), nodes[0] = NULL ],
 * @cur_level = 1
 * @root_level = 1
 *
 * We will iterate through tree blocks NN(b), NN(d) and inform qgroup to trace
 * these tree blocks along with their counterparts in the file tree.
 * During the search, old tree blocks OO(c) will be skipped as tree block swap
 * won't affect OO(c).
 */
static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle *trans,
					   struct extent_buffer *src_eb,
					   struct btrfs_path *dst_path,
					   int cur_level, int root_level,
					   u64 last_snapshot, bool trace_leaf)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct extent_buffer *eb;
	bool need_cleanup = false;
	int ret = 0;
	int i;

	/* Level sanity check */
	if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 ||
	    root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 ||
	    root_level < cur_level) {
		btrfs_err_rl(fs_info,
			"%s: bad levels, cur_level=%d root_level=%d",
			__func__, cur_level, root_level);
		return -EUCLEAN;
	}

	/* Read the tree block if needed */
	if (dst_path->nodes[cur_level] == NULL) {
		struct btrfs_key first_key;
		int parent_slot;
		u64 child_gen;
		u64 child_bytenr;

		/*
		 * dst_path->nodes[root_level] must be initialized before
		 * calling this function.
		 */
		if (cur_level == root_level) {
			btrfs_err_rl(fs_info,
	"%s: dst_path->nodes[%d] not initialized, root_level=%d cur_level=%d",
				__func__, root_level, root_level, cur_level);
			return -EUCLEAN;
		}

		/*
		 * We need to get child blockptr/gen from parent before we can
		 * read it.
		 */
2038 */ 2039 eb = dst_path->nodes[cur_level + 1]; 2040 parent_slot = dst_path->slots[cur_level + 1]; 2041 child_bytenr = btrfs_node_blockptr(eb, parent_slot); 2042 child_gen = btrfs_node_ptr_generation(eb, parent_slot); 2043 btrfs_node_key_to_cpu(eb, &first_key, parent_slot); 2044 2045 /* This node is old, no need to trace */ 2046 if (child_gen < last_snapshot) 2047 goto out; 2048 2049 eb = read_tree_block(fs_info, child_bytenr, child_gen, 2050 cur_level, &first_key); 2051 if (IS_ERR(eb)) { 2052 ret = PTR_ERR(eb); 2053 goto out; 2054 } else if (!extent_buffer_uptodate(eb)) { 2055 free_extent_buffer(eb); 2056 ret = -EIO; 2057 goto out; 2058 } 2059 2060 dst_path->nodes[cur_level] = eb; 2061 dst_path->slots[cur_level] = 0; 2062 2063 btrfs_tree_read_lock(eb); 2064 btrfs_set_lock_blocking_read(eb); 2065 dst_path->locks[cur_level] = BTRFS_READ_LOCK_BLOCKING; 2066 need_cleanup = true; 2067 } 2068 2069 /* Now record this tree block and its counter part for qgroups */ 2070 ret = qgroup_trace_extent_swap(trans, src_eb, dst_path, cur_level, 2071 root_level, trace_leaf); 2072 if (ret < 0) 2073 goto cleanup; 2074 2075 eb = dst_path->nodes[cur_level]; 2076 2077 if (cur_level > 0) { 2078 /* Iterate all child tree blocks */ 2079 for (i = 0; i < btrfs_header_nritems(eb); i++) { 2080 /* Skip old tree blocks as they won't be swapped */ 2081 if (btrfs_node_ptr_generation(eb, i) < last_snapshot) 2082 continue; 2083 dst_path->slots[cur_level] = i; 2084 2085 /* Recursive call (at most 7 times) */ 2086 ret = qgroup_trace_new_subtree_blocks(trans, src_eb, 2087 dst_path, cur_level - 1, root_level, 2088 last_snapshot, trace_leaf); 2089 if (ret < 0) 2090 goto cleanup; 2091 } 2092 } 2093 2094 cleanup: 2095 if (need_cleanup) { 2096 /* Clean up */ 2097 btrfs_tree_unlock_rw(dst_path->nodes[cur_level], 2098 dst_path->locks[cur_level]); 2099 free_extent_buffer(dst_path->nodes[cur_level]); 2100 dst_path->nodes[cur_level] = NULL; 2101 dst_path->slots[cur_level] = 0; 2102 dst_path->locks[cur_level] = 0; 2103 } 2104 out: 2105 return ret; 2106 } 2107 2108 static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans, 2109 struct extent_buffer *src_eb, 2110 struct extent_buffer *dst_eb, 2111 u64 last_snapshot, bool trace_leaf) 2112 { 2113 struct btrfs_fs_info *fs_info = trans->fs_info; 2114 struct btrfs_path *dst_path = NULL; 2115 int level; 2116 int ret; 2117 2118 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) 2119 return 0; 2120 2121 /* Wrong parameter order */ 2122 if (btrfs_header_generation(src_eb) > btrfs_header_generation(dst_eb)) { 2123 btrfs_err_rl(fs_info, 2124 "%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__, 2125 btrfs_header_generation(src_eb), 2126 btrfs_header_generation(dst_eb)); 2127 return -EUCLEAN; 2128 } 2129 2130 if (!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb)) { 2131 ret = -EIO; 2132 goto out; 2133 } 2134 2135 level = btrfs_header_level(dst_eb); 2136 dst_path = btrfs_alloc_path(); 2137 if (!dst_path) { 2138 ret = -ENOMEM; 2139 goto out; 2140 } 2141 /* For dst_path */ 2142 atomic_inc(&dst_eb->refs); 2143 dst_path->nodes[level] = dst_eb; 2144 dst_path->slots[level] = 0; 2145 dst_path->locks[level] = 0; 2146 2147 /* Do the generation aware breadth-first search */ 2148 ret = qgroup_trace_new_subtree_blocks(trans, src_eb, dst_path, level, 2149 level, last_snapshot, trace_leaf); 2150 if (ret < 0) 2151 goto out; 2152 ret = 0; 2153 2154 out: 2155 btrfs_free_path(dst_path); 2156 if (ret < 0) 2157 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 
2158 return ret; 2159 } 2160 2161 int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans, 2162 struct extent_buffer *root_eb, 2163 u64 root_gen, int root_level) 2164 { 2165 struct btrfs_fs_info *fs_info = trans->fs_info; 2166 int ret = 0; 2167 int level; 2168 struct extent_buffer *eb = root_eb; 2169 struct btrfs_path *path = NULL; 2170 2171 BUG_ON(root_level < 0 || root_level >= BTRFS_MAX_LEVEL); 2172 BUG_ON(root_eb == NULL); 2173 2174 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) 2175 return 0; 2176 2177 if (!extent_buffer_uptodate(root_eb)) { 2178 ret = btrfs_read_buffer(root_eb, root_gen, root_level, NULL); 2179 if (ret) 2180 goto out; 2181 } 2182 2183 if (root_level == 0) { 2184 ret = btrfs_qgroup_trace_leaf_items(trans, root_eb); 2185 goto out; 2186 } 2187 2188 path = btrfs_alloc_path(); 2189 if (!path) 2190 return -ENOMEM; 2191 2192 /* 2193 * Walk down the tree. Missing extent blocks are filled in as 2194 * we go. Metadata is accounted every time we read a new 2195 * extent block. 2196 * 2197 * When we reach a leaf, we account for file extent items in it, 2198 * walk back up the tree (adjusting slot pointers as we go) 2199 * and restart the search process. 2200 */ 2201 atomic_inc(&root_eb->refs); /* For path */ 2202 path->nodes[root_level] = root_eb; 2203 path->slots[root_level] = 0; 2204 path->locks[root_level] = 0; /* so release_path doesn't try to unlock */ 2205 walk_down: 2206 level = root_level; 2207 while (level >= 0) { 2208 if (path->nodes[level] == NULL) { 2209 struct btrfs_key first_key; 2210 int parent_slot; 2211 u64 child_gen; 2212 u64 child_bytenr; 2213 2214 /* 2215 * We need to get child blockptr/gen from parent before 2216 * we can read it. 2217 */ 2218 eb = path->nodes[level + 1]; 2219 parent_slot = path->slots[level + 1]; 2220 child_bytenr = btrfs_node_blockptr(eb, parent_slot); 2221 child_gen = btrfs_node_ptr_generation(eb, parent_slot); 2222 btrfs_node_key_to_cpu(eb, &first_key, parent_slot); 2223 2224 eb = read_tree_block(fs_info, child_bytenr, child_gen, 2225 level, &first_key); 2226 if (IS_ERR(eb)) { 2227 ret = PTR_ERR(eb); 2228 goto out; 2229 } else if (!extent_buffer_uptodate(eb)) { 2230 free_extent_buffer(eb); 2231 ret = -EIO; 2232 goto out; 2233 } 2234 2235 path->nodes[level] = eb; 2236 path->slots[level] = 0; 2237 2238 btrfs_tree_read_lock(eb); 2239 btrfs_set_lock_blocking_read(eb); 2240 path->locks[level] = BTRFS_READ_LOCK_BLOCKING; 2241 2242 ret = btrfs_qgroup_trace_extent(trans, child_bytenr, 2243 fs_info->nodesize, 2244 GFP_NOFS); 2245 if (ret) 2246 goto out; 2247 } 2248 2249 if (level == 0) { 2250 ret = btrfs_qgroup_trace_leaf_items(trans, 2251 path->nodes[level]); 2252 if (ret) 2253 goto out; 2254 2255 /* Nonzero return here means we completed our search */ 2256 ret = adjust_slots_upwards(path, root_level); 2257 if (ret) 2258 break; 2259 2260 /* Restart search with new slots */ 2261 goto walk_down; 2262 } 2263 2264 level--; 2265 } 2266 2267 ret = 0; 2268 out: 2269 btrfs_free_path(path); 2270 2271 return ret; 2272 } 2273 2274 #define UPDATE_NEW 0 2275 #define UPDATE_OLD 1 2276 /* 2277 * Walk all of the roots that points to the bytenr and adjust their refcnts. 
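 *
 * The walk also climbs from each root's own qgroup to every ancestor
 * qgroup via the next_group lists, so e.g. an extent referenced by
 * subvol 257 bumps the refcnt of qgroup 0/257 and of any higher level
 * qgroup (say 1/100) that 0/257 is assigned to.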
 */
static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
				struct ulist *roots, struct ulist *tmp,
				struct ulist *qgroups, u64 seq, int update_old)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct ulist_node *tmp_unode;
	struct ulist_iterator tmp_uiter;
	struct btrfs_qgroup *qg;
	int ret = 0;

	if (!roots)
		return 0;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(roots, &uiter))) {
		qg = find_qgroup_rb(fs_info, unode->val);
		if (!qg)
			continue;

		ulist_reinit(tmp);
		ret = ulist_add(qgroups, qg->qgroupid, qgroup_to_aux(qg),
				GFP_ATOMIC);
		if (ret < 0)
			return ret;
		ret = ulist_add(tmp, qg->qgroupid, qgroup_to_aux(qg), GFP_ATOMIC);
		if (ret < 0)
			return ret;
		ULIST_ITER_INIT(&tmp_uiter);
		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
			struct btrfs_qgroup_list *glist;

			qg = unode_aux_to_qgroup(tmp_unode);
			if (update_old)
				btrfs_qgroup_update_old_refcnt(qg, seq, 1);
			else
				btrfs_qgroup_update_new_refcnt(qg, seq, 1);
			list_for_each_entry(glist, &qg->groups, next_group) {
				ret = ulist_add(qgroups, glist->group->qgroupid,
						qgroup_to_aux(glist->group),
						GFP_ATOMIC);
				if (ret < 0)
					return ret;
				ret = ulist_add(tmp, glist->group->qgroupid,
						qgroup_to_aux(glist->group),
						GFP_ATOMIC);
				if (ret < 0)
					return ret;
			}
		}
	}
	return 0;
}

/*
 * Update qgroup rfer/excl counters.
 * The rfer update is easy, the code can explain itself.
 *
 * The excl update is tricky, the update is split into 2 parts.
 * Part 1: Possible exclusive <-> sharing detection:
 *	|	A	|	!A	|
 *  -------------------------------------
 *  B	|	*	|	-	|
 *  -------------------------------------
 *  !B	|	+	|	**	|
 *  -------------------------------------
 *
 * Conditions:
 * A:	cur_old_roots < nr_old_roots	(not exclusive before)
 * !A:	cur_old_roots == nr_old_roots	(possible exclusive before)
 * B:	cur_new_roots < nr_new_roots	(not exclusive now)
 * !B:	cur_new_roots == nr_new_roots	(possible exclusive now)
 *
 * Results:
 * +: Possible sharing -> exclusive	-: Possible exclusive -> sharing
 * *: Definitely not changed.		**: Possibly unchanged.
 *
 * For the !A and !B conditions, the exception is the cur_old/new_roots == 0
 * case.
 *
 * To make the logic clear, we first use conditions A and B to split the
 * combinations into 4 results.
 *
 * Then, for results "+" and "-", check the old/new_roots == 0 case, as
 * there only one variant may be 0.
 *
 * Lastly, check result **, where two variants may be 0, and split it
 * again (2x2).
 * But this time we don't need to consider other things, the code and logic
 * are easy to understand now.
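 *
 * Worked example: an extent of num_bytes was referenced by subvols A and B
 * (nr_old_roots == 2) and is referenced only by A afterwards
 * (nr_new_roots == 1). For A's qgroup, cur_old_count == 1 < nr_old_roots
 * (condition A) and cur_new_count == 1 == nr_new_roots (condition !B),
 * i.e. result "+": the extent may have become exclusive to A, so excl is
 * increased by num_bytes while rfer stays untouched. For B's qgroup,
 * cur_new_count == 0, so rfer is decreased by num_bytes instead.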
2367 */ 2368 static int qgroup_update_counters(struct btrfs_fs_info *fs_info, 2369 struct ulist *qgroups, 2370 u64 nr_old_roots, 2371 u64 nr_new_roots, 2372 u64 num_bytes, u64 seq) 2373 { 2374 struct ulist_node *unode; 2375 struct ulist_iterator uiter; 2376 struct btrfs_qgroup *qg; 2377 u64 cur_new_count, cur_old_count; 2378 2379 ULIST_ITER_INIT(&uiter); 2380 while ((unode = ulist_next(qgroups, &uiter))) { 2381 bool dirty = false; 2382 2383 qg = unode_aux_to_qgroup(unode); 2384 cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq); 2385 cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq); 2386 2387 trace_qgroup_update_counters(fs_info, qg, cur_old_count, 2388 cur_new_count); 2389 2390 /* Rfer update part */ 2391 if (cur_old_count == 0 && cur_new_count > 0) { 2392 qg->rfer += num_bytes; 2393 qg->rfer_cmpr += num_bytes; 2394 dirty = true; 2395 } 2396 if (cur_old_count > 0 && cur_new_count == 0) { 2397 qg->rfer -= num_bytes; 2398 qg->rfer_cmpr -= num_bytes; 2399 dirty = true; 2400 } 2401 2402 /* Excl update part */ 2403 /* Exclusive/none -> shared case */ 2404 if (cur_old_count == nr_old_roots && 2405 cur_new_count < nr_new_roots) { 2406 /* Exclusive -> shared */ 2407 if (cur_old_count != 0) { 2408 qg->excl -= num_bytes; 2409 qg->excl_cmpr -= num_bytes; 2410 dirty = true; 2411 } 2412 } 2413 2414 /* Shared -> exclusive/none case */ 2415 if (cur_old_count < nr_old_roots && 2416 cur_new_count == nr_new_roots) { 2417 /* Shared->exclusive */ 2418 if (cur_new_count != 0) { 2419 qg->excl += num_bytes; 2420 qg->excl_cmpr += num_bytes; 2421 dirty = true; 2422 } 2423 } 2424 2425 /* Exclusive/none -> exclusive/none case */ 2426 if (cur_old_count == nr_old_roots && 2427 cur_new_count == nr_new_roots) { 2428 if (cur_old_count == 0) { 2429 /* None -> exclusive/none */ 2430 2431 if (cur_new_count != 0) { 2432 /* None -> exclusive */ 2433 qg->excl += num_bytes; 2434 qg->excl_cmpr += num_bytes; 2435 dirty = true; 2436 } 2437 /* None -> none, nothing changed */ 2438 } else { 2439 /* Exclusive -> exclusive/none */ 2440 2441 if (cur_new_count == 0) { 2442 /* Exclusive -> none */ 2443 qg->excl -= num_bytes; 2444 qg->excl_cmpr -= num_bytes; 2445 dirty = true; 2446 } 2447 /* Exclusive -> exclusive, nothing changed */ 2448 } 2449 } 2450 2451 if (dirty) 2452 qgroup_dirty(fs_info, qg); 2453 } 2454 return 0; 2455 } 2456 2457 /* 2458 * Check if the @roots potentially is a list of fs tree roots 2459 * 2460 * Return 0 for definitely not a fs/subvol tree roots ulist 2461 * Return 1 for possible fs/subvol tree roots in the list (considering an empty 2462 * one as well) 2463 */ 2464 static int maybe_fs_roots(struct ulist *roots) 2465 { 2466 struct ulist_node *unode; 2467 struct ulist_iterator uiter; 2468 2469 /* Empty one, still possible for fs roots */ 2470 if (!roots || roots->nnodes == 0) 2471 return 1; 2472 2473 ULIST_ITER_INIT(&uiter); 2474 unode = ulist_next(roots, &uiter); 2475 if (!unode) 2476 return 1; 2477 2478 /* 2479 * If it contains fs tree roots, then it must belong to fs/subvol 2480 * trees. 2481 * If it contains a non-fs tree, it won't be shared with fs/subvol trees. 
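 * Either way the first entry decides the answer, so checking a single
 * entry classifies the whole list.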
 */
	return is_fstree(unode->val);
}

int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
				u64 num_bytes, struct ulist *old_roots,
				struct ulist *new_roots)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct ulist *qgroups = NULL;
	struct ulist *tmp = NULL;
	u64 seq;
	u64 nr_new_roots = 0;
	u64 nr_old_roots = 0;
	int ret = 0;

	/*
	 * If quotas get disabled in the meantime, the resources still need to
	 * be freed, so we can't just exit here.
	 */
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		goto out_free;

	if (new_roots) {
		if (!maybe_fs_roots(new_roots))
			goto out_free;
		nr_new_roots = new_roots->nnodes;
	}
	if (old_roots) {
		if (!maybe_fs_roots(old_roots))
			goto out_free;
		nr_old_roots = old_roots->nnodes;
	}

	/* Quick exit, either not fs tree roots, or won't affect any qgroup */
	if (nr_old_roots == 0 && nr_new_roots == 0)
		goto out_free;

	BUG_ON(!fs_info->quota_root);

	trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr,
					num_bytes, nr_old_roots, nr_new_roots);

	qgroups = ulist_alloc(GFP_NOFS);
	if (!qgroups) {
		ret = -ENOMEM;
		goto out_free;
	}
	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp) {
		ret = -ENOMEM;
		goto out_free;
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
		if (fs_info->qgroup_rescan_progress.objectid <= bytenr) {
			mutex_unlock(&fs_info->qgroup_rescan_lock);
			ret = 0;
			goto out_free;
		}
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	spin_lock(&fs_info->qgroup_lock);
	seq = fs_info->qgroup_seq;

	/* Update old refcnts using old_roots */
	ret = qgroup_update_refcnt(fs_info, old_roots, tmp, qgroups, seq,
				   UPDATE_OLD);
	if (ret < 0)
		goto out;

	/* Update new refcnts using new_roots */
	ret = qgroup_update_refcnt(fs_info, new_roots, tmp, qgroups, seq,
				   UPDATE_NEW);
	if (ret < 0)
		goto out;

	qgroup_update_counters(fs_info, qgroups, nr_old_roots, nr_new_roots,
			       num_bytes, seq);

	/* Bump qgroup_seq to avoid seq overlap */
	fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1;
out:
	spin_unlock(&fs_info->qgroup_lock);
out_free:
	ulist_free(tmp);
	ulist_free(qgroups);
	ulist_free(old_roots);
	ulist_free(new_roots);
	return ret;
}

int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup_extent_record *record;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct ulist *new_roots = NULL;
	struct rb_node *node;
	u64 num_dirty_extents = 0;
	u64 qgroup_to_skip;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	qgroup_to_skip = delayed_refs->qgroup_to_skip;
	while ((node = rb_first(&delayed_refs->dirty_extent_root))) {
		record = rb_entry(node, struct btrfs_qgroup_extent_record,
				  node);

		num_dirty_extents++;
		trace_btrfs_qgroup_account_extents(fs_info, record);

		if (!ret) {
			/*
			 * Old roots should be searched when inserting qgroup
			 * extent record
			 */
			if (WARN_ON(!record->old_roots)) {
				/* Search commit root to find old_roots */
				ret = btrfs_find_all_roots(NULL,
fs_info, 2606 record->bytenr, 0, 2607 &record->old_roots, false); 2608 if (ret < 0) 2609 goto cleanup; 2610 } 2611 2612 /* Free the reserved data space */ 2613 btrfs_qgroup_free_refroot(fs_info, 2614 record->data_rsv_refroot, 2615 record->data_rsv, 2616 BTRFS_QGROUP_RSV_DATA); 2617 /* 2618 * Use SEQ_LAST as time_seq to do special search, which 2619 * doesn't lock tree or delayed_refs and search current 2620 * root. It's safe inside commit_transaction(). 2621 */ 2622 ret = btrfs_find_all_roots(trans, fs_info, 2623 record->bytenr, SEQ_LAST, &new_roots, false); 2624 if (ret < 0) 2625 goto cleanup; 2626 if (qgroup_to_skip) { 2627 ulist_del(new_roots, qgroup_to_skip, 0); 2628 ulist_del(record->old_roots, qgroup_to_skip, 2629 0); 2630 } 2631 ret = btrfs_qgroup_account_extent(trans, record->bytenr, 2632 record->num_bytes, 2633 record->old_roots, 2634 new_roots); 2635 record->old_roots = NULL; 2636 new_roots = NULL; 2637 } 2638 cleanup: 2639 ulist_free(record->old_roots); 2640 ulist_free(new_roots); 2641 new_roots = NULL; 2642 rb_erase(node, &delayed_refs->dirty_extent_root); 2643 kfree(record); 2644 2645 } 2646 trace_qgroup_num_dirty_extents(fs_info, trans->transid, 2647 num_dirty_extents); 2648 return ret; 2649 } 2650 2651 /* 2652 * called from commit_transaction. Writes all changed qgroups to disk. 2653 */ 2654 int btrfs_run_qgroups(struct btrfs_trans_handle *trans) 2655 { 2656 struct btrfs_fs_info *fs_info = trans->fs_info; 2657 int ret = 0; 2658 2659 if (!fs_info->quota_root) 2660 return ret; 2661 2662 spin_lock(&fs_info->qgroup_lock); 2663 while (!list_empty(&fs_info->dirty_qgroups)) { 2664 struct btrfs_qgroup *qgroup; 2665 qgroup = list_first_entry(&fs_info->dirty_qgroups, 2666 struct btrfs_qgroup, dirty); 2667 list_del_init(&qgroup->dirty); 2668 spin_unlock(&fs_info->qgroup_lock); 2669 ret = update_qgroup_info_item(trans, qgroup); 2670 if (ret) 2671 fs_info->qgroup_flags |= 2672 BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 2673 ret = update_qgroup_limit_item(trans, qgroup); 2674 if (ret) 2675 fs_info->qgroup_flags |= 2676 BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 2677 spin_lock(&fs_info->qgroup_lock); 2678 } 2679 if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) 2680 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON; 2681 else 2682 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON; 2683 spin_unlock(&fs_info->qgroup_lock); 2684 2685 ret = update_qgroup_status_item(trans); 2686 if (ret) 2687 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 2688 2689 return ret; 2690 } 2691 2692 /* 2693 * Copy the accounting information between qgroups. This is necessary 2694 * when a snapshot or a subvolume is created. Throwing an error will 2695 * cause a transaction abort so we take extra care here to only error 2696 * when a readonly fs is a reasonable outcome. 2697 */ 2698 int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid, 2699 u64 objectid, struct btrfs_qgroup_inherit *inherit) 2700 { 2701 int ret = 0; 2702 int i; 2703 u64 *i_qgroups; 2704 bool committing = false; 2705 struct btrfs_fs_info *fs_info = trans->fs_info; 2706 struct btrfs_root *quota_root; 2707 struct btrfs_qgroup *srcgroup; 2708 struct btrfs_qgroup *dstgroup; 2709 bool need_rescan = false; 2710 u32 level_size = 0; 2711 u64 nums; 2712 2713 /* 2714 * There are only two callers of this function. 2715 * 2716 * One in create_subvol() in the ioctl context, which needs to hold 2717 * the qgroup_ioctl_lock. 
 *
 * The other one is in create_pending_snapshot(), where no other qgroup
 * code can modify the fs, as everything else needs to either start a new
 * trans or hold a trans handle, thus we don't need to hold
 * qgroup_ioctl_lock.
 * This avoids a long and complex lock chain and makes lockdep happy.
 */
	spin_lock(&fs_info->trans_lock);
	if (trans->transaction->state == TRANS_STATE_COMMIT_DOING)
		committing = true;
	spin_unlock(&fs_info->trans_lock);

	if (!committing)
		mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		goto out;

	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	if (inherit) {
		i_qgroups = (u64 *)(inherit + 1);
		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
		       2 * inherit->num_excl_copies;
		for (i = 0; i < nums; ++i) {
			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);

			/*
			 * Zero out invalid groups so we can ignore
			 * them later.
			 */
			if (!srcgroup ||
			    ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
				*i_qgroups = 0ULL;

			++i_qgroups;
		}
	}

	/*
	 * create a tracking group for the subvol itself
	 */
	ret = add_qgroup_item(trans, quota_root, objectid);
	if (ret)
		goto out;

	/*
	 * add qgroup to all inherited groups
	 */
	if (inherit) {
		i_qgroups = (u64 *)(inherit + 1);
		for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
			if (*i_qgroups == 0)
				continue;
			ret = add_qgroup_relation_item(trans, objectid,
						       *i_qgroups);
			if (ret && ret != -EEXIST)
				goto out;
			ret = add_qgroup_relation_item(trans, *i_qgroups,
						       objectid);
			if (ret && ret != -EEXIST)
				goto out;
		}
		ret = 0;
	}

	spin_lock(&fs_info->qgroup_lock);

	dstgroup = add_qgroup_rb(fs_info, objectid);
	if (IS_ERR(dstgroup)) {
		ret = PTR_ERR(dstgroup);
		goto unlock;
	}

	if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
		dstgroup->lim_flags = inherit->lim.flags;
		dstgroup->max_rfer = inherit->lim.max_rfer;
		dstgroup->max_excl = inherit->lim.max_excl;
		dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
		dstgroup->rsv_excl = inherit->lim.rsv_excl;

		ret = update_qgroup_limit_item(trans, dstgroup);
		if (ret) {
			fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
			btrfs_info(fs_info,
				   "unable to update quota limit for %llu",
				   dstgroup->qgroupid);
			goto unlock;
		}
	}

	if (srcid) {
		srcgroup = find_qgroup_rb(fs_info, srcid);
		if (!srcgroup)
			goto unlock;

		/*
		 * We call inherit after we clone the root in order to make sure
		 * our counts don't go crazy, so at this point the only
		 * difference between the two roots should be the root node.
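		 * E.g. with a 16KiB nodesize, source and snapshot each end up
		 * with excl == excl_cmpr == 16KiB (one tree block), while rfer
		 * is copied over unchanged.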
2822 */ 2823 level_size = fs_info->nodesize; 2824 dstgroup->rfer = srcgroup->rfer; 2825 dstgroup->rfer_cmpr = srcgroup->rfer_cmpr; 2826 dstgroup->excl = level_size; 2827 dstgroup->excl_cmpr = level_size; 2828 srcgroup->excl = level_size; 2829 srcgroup->excl_cmpr = level_size; 2830 2831 /* inherit the limit info */ 2832 dstgroup->lim_flags = srcgroup->lim_flags; 2833 dstgroup->max_rfer = srcgroup->max_rfer; 2834 dstgroup->max_excl = srcgroup->max_excl; 2835 dstgroup->rsv_rfer = srcgroup->rsv_rfer; 2836 dstgroup->rsv_excl = srcgroup->rsv_excl; 2837 2838 qgroup_dirty(fs_info, dstgroup); 2839 qgroup_dirty(fs_info, srcgroup); 2840 } 2841 2842 if (!inherit) 2843 goto unlock; 2844 2845 i_qgroups = (u64 *)(inherit + 1); 2846 for (i = 0; i < inherit->num_qgroups; ++i) { 2847 if (*i_qgroups) { 2848 ret = add_relation_rb(fs_info, objectid, *i_qgroups); 2849 if (ret) 2850 goto unlock; 2851 } 2852 ++i_qgroups; 2853 2854 /* 2855 * If we're doing a snapshot, and adding the snapshot to a new 2856 * qgroup, the numbers are guaranteed to be incorrect. 2857 */ 2858 if (srcid) 2859 need_rescan = true; 2860 } 2861 2862 for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) { 2863 struct btrfs_qgroup *src; 2864 struct btrfs_qgroup *dst; 2865 2866 if (!i_qgroups[0] || !i_qgroups[1]) 2867 continue; 2868 2869 src = find_qgroup_rb(fs_info, i_qgroups[0]); 2870 dst = find_qgroup_rb(fs_info, i_qgroups[1]); 2871 2872 if (!src || !dst) { 2873 ret = -EINVAL; 2874 goto unlock; 2875 } 2876 2877 dst->rfer = src->rfer - level_size; 2878 dst->rfer_cmpr = src->rfer_cmpr - level_size; 2879 2880 /* Manually tweaking numbers certainly needs a rescan */ 2881 need_rescan = true; 2882 } 2883 for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) { 2884 struct btrfs_qgroup *src; 2885 struct btrfs_qgroup *dst; 2886 2887 if (!i_qgroups[0] || !i_qgroups[1]) 2888 continue; 2889 2890 src = find_qgroup_rb(fs_info, i_qgroups[0]); 2891 dst = find_qgroup_rb(fs_info, i_qgroups[1]); 2892 2893 if (!src || !dst) { 2894 ret = -EINVAL; 2895 goto unlock; 2896 } 2897 2898 dst->excl = src->excl + level_size; 2899 dst->excl_cmpr = src->excl_cmpr + level_size; 2900 need_rescan = true; 2901 } 2902 2903 unlock: 2904 spin_unlock(&fs_info->qgroup_lock); 2905 if (!ret) 2906 ret = btrfs_sysfs_add_one_qgroup(fs_info, dstgroup); 2907 out: 2908 if (!committing) 2909 mutex_unlock(&fs_info->qgroup_ioctl_lock); 2910 if (need_rescan) 2911 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 2912 return ret; 2913 } 2914 2915 static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes) 2916 { 2917 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) && 2918 qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer) 2919 return false; 2920 2921 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) && 2922 qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl) 2923 return false; 2924 2925 return true; 2926 } 2927 2928 static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce, 2929 enum btrfs_qgroup_rsv_type type) 2930 { 2931 struct btrfs_qgroup *qgroup; 2932 struct btrfs_fs_info *fs_info = root->fs_info; 2933 u64 ref_root = root->root_key.objectid; 2934 int ret = 0; 2935 struct ulist_node *unode; 2936 struct ulist_iterator uiter; 2937 2938 if (!is_fstree(ref_root)) 2939 return 0; 2940 2941 if (num_bytes == 0) 2942 return 0; 2943 2944 if (test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags) && 2945 capable(CAP_SYS_RESOURCE)) 2946 enforce = false; 2947 2948 spin_lock(&fs_info->qgroup_lock); 2949 if 
(!fs_info->quota_root) 2950 goto out; 2951 2952 qgroup = find_qgroup_rb(fs_info, ref_root); 2953 if (!qgroup) 2954 goto out; 2955 2956 /* 2957 * in a first step, we check all affected qgroups if any limits would 2958 * be exceeded 2959 */ 2960 ulist_reinit(fs_info->qgroup_ulist); 2961 ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid, 2962 qgroup_to_aux(qgroup), GFP_ATOMIC); 2963 if (ret < 0) 2964 goto out; 2965 ULIST_ITER_INIT(&uiter); 2966 while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) { 2967 struct btrfs_qgroup *qg; 2968 struct btrfs_qgroup_list *glist; 2969 2970 qg = unode_aux_to_qgroup(unode); 2971 2972 if (enforce && !qgroup_check_limits(qg, num_bytes)) { 2973 ret = -EDQUOT; 2974 goto out; 2975 } 2976 2977 list_for_each_entry(glist, &qg->groups, next_group) { 2978 ret = ulist_add(fs_info->qgroup_ulist, 2979 glist->group->qgroupid, 2980 qgroup_to_aux(glist->group), GFP_ATOMIC); 2981 if (ret < 0) 2982 goto out; 2983 } 2984 } 2985 ret = 0; 2986 /* 2987 * no limits exceeded, now record the reservation into all qgroups 2988 */ 2989 ULIST_ITER_INIT(&uiter); 2990 while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) { 2991 struct btrfs_qgroup *qg; 2992 2993 qg = unode_aux_to_qgroup(unode); 2994 2995 qgroup_rsv_add(fs_info, qg, num_bytes, type); 2996 } 2997 2998 out: 2999 spin_unlock(&fs_info->qgroup_lock); 3000 return ret; 3001 } 3002 3003 /* 3004 * Free @num_bytes of reserved space with @type for qgroup. (Normally level 0 3005 * qgroup). 3006 * 3007 * Will handle all higher level qgroup too. 3008 * 3009 * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup. 3010 * This special case is only used for META_PERTRANS type. 3011 */ 3012 void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info, 3013 u64 ref_root, u64 num_bytes, 3014 enum btrfs_qgroup_rsv_type type) 3015 { 3016 struct btrfs_qgroup *qgroup; 3017 struct ulist_node *unode; 3018 struct ulist_iterator uiter; 3019 int ret = 0; 3020 3021 if (!is_fstree(ref_root)) 3022 return; 3023 3024 if (num_bytes == 0) 3025 return; 3026 3027 if (num_bytes == (u64)-1 && type != BTRFS_QGROUP_RSV_META_PERTRANS) { 3028 WARN(1, "%s: Invalid type to free", __func__); 3029 return; 3030 } 3031 spin_lock(&fs_info->qgroup_lock); 3032 3033 if (!fs_info->quota_root) 3034 goto out; 3035 3036 qgroup = find_qgroup_rb(fs_info, ref_root); 3037 if (!qgroup) 3038 goto out; 3039 3040 if (num_bytes == (u64)-1) 3041 /* 3042 * We're freeing all pertrans rsv, get reserved value from 3043 * level 0 qgroup as real num_bytes to free. 3044 */ 3045 num_bytes = qgroup->rsv.values[type]; 3046 3047 ulist_reinit(fs_info->qgroup_ulist); 3048 ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid, 3049 qgroup_to_aux(qgroup), GFP_ATOMIC); 3050 if (ret < 0) 3051 goto out; 3052 ULIST_ITER_INIT(&uiter); 3053 while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) { 3054 struct btrfs_qgroup *qg; 3055 struct btrfs_qgroup_list *glist; 3056 3057 qg = unode_aux_to_qgroup(unode); 3058 3059 qgroup_rsv_release(fs_info, qg, num_bytes, type); 3060 3061 list_for_each_entry(glist, &qg->groups, next_group) { 3062 ret = ulist_add(fs_info->qgroup_ulist, 3063 glist->group->qgroupid, 3064 qgroup_to_aux(glist->group), GFP_ATOMIC); 3065 if (ret < 0) 3066 goto out; 3067 } 3068 } 3069 3070 out: 3071 spin_unlock(&fs_info->qgroup_lock); 3072 } 3073 3074 /* 3075 * Check if the leaf is the last leaf. Which means all node pointers 3076 * are at their last position. 
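 * E.g. for a tree of height 2, the current leaf is the last one iff
 * path->slots[1] == btrfs_header_nritems(path->nodes[1]) - 1.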
3077 */ 3078 static bool is_last_leaf(struct btrfs_path *path) 3079 { 3080 int i; 3081 3082 for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) { 3083 if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1) 3084 return false; 3085 } 3086 return true; 3087 } 3088 3089 /* 3090 * returns < 0 on error, 0 when more leafs are to be scanned. 3091 * returns 1 when done. 3092 */ 3093 static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans, 3094 struct btrfs_path *path) 3095 { 3096 struct btrfs_fs_info *fs_info = trans->fs_info; 3097 struct btrfs_key found; 3098 struct extent_buffer *scratch_leaf = NULL; 3099 struct ulist *roots = NULL; 3100 u64 num_bytes; 3101 bool done; 3102 int slot; 3103 int ret; 3104 3105 mutex_lock(&fs_info->qgroup_rescan_lock); 3106 ret = btrfs_search_slot_for_read(fs_info->extent_root, 3107 &fs_info->qgroup_rescan_progress, 3108 path, 1, 0); 3109 3110 btrfs_debug(fs_info, 3111 "current progress key (%llu %u %llu), search_slot ret %d", 3112 fs_info->qgroup_rescan_progress.objectid, 3113 fs_info->qgroup_rescan_progress.type, 3114 fs_info->qgroup_rescan_progress.offset, ret); 3115 3116 if (ret) { 3117 /* 3118 * The rescan is about to end, we will not be scanning any 3119 * further blocks. We cannot unset the RESCAN flag here, because 3120 * we want to commit the transaction if everything went well. 3121 * To make the live accounting work in this phase, we set our 3122 * scan progress pointer such that every real extent objectid 3123 * will be smaller. 3124 */ 3125 fs_info->qgroup_rescan_progress.objectid = (u64)-1; 3126 btrfs_release_path(path); 3127 mutex_unlock(&fs_info->qgroup_rescan_lock); 3128 return ret; 3129 } 3130 done = is_last_leaf(path); 3131 3132 btrfs_item_key_to_cpu(path->nodes[0], &found, 3133 btrfs_header_nritems(path->nodes[0]) - 1); 3134 fs_info->qgroup_rescan_progress.objectid = found.objectid + 1; 3135 3136 scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]); 3137 if (!scratch_leaf) { 3138 ret = -ENOMEM; 3139 mutex_unlock(&fs_info->qgroup_rescan_lock); 3140 goto out; 3141 } 3142 slot = path->slots[0]; 3143 btrfs_release_path(path); 3144 mutex_unlock(&fs_info->qgroup_rescan_lock); 3145 3146 for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) { 3147 btrfs_item_key_to_cpu(scratch_leaf, &found, slot); 3148 if (found.type != BTRFS_EXTENT_ITEM_KEY && 3149 found.type != BTRFS_METADATA_ITEM_KEY) 3150 continue; 3151 if (found.type == BTRFS_METADATA_ITEM_KEY) 3152 num_bytes = fs_info->nodesize; 3153 else 3154 num_bytes = found.offset; 3155 3156 ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0, 3157 &roots, false); 3158 if (ret < 0) 3159 goto out; 3160 /* For rescan, just pass old_roots as NULL */ 3161 ret = btrfs_qgroup_account_extent(trans, found.objectid, 3162 num_bytes, NULL, roots); 3163 if (ret < 0) 3164 goto out; 3165 } 3166 out: 3167 if (scratch_leaf) 3168 free_extent_buffer(scratch_leaf); 3169 3170 if (done && !ret) { 3171 ret = 1; 3172 fs_info->qgroup_rescan_progress.objectid = (u64)-1; 3173 } 3174 return ret; 3175 } 3176 3177 static void btrfs_qgroup_rescan_worker(struct btrfs_work *work) 3178 { 3179 struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info, 3180 qgroup_rescan_work); 3181 struct btrfs_path *path; 3182 struct btrfs_trans_handle *trans = NULL; 3183 int err = -ENOMEM; 3184 int ret = 0; 3185 3186 path = btrfs_alloc_path(); 3187 if (!path) 3188 goto out; 3189 /* 3190 * Rescan should only search for commit root, and any later difference 3191 * should be recorded by qgroup 3192 */ 3193 
	path->search_commit_root = 1;
	path->skip_locking = 1;

	err = 0;
	while (!err && !btrfs_fs_closing(fs_info)) {
		trans = btrfs_start_transaction(fs_info->fs_root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			break;
		}
		if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
			err = -EINTR;
		} else {
			err = qgroup_rescan_leaf(trans, path);
		}
		if (err > 0)
			btrfs_commit_transaction(trans);
		else
			btrfs_end_transaction(trans);
	}

out:
	btrfs_free_path(path);

	mutex_lock(&fs_info->qgroup_rescan_lock);
	if (err > 0 &&
	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	} else if (err < 0) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	/*
	 * Only update the status item, since the previous part has already
	 * updated the qgroup info.
	 */
	trans = btrfs_start_transaction(fs_info->quota_root, 1);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		trans = NULL;
		btrfs_err(fs_info,
			  "failed to start transaction for status update: %d",
			  err);
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);
	if (!btrfs_fs_closing(fs_info))
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	if (trans) {
		ret = update_qgroup_status_item(trans);
		if (ret < 0) {
			err = ret;
			btrfs_err(fs_info, "failed to update qgroup status: %d",
				  err);
		}
	}
	fs_info->qgroup_rescan_running = false;
	complete_all(&fs_info->qgroup_rescan_completion);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	if (!trans)
		return;

	btrfs_end_transaction(trans);

	if (btrfs_fs_closing(fs_info)) {
		btrfs_info(fs_info, "qgroup scan paused");
	} else if (err >= 0) {
		btrfs_info(fs_info, "qgroup scan completed%s",
			err > 0 ? " (inconsistency flag cleared)" : "");
	} else {
		btrfs_err(fs_info, "qgroup scan failed with %d", err);
	}
}

/*
 * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
 * memory required for the rescan context.
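 *
 * @init_flags == 0 means we are resuming a rescan whose progress was
 * stored on disk (mount time); any other value starts a fresh rescan and
 * sets the RESCAN status flag.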
3272 */ 3273 static int 3274 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid, 3275 int init_flags) 3276 { 3277 int ret = 0; 3278 3279 if (!init_flags) { 3280 /* we're resuming qgroup rescan at mount time */ 3281 if (!(fs_info->qgroup_flags & 3282 BTRFS_QGROUP_STATUS_FLAG_RESCAN)) { 3283 btrfs_warn(fs_info, 3284 "qgroup rescan init failed, qgroup rescan is not queued"); 3285 ret = -EINVAL; 3286 } else if (!(fs_info->qgroup_flags & 3287 BTRFS_QGROUP_STATUS_FLAG_ON)) { 3288 btrfs_warn(fs_info, 3289 "qgroup rescan init failed, qgroup is not enabled"); 3290 ret = -EINVAL; 3291 } 3292 3293 if (ret) 3294 return ret; 3295 } 3296 3297 mutex_lock(&fs_info->qgroup_rescan_lock); 3298 3299 if (init_flags) { 3300 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) { 3301 btrfs_warn(fs_info, 3302 "qgroup rescan is already in progress"); 3303 ret = -EINPROGRESS; 3304 } else if (!(fs_info->qgroup_flags & 3305 BTRFS_QGROUP_STATUS_FLAG_ON)) { 3306 btrfs_warn(fs_info, 3307 "qgroup rescan init failed, qgroup is not enabled"); 3308 ret = -EINVAL; 3309 } 3310 3311 if (ret) { 3312 mutex_unlock(&fs_info->qgroup_rescan_lock); 3313 return ret; 3314 } 3315 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN; 3316 } 3317 3318 memset(&fs_info->qgroup_rescan_progress, 0, 3319 sizeof(fs_info->qgroup_rescan_progress)); 3320 fs_info->qgroup_rescan_progress.objectid = progress_objectid; 3321 init_completion(&fs_info->qgroup_rescan_completion); 3322 mutex_unlock(&fs_info->qgroup_rescan_lock); 3323 3324 btrfs_init_work(&fs_info->qgroup_rescan_work, 3325 btrfs_qgroup_rescan_worker, NULL, NULL); 3326 return 0; 3327 } 3328 3329 static void 3330 qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info) 3331 { 3332 struct rb_node *n; 3333 struct btrfs_qgroup *qgroup; 3334 3335 spin_lock(&fs_info->qgroup_lock); 3336 /* clear all current qgroup tracking information */ 3337 for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) { 3338 qgroup = rb_entry(n, struct btrfs_qgroup, node); 3339 qgroup->rfer = 0; 3340 qgroup->rfer_cmpr = 0; 3341 qgroup->excl = 0; 3342 qgroup->excl_cmpr = 0; 3343 qgroup_dirty(fs_info, qgroup); 3344 } 3345 spin_unlock(&fs_info->qgroup_lock); 3346 } 3347 3348 int 3349 btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info) 3350 { 3351 int ret = 0; 3352 struct btrfs_trans_handle *trans; 3353 3354 ret = qgroup_rescan_init(fs_info, 0, 1); 3355 if (ret) 3356 return ret; 3357 3358 /* 3359 * We have set the rescan_progress to 0, which means no more 3360 * delayed refs will be accounted by btrfs_qgroup_account_ref. 3361 * However, btrfs_qgroup_account_ref may be right after its call 3362 * to btrfs_find_all_roots, in which case it would still do the 3363 * accounting. 3364 * To solve this, we're committing the transaction, which will 3365 * ensure we run all delayed refs and only after that, we are 3366 * going to clear all tracking information for a clean start. 
3367 */ 3368 3369 trans = btrfs_join_transaction(fs_info->fs_root); 3370 if (IS_ERR(trans)) { 3371 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; 3372 return PTR_ERR(trans); 3373 } 3374 ret = btrfs_commit_transaction(trans); 3375 if (ret) { 3376 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; 3377 return ret; 3378 } 3379 3380 qgroup_rescan_zero_tracking(fs_info); 3381 3382 mutex_lock(&fs_info->qgroup_rescan_lock); 3383 fs_info->qgroup_rescan_running = true; 3384 btrfs_queue_work(fs_info->qgroup_rescan_workers, 3385 &fs_info->qgroup_rescan_work); 3386 mutex_unlock(&fs_info->qgroup_rescan_lock); 3387 3388 return 0; 3389 } 3390 3391 int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info, 3392 bool interruptible) 3393 { 3394 int running; 3395 int ret = 0; 3396 3397 mutex_lock(&fs_info->qgroup_rescan_lock); 3398 running = fs_info->qgroup_rescan_running; 3399 mutex_unlock(&fs_info->qgroup_rescan_lock); 3400 3401 if (!running) 3402 return 0; 3403 3404 if (interruptible) 3405 ret = wait_for_completion_interruptible( 3406 &fs_info->qgroup_rescan_completion); 3407 else 3408 wait_for_completion(&fs_info->qgroup_rescan_completion); 3409 3410 return ret; 3411 } 3412 3413 /* 3414 * this is only called from open_ctree where we're still single threaded, thus 3415 * locking is omitted here. 3416 */ 3417 void 3418 btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info) 3419 { 3420 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) { 3421 mutex_lock(&fs_info->qgroup_rescan_lock); 3422 fs_info->qgroup_rescan_running = true; 3423 btrfs_queue_work(fs_info->qgroup_rescan_workers, 3424 &fs_info->qgroup_rescan_work); 3425 mutex_unlock(&fs_info->qgroup_rescan_lock); 3426 } 3427 } 3428 3429 #define rbtree_iterate_from_safe(node, next, start) \ 3430 for (node = start; node && ({ next = rb_next(node); 1;}); node = next) 3431 3432 static int qgroup_unreserve_range(struct btrfs_inode *inode, 3433 struct extent_changeset *reserved, u64 start, 3434 u64 len) 3435 { 3436 struct rb_node *node; 3437 struct rb_node *next; 3438 struct ulist_node *entry; 3439 int ret = 0; 3440 3441 node = reserved->range_changed.root.rb_node; 3442 if (!node) 3443 return 0; 3444 while (node) { 3445 entry = rb_entry(node, struct ulist_node, rb_node); 3446 if (entry->val < start) 3447 node = node->rb_right; 3448 else 3449 node = node->rb_left; 3450 } 3451 3452 if (entry->val > start && rb_prev(&entry->rb_node)) 3453 entry = rb_entry(rb_prev(&entry->rb_node), struct ulist_node, 3454 rb_node); 3455 3456 rbtree_iterate_from_safe(node, next, &entry->rb_node) { 3457 u64 entry_start; 3458 u64 entry_end; 3459 u64 entry_len; 3460 int clear_ret; 3461 3462 entry = rb_entry(node, struct ulist_node, rb_node); 3463 entry_start = entry->val; 3464 entry_end = entry->aux; 3465 entry_len = entry_end - entry_start + 1; 3466 3467 if (entry_start >= start + len) 3468 break; 3469 if (entry_start + entry_len <= start) 3470 continue; 3471 /* 3472 * Now the entry is in [start, start + len), revert the 3473 * EXTENT_QGROUP_RESERVED bit. 
 */
		clear_ret = clear_extent_bits(&inode->io_tree, entry_start,
				entry_end, EXTENT_QGROUP_RESERVED);
		if (!ret && clear_ret < 0)
			ret = clear_ret;

		ulist_del(&reserved->range_changed, entry->val, entry->aux);
		if (likely(reserved->bytes_changed >= entry_len)) {
			reserved->bytes_changed -= entry_len;
		} else {
			WARN_ON(1);
			reserved->bytes_changed = 0;
		}
	}

	return ret;
}

/*
 * Try to free some space for qgroup.
 *
 * For qgroup, there are only 3 ways to free qgroup space:
 * - Flush nodatacow writes
 *   Any nodatacow write will free its reserved data space at
 *   run_delalloc_range().
 *   In theory, we should only flush nodatacow inodes, but it's not yet
 *   possible, so we need to flush the whole root.
 *
 * - Wait for ordered extents
 *   When ordered extents are finished, their reserved metadata is finally
 *   converted to pertrans status, which can be freed by a later transaction
 *   commit.
 *
 * - Commit transaction
 *   This would free the meta pertrans space.
 *   In theory this shouldn't provide much space, but it still helps when
 *   every last byte of qgroup space is needed.
 */
static int try_flush_qgroup(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	int ret;

	/*
	 * We don't want to run flushes again and again, so if there is one
	 * already running, wait for it to finish and then return.
	 */
	if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
		wait_event(root->qgroup_flush_wait,
			!test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
		return 0;
	}

	ret = btrfs_start_delalloc_snapshot(root);
	if (ret < 0)
		goto out;
	btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	ret = btrfs_commit_transaction(trans);
out:
	clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state);
	wake_up(&root->qgroup_flush_wait);
	return ret;
}

static int qgroup_reserve_data(struct btrfs_inode *inode,
			struct extent_changeset **reserved_ret, u64 start,
			u64 len)
{
	struct btrfs_root *root = inode->root;
	struct extent_changeset *reserved;
	bool new_reserved = false;
	u64 orig_reserved;
	u64 to_reserve;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
	    !is_fstree(root->root_key.objectid) || len == 0)
		return 0;

	/* @reserved parameter is mandatory for qgroup */
	if (WARN_ON(!reserved_ret))
		return -EINVAL;
	if (!*reserved_ret) {
		new_reserved = true;
		*reserved_ret = extent_changeset_alloc();
		if (!*reserved_ret)
			return -ENOMEM;
	}
	reserved = *reserved_ret;
	/* Record already reserved space */
	orig_reserved = reserved->bytes_changed;
	ret = set_record_extent_bits(&inode->io_tree, start,
			start + len - 1, EXTENT_QGROUP_RESERVED, reserved);

	/* Newly reserved space */
	to_reserve = reserved->bytes_changed - orig_reserved;
	trace_btrfs_qgroup_reserve_data(&inode->vfs_inode, start, len,
					to_reserve, QGROUP_RESERVE);
	if (ret < 0)
		goto out;
	ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA);
	if (ret < 0)
		goto cleanup;

	return ret;

cleanup:
	qgroup_unreserve_range(inode, reserved, start, len);
out:
	if (new_reserved) {
		extent_changeset_release(reserved);
		kfree(reserved);
		*reserved_ret = NULL;
	}
	return ret;
}

/*
 * Reserve qgroup space for range [start, start + len).
 *
 * This function will either reserve space from related qgroups or do nothing
 * if the range is already reserved.
 *
 * Return 0 for successful reservation
 * Return <0 for error (including -EDQUOT)
 *
 * NOTE: This function may sleep for memory allocation, dirty page flushing
 * and transaction commit. So the caller should not hold any dirty page
 * locked.
 */
int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
			struct extent_changeset **reserved_ret, u64 start,
			u64 len)
{
	int ret;

	ret = qgroup_reserve_data(inode, reserved_ret, start, len);
	if (ret <= 0 && ret != -EDQUOT)
		return ret;

	ret = try_flush_qgroup(inode->root);
	if (ret < 0)
		return ret;
	return qgroup_reserve_data(inode, reserved_ret, start, len);
}

/* Free ranges specified by @reserved, normally in the error path */
static int qgroup_free_reserved_data(struct btrfs_inode *inode,
			struct extent_changeset *reserved, u64 start, u64 len)
{
	struct btrfs_root *root = inode->root;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct extent_changeset changeset;
	int freed = 0;
	int ret;

	extent_changeset_init(&changeset);
	len = round_up(start + len, root->fs_info->sectorsize);
	start = round_down(start, root->fs_info->sectorsize);

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(&reserved->range_changed, &uiter))) {
		u64 range_start = unode->val;
		/* unode->aux is the inclusive end */
		u64 range_len = unode->aux - range_start + 1;
		u64 free_start;
		u64 free_len;

		extent_changeset_release(&changeset);

		/* Only free range in range [start, start + len) */
		if (range_start >= start + len ||
		    range_start + range_len <= start)
			continue;
		free_start = max(range_start, start);
		free_len = min(start + len, range_start + range_len) -
			   free_start;
		/*
		 * TODO: Also modify reserved->ranges_reserved to reflect
		 * the modification.
		 *
		 * However as long as we free qgroup reserved according to
		 * EXTENT_QGROUP_RESERVED, we won't double free.
		 * So there is no need to rush.
3664 */ 3665 ret = clear_record_extent_bits(&inode->io_tree, free_start, 3666 free_start + free_len - 1, 3667 EXTENT_QGROUP_RESERVED, &changeset); 3668 if (ret < 0) 3669 goto out; 3670 freed += changeset.bytes_changed; 3671 } 3672 btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid, freed, 3673 BTRFS_QGROUP_RSV_DATA); 3674 ret = freed; 3675 out: 3676 extent_changeset_release(&changeset); 3677 return ret; 3678 } 3679 3680 static int __btrfs_qgroup_release_data(struct btrfs_inode *inode, 3681 struct extent_changeset *reserved, u64 start, u64 len, 3682 int free) 3683 { 3684 struct extent_changeset changeset; 3685 int trace_op = QGROUP_RELEASE; 3686 int ret; 3687 3688 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &inode->root->fs_info->flags)) 3689 return 0; 3690 3691 /* In release case, we shouldn't have @reserved */ 3692 WARN_ON(!free && reserved); 3693 if (free && reserved) 3694 return qgroup_free_reserved_data(inode, reserved, start, len); 3695 extent_changeset_init(&changeset); 3696 ret = clear_record_extent_bits(&inode->io_tree, start, start + len -1, 3697 EXTENT_QGROUP_RESERVED, &changeset); 3698 if (ret < 0) 3699 goto out; 3700 3701 if (free) 3702 trace_op = QGROUP_FREE; 3703 trace_btrfs_qgroup_release_data(&inode->vfs_inode, start, len, 3704 changeset.bytes_changed, trace_op); 3705 if (free) 3706 btrfs_qgroup_free_refroot(inode->root->fs_info, 3707 inode->root->root_key.objectid, 3708 changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA); 3709 ret = changeset.bytes_changed; 3710 out: 3711 extent_changeset_release(&changeset); 3712 return ret; 3713 } 3714 3715 /* 3716 * Free a reserved space range from io_tree and related qgroups 3717 * 3718 * Should be called when a range of pages get invalidated before reaching disk. 3719 * Or for error cleanup case. 3720 * if @reserved is given, only reserved range in [@start, @start + @len) will 3721 * be freed. 3722 * 3723 * For data written to disk, use btrfs_qgroup_release_data(). 3724 * 3725 * NOTE: This function may sleep for memory allocation. 3726 */ 3727 int btrfs_qgroup_free_data(struct btrfs_inode *inode, 3728 struct extent_changeset *reserved, u64 start, u64 len) 3729 { 3730 return __btrfs_qgroup_release_data(inode, reserved, start, len, 1); 3731 } 3732 3733 /* 3734 * Release a reserved space range from io_tree only. 3735 * 3736 * Should be called when a range of pages get written to disk and corresponding 3737 * FILE_EXTENT is inserted into corresponding root. 3738 * 3739 * Since new qgroup accounting framework will only update qgroup numbers at 3740 * commit_transaction() time, its reserved space shouldn't be freed from 3741 * related qgroups. 3742 * 3743 * But we should release the range from io_tree, to allow further write to be 3744 * COWed. 3745 * 3746 * NOTE: This function may sleep for memory allocation. 
3747 */ 3748 int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len) 3749 { 3750 return __btrfs_qgroup_release_data(inode, NULL, start, len, 0); 3751 } 3752 3753 static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes, 3754 enum btrfs_qgroup_rsv_type type) 3755 { 3756 if (type != BTRFS_QGROUP_RSV_META_PREALLOC && 3757 type != BTRFS_QGROUP_RSV_META_PERTRANS) 3758 return; 3759 if (num_bytes == 0) 3760 return; 3761 3762 spin_lock(&root->qgroup_meta_rsv_lock); 3763 if (type == BTRFS_QGROUP_RSV_META_PREALLOC) 3764 root->qgroup_meta_rsv_prealloc += num_bytes; 3765 else 3766 root->qgroup_meta_rsv_pertrans += num_bytes; 3767 spin_unlock(&root->qgroup_meta_rsv_lock); 3768 } 3769 3770 static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes, 3771 enum btrfs_qgroup_rsv_type type) 3772 { 3773 if (type != BTRFS_QGROUP_RSV_META_PREALLOC && 3774 type != BTRFS_QGROUP_RSV_META_PERTRANS) 3775 return 0; 3776 if (num_bytes == 0) 3777 return 0; 3778 3779 spin_lock(&root->qgroup_meta_rsv_lock); 3780 if (type == BTRFS_QGROUP_RSV_META_PREALLOC) { 3781 num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc, 3782 num_bytes); 3783 root->qgroup_meta_rsv_prealloc -= num_bytes; 3784 } else { 3785 num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans, 3786 num_bytes); 3787 root->qgroup_meta_rsv_pertrans -= num_bytes; 3788 } 3789 spin_unlock(&root->qgroup_meta_rsv_lock); 3790 return num_bytes; 3791 } 3792 3793 static int qgroup_reserve_meta(struct btrfs_root *root, int num_bytes, 3794 enum btrfs_qgroup_rsv_type type, bool enforce) 3795 { 3796 struct btrfs_fs_info *fs_info = root->fs_info; 3797 int ret; 3798 3799 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) || 3800 !is_fstree(root->root_key.objectid) || num_bytes == 0) 3801 return 0; 3802 3803 BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize)); 3804 trace_qgroup_meta_reserve(root, (s64)num_bytes, type); 3805 ret = qgroup_reserve(root, num_bytes, enforce, type); 3806 if (ret < 0) 3807 return ret; 3808 /* 3809 * Record what we have reserved into root. 3810 * 3811 * To avoid quota disabled->enabled underflow. 3812 * In that case, we may try to free space we haven't reserved 3813 * (since quota was disabled), so record what we reserved into root. 3814 * And ensure later release won't underflow this number. 
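	 * E.g. a reservation made while quota was disabled records nothing;
	 * once quota is re-enabled, sub_root_meta_rsv() clamps the matching
	 * free to what was actually recorded, so the qgroup rsv never
	 * underflows.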
3815 */ 3816 add_root_meta_rsv(root, num_bytes, type); 3817 return ret; 3818 } 3819 3820 int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes, 3821 enum btrfs_qgroup_rsv_type type, bool enforce) 3822 { 3823 int ret; 3824 3825 ret = qgroup_reserve_meta(root, num_bytes, type, enforce); 3826 if (ret <= 0 && ret != -EDQUOT) 3827 return ret; 3828 3829 ret = try_flush_qgroup(root); 3830 if (ret < 0) 3831 return ret; 3832 return qgroup_reserve_meta(root, num_bytes, type, enforce); 3833 } 3834 3835 void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root) 3836 { 3837 struct btrfs_fs_info *fs_info = root->fs_info; 3838 3839 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) || 3840 !is_fstree(root->root_key.objectid)) 3841 return; 3842 3843 /* TODO: Update trace point to handle such free */ 3844 trace_qgroup_meta_free_all_pertrans(root); 3845 /* Special value -1 means to free all reserved space */ 3846 btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid, (u64)-1, 3847 BTRFS_QGROUP_RSV_META_PERTRANS); 3848 } 3849 3850 void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes, 3851 enum btrfs_qgroup_rsv_type type) 3852 { 3853 struct btrfs_fs_info *fs_info = root->fs_info; 3854 3855 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) || 3856 !is_fstree(root->root_key.objectid)) 3857 return; 3858 3859 /* 3860 * reservation for META_PREALLOC can happen before quota is enabled, 3861 * which can lead to underflow. 3862 * Here ensure we will only free what we really have reserved. 3863 */ 3864 num_bytes = sub_root_meta_rsv(root, num_bytes, type); 3865 BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize)); 3866 trace_qgroup_meta_reserve(root, -(s64)num_bytes, type); 3867 btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid, 3868 num_bytes, type); 3869 } 3870 3871 static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root, 3872 int num_bytes) 3873 { 3874 struct btrfs_qgroup *qgroup; 3875 struct ulist_node *unode; 3876 struct ulist_iterator uiter; 3877 int ret = 0; 3878 3879 if (num_bytes == 0) 3880 return; 3881 if (!fs_info->quota_root) 3882 return; 3883 3884 spin_lock(&fs_info->qgroup_lock); 3885 qgroup = find_qgroup_rb(fs_info, ref_root); 3886 if (!qgroup) 3887 goto out; 3888 ulist_reinit(fs_info->qgroup_ulist); 3889 ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid, 3890 qgroup_to_aux(qgroup), GFP_ATOMIC); 3891 if (ret < 0) 3892 goto out; 3893 ULIST_ITER_INIT(&uiter); 3894 while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) { 3895 struct btrfs_qgroup *qg; 3896 struct btrfs_qgroup_list *glist; 3897 3898 qg = unode_aux_to_qgroup(unode); 3899 3900 qgroup_rsv_release(fs_info, qg, num_bytes, 3901 BTRFS_QGROUP_RSV_META_PREALLOC); 3902 qgroup_rsv_add(fs_info, qg, num_bytes, 3903 BTRFS_QGROUP_RSV_META_PERTRANS); 3904 list_for_each_entry(glist, &qg->groups, next_group) { 3905 ret = ulist_add(fs_info->qgroup_ulist, 3906 glist->group->qgroupid, 3907 qgroup_to_aux(glist->group), GFP_ATOMIC); 3908 if (ret < 0) 3909 goto out; 3910 } 3911 } 3912 out: 3913 spin_unlock(&fs_info->qgroup_lock); 3914 } 3915 3916 void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes) 3917 { 3918 struct btrfs_fs_info *fs_info = root->fs_info; 3919 3920 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) || 3921 !is_fstree(root->root_key.objectid)) 3922 return; 3923 /* Same as btrfs_qgroup_free_meta_prealloc() */ 3924 num_bytes = sub_root_meta_rsv(root, num_bytes, 3925 BTRFS_QGROUP_RSV_META_PREALLOC); 3926 
	trace_qgroup_meta_convert(root, num_bytes);
	qgroup_convert_meta(fs_info, root->root_key.objectid, num_bytes);
}

/*
 * Check for leaked qgroup reserved space, normally at inode destruction
 * time.
 */
void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode)
{
	struct extent_changeset changeset;
	struct ulist_node *unode;
	struct ulist_iterator iter;
	int ret;

	extent_changeset_init(&changeset);
	ret = clear_record_extent_bits(&inode->io_tree, 0, (u64)-1,
				       EXTENT_QGROUP_RESERVED, &changeset);

	WARN_ON(ret < 0);
	if (WARN_ON(changeset.bytes_changed)) {
		ULIST_ITER_INIT(&iter);
		while ((unode = ulist_next(&changeset.range_changed, &iter))) {
			btrfs_warn(inode->root->fs_info,
		"leaking qgroup reserved space, ino: %llu, start: %llu, end: %llu",
				btrfs_ino(inode), unode->val, unode->aux);
		}
		btrfs_qgroup_free_refroot(inode->root->fs_info,
				inode->root->root_key.objectid,
				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
	}
	extent_changeset_release(&changeset);
}

void btrfs_qgroup_init_swapped_blocks(
	struct btrfs_qgroup_swapped_blocks *swapped_blocks)
{
	int i;

	spin_lock_init(&swapped_blocks->lock);
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		swapped_blocks->blocks[i] = RB_ROOT;
	swapped_blocks->swapped = false;
}

/*
 * Delete all swapped block records of @root.
 * Every record here means we skipped a full subtree scan for qgroup.
 *
 * Gets called when committing one transaction.
 */
void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root)
{
	struct btrfs_qgroup_swapped_blocks *swapped_blocks;
	int i;

	swapped_blocks = &root->swapped_blocks;

	spin_lock(&swapped_blocks->lock);
	if (!swapped_blocks->swapped)
		goto out;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		struct rb_root *cur_root = &swapped_blocks->blocks[i];
		struct btrfs_qgroup_swapped_block *entry;
		struct btrfs_qgroup_swapped_block *next;

		rbtree_postorder_for_each_entry_safe(entry, next, cur_root,
						     node)
			kfree(entry);
		swapped_blocks->blocks[i] = RB_ROOT;
	}
	swapped_blocks->swapped = false;
out:
	spin_unlock(&swapped_blocks->lock);
}

/*
 * Add subtree root records into @subvol_root.
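 * Every recorded pair lets us postpone the expensive subtree trace until
 * the subvolume tree block is COWed again, at which point
 * btrfs_qgroup_trace_subtree_after_cow() consumes the record.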
/*
 * Add subtree root records into @subvol_root.
 *
 * @subvol_root:	tree root of the subvolume tree that got swapped
 * @bg:			block group under balance
 * @subvol_parent/slot:	pointer to the subtree root in the subvolume tree
 * @reloc_parent/slot:	pointer to the subtree root in the reloc tree
 *			BOTH POINTERS ARE BEFORE TREE SWAP
 * @last_snapshot:	last snapshot generation of the subvolume tree
 */
int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
		struct btrfs_root *subvol_root,
		struct btrfs_block_group *bg,
		struct extent_buffer *subvol_parent, int subvol_slot,
		struct extent_buffer *reloc_parent, int reloc_slot,
		u64 last_snapshot)
{
	struct btrfs_fs_info *fs_info = subvol_root->fs_info;
	struct btrfs_qgroup_swapped_blocks *blocks = &subvol_root->swapped_blocks;
	struct btrfs_qgroup_swapped_block *block;
	struct rb_node **cur;
	struct rb_node *parent = NULL;
	int level = btrfs_header_level(subvol_parent) - 1;
	int ret = 0;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	if (btrfs_node_ptr_generation(subvol_parent, subvol_slot) >
	    btrfs_node_ptr_generation(reloc_parent, reloc_slot)) {
		btrfs_err_rl(fs_info,
		"%s: bad parameter order, subvol_gen=%llu reloc_gen=%llu",
			__func__,
			btrfs_node_ptr_generation(subvol_parent, subvol_slot),
			btrfs_node_ptr_generation(reloc_parent, reloc_slot));
		return -EUCLEAN;
	}

	block = kmalloc(sizeof(*block), GFP_NOFS);
	if (!block) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * @reloc_parent/slot is still before the swap, while @block is going
	 * to record the bytenr after the swap, so we do the swap here.
	 */
	block->subvol_bytenr = btrfs_node_blockptr(reloc_parent, reloc_slot);
	block->subvol_generation = btrfs_node_ptr_generation(reloc_parent,
							     reloc_slot);
	block->reloc_bytenr = btrfs_node_blockptr(subvol_parent, subvol_slot);
	block->reloc_generation = btrfs_node_ptr_generation(subvol_parent,
							    subvol_slot);
	block->last_snapshot = last_snapshot;
	block->level = level;

	/*
	 * If bg == NULL, we're called from btrfs_recover_relocation(); no one
	 * else can modify tree blocks, thus qgroup accounting will not change
	 * no matter the value of trace_leaf.
	 */
	if (bg && bg->flags & BTRFS_BLOCK_GROUP_DATA)
		block->trace_leaf = true;
	else
		block->trace_leaf = false;
	btrfs_node_key_to_cpu(reloc_parent, &block->first_key, reloc_slot);

	/* Insert @block into @blocks */
	spin_lock(&blocks->lock);
	cur = &blocks->blocks[level].rb_node;
	while (*cur) {
		struct btrfs_qgroup_swapped_block *entry;

		parent = *cur;
		entry = rb_entry(parent, struct btrfs_qgroup_swapped_block,
				 node);

		if (entry->subvol_bytenr < block->subvol_bytenr) {
			cur = &(*cur)->rb_left;
		} else if (entry->subvol_bytenr > block->subvol_bytenr) {
			cur = &(*cur)->rb_right;
		} else {
			if (entry->subvol_generation !=
					block->subvol_generation ||
			    entry->reloc_bytenr != block->reloc_bytenr ||
			    entry->reloc_generation !=
					block->reloc_generation) {
				/*
				 * Duplicate but mismatched entry found.
				 * Shouldn't happen.
				 *
				 * Marking the qgroup inconsistent should be
				 * enough for end users.
				 */
				WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
				ret = -EEXIST;
			}
			kfree(block);
			goto out_unlock;
		}
	}
	rb_link_node(&block->node, parent, cur);
	rb_insert_color(&block->node, &blocks->blocks[level]);
	blocks->swapped = true;
out_unlock:
	spin_unlock(&blocks->lock);
out:
	if (ret < 0)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	return ret;
}
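
/*
 * Lifecycle sketch (a simplified illustration, not the exact call chains):
 * relocation records the two swapped subtree roots here, and a later COW
 * of the subvolume-side block consumes the record in
 * btrfs_qgroup_trace_subtree_after_cow() below:
 *
 *	During balance, right after swapping the two subtree pointers:
 *
 *	btrfs_qgroup_add_swapped_blocks(trans, subvol_root, bg,
 *					subvol_parent, subvol_slot,
 *					reloc_parent, reloc_slot,
 *					last_snapshot);
 *
 *	Later, from btrfs_cow_block() on the subvolume tree:
 *
 *	btrfs_qgroup_trace_subtree_after_cow(trans, subvol_root, eb);
 */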
4097 */ 4098 WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG)); 4099 ret = -EEXIST; 4100 } 4101 kfree(block); 4102 goto out_unlock; 4103 } 4104 } 4105 rb_link_node(&block->node, parent, cur); 4106 rb_insert_color(&block->node, &blocks->blocks[level]); 4107 blocks->swapped = true; 4108 out_unlock: 4109 spin_unlock(&blocks->lock); 4110 out: 4111 if (ret < 0) 4112 fs_info->qgroup_flags |= 4113 BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 4114 return ret; 4115 } 4116 4117 /* 4118 * Check if the tree block is a subtree root, and if so do the needed 4119 * delayed subtree trace for qgroup. 4120 * 4121 * This is called during btrfs_cow_block(). 4122 */ 4123 int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans, 4124 struct btrfs_root *root, 4125 struct extent_buffer *subvol_eb) 4126 { 4127 struct btrfs_fs_info *fs_info = root->fs_info; 4128 struct btrfs_qgroup_swapped_blocks *blocks = &root->swapped_blocks; 4129 struct btrfs_qgroup_swapped_block *block; 4130 struct extent_buffer *reloc_eb = NULL; 4131 struct rb_node *node; 4132 bool found = false; 4133 bool swapped = false; 4134 int level = btrfs_header_level(subvol_eb); 4135 int ret = 0; 4136 int i; 4137 4138 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) 4139 return 0; 4140 if (!is_fstree(root->root_key.objectid) || !root->reloc_root) 4141 return 0; 4142 4143 spin_lock(&blocks->lock); 4144 if (!blocks->swapped) { 4145 spin_unlock(&blocks->lock); 4146 return 0; 4147 } 4148 node = blocks->blocks[level].rb_node; 4149 4150 while (node) { 4151 block = rb_entry(node, struct btrfs_qgroup_swapped_block, node); 4152 if (block->subvol_bytenr < subvol_eb->start) { 4153 node = node->rb_left; 4154 } else if (block->subvol_bytenr > subvol_eb->start) { 4155 node = node->rb_right; 4156 } else { 4157 found = true; 4158 break; 4159 } 4160 } 4161 if (!found) { 4162 spin_unlock(&blocks->lock); 4163 goto out; 4164 } 4165 /* Found one, remove it from @blocks first and update blocks->swapped */ 4166 rb_erase(&block->node, &blocks->blocks[level]); 4167 for (i = 0; i < BTRFS_MAX_LEVEL; i++) { 4168 if (RB_EMPTY_ROOT(&blocks->blocks[i])) { 4169 swapped = true; 4170 break; 4171 } 4172 } 4173 blocks->swapped = swapped; 4174 spin_unlock(&blocks->lock); 4175 4176 /* Read out reloc subtree root */ 4177 reloc_eb = read_tree_block(fs_info, block->reloc_bytenr, 4178 block->reloc_generation, block->level, 4179 &block->first_key); 4180 if (IS_ERR(reloc_eb)) { 4181 ret = PTR_ERR(reloc_eb); 4182 reloc_eb = NULL; 4183 goto free_out; 4184 } 4185 if (!extent_buffer_uptodate(reloc_eb)) { 4186 ret = -EIO; 4187 goto free_out; 4188 } 4189 4190 ret = qgroup_trace_subtree_swap(trans, reloc_eb, subvol_eb, 4191 block->last_snapshot, block->trace_leaf); 4192 free_out: 4193 kfree(block); 4194 free_extent_buffer(reloc_eb); 4195 out: 4196 if (ret < 0) { 4197 btrfs_err_rl(fs_info, 4198 "failed to account subtree at bytenr %llu: %d", 4199 subvol_eb->start, ret); 4200 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 4201 } 4202 return ret; 4203 } 4204 4205 void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans) 4206 { 4207 struct btrfs_qgroup_extent_record *entry; 4208 struct btrfs_qgroup_extent_record *next; 4209 struct rb_root *root; 4210 4211 root = &trans->delayed_refs.dirty_extent_root; 4212 rbtree_postorder_for_each_entry_safe(entry, next, root, node) { 4213 ulist_free(entry->old_roots); 4214 kfree(entry); 4215 } 4216 } 4217