/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "ioctl.h"
#include "backref.h"

/* TODO XXX FIXME
 * - subvol delete -> delete when ref goes to 0? delete limits also?
 * - reorganize keys
 * - compressed
 * - sync
 * - rescan
 * - copy also limits on subvol creation
 * - limit
 * - caches for ulists
 * - performance benchmarks
 * - check all ioctl parameters
 */

/*
 * one struct for each qgroup, organized in fs_info->qgroup_tree.
 */
struct btrfs_qgroup {
	u64 qgroupid;

	/*
	 * state
	 */
	u64 rfer;	/* referenced */
	u64 rfer_cmpr;	/* referenced compressed */
	u64 excl;	/* exclusive */
	u64 excl_cmpr;	/* exclusive compressed */

	/*
	 * limits
	 */
	u64 lim_flags;	/* which limits are set */
	u64 max_rfer;
	u64 max_excl;
	u64 rsv_rfer;
	u64 rsv_excl;

	/*
	 * reservation tracking
	 */
	u64 reserved;

	/*
	 * lists
	 */
	struct list_head groups;  /* groups this group is member of */
	struct list_head members; /* groups that are members of this group */
	struct list_head dirty;   /* dirty groups */
	struct rb_node node;	  /* tree of qgroups */

	/*
	 * temp variables for accounting operations
	 */
	u64 tag;
	u64 refcnt;
};

/*
 * glue structure to represent the relations between qgroups.
 */
struct btrfs_qgroup_list {
	struct list_head next_group;
	struct list_head next_member;
	struct btrfs_qgroup *group;
	struct btrfs_qgroup *member;
};
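
/*
 * Illustrative sketch, not a real helper: one btrfs_qgroup_list instance
 * represents a single parent/member edge and is linked into two lists at
 * once. Walking all parents of a qgroup follows the pattern used
 * throughout this file:
 *
 *	struct btrfs_qgroup_list *glist;
 *
 *	list_for_each_entry(glist, &qgroup->groups, next_group)
 *		visit_parent(glist->group);
 *
 * where visit_parent() is a placeholder. Walking &qgroup->members via
 * next_member yields glist->member, the child qgroups, the same way.
 */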

/*
 * must be called with qgroup_lock held. The tree is ordered by descending
 * qgroupid; add_qgroup_rb uses the same comparison direction.
 */
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
					   u64 qgroupid)
{
	struct rb_node *n = fs_info->qgroup_tree.rb_node;
	struct btrfs_qgroup *qgroup;

	while (n) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		if (qgroup->qgroupid < qgroupid)
			n = n->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			n = n->rb_right;
		else
			return qgroup;
	}
	return NULL;
}

/* must be called with qgroup_lock held */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
					  u64 qgroupid)
{
	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_qgroup *qgroup;

	while (*p) {
		parent = *p;
		qgroup = rb_entry(parent, struct btrfs_qgroup, node);

		if (qgroup->qgroupid < qgroupid)
			p = &(*p)->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			p = &(*p)->rb_right;
		else
			return qgroup;
	}

	/* GFP_ATOMIC: callers hold the qgroup_lock spinlock */
	qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
	if (!qgroup)
		return ERR_PTR(-ENOMEM);

	qgroup->qgroupid = qgroupid;
	INIT_LIST_HEAD(&qgroup->groups);
	INIT_LIST_HEAD(&qgroup->members);
	INIT_LIST_HEAD(&qgroup->dirty);

	rb_link_node(&qgroup->node, parent, p);
	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);

	return qgroup;
}

/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);
	struct btrfs_qgroup_list *list;

	if (!qgroup)
		return -ENOENT;

	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
	list_del(&qgroup->dirty);

	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}

	while (!list_empty(&qgroup->members)) {
		list = list_first_entry(&qgroup->members,
					struct btrfs_qgroup_list, next_member);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}
	kfree(qgroup);

	return 0;
}

/* must be called with qgroup_lock held */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list = kzalloc(sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	list->group = parent;
	list->member = member;
	list_add_tail(&list->next_group, &member->groups);
	list_add_tail(&list->next_member, &parent->members);

	return 0;
}

/* must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			list_del(&list->next_group);
			list_del(&list->next_member);
			kfree(list);
			return 0;
		}
	}
	return -ENOENT;
}

/*
 * The full config is read in one go, only called from open_ctree().
 * It doesn't use any locking, as at this point we're still single-threaded.
 */
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path = NULL;
	struct extent_buffer *l;
	int slot;
	int ret = 0;
	u64 flags = 0;

	if (!fs_info->quota_enabled)
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/* default this to quota off, in case no status key is found */
	fs_info->qgroup_flags = 0;

	/*
	 * pass 1: read status, all qgroup infos and limits
	 */
	key.objectid = 0;
	key.type = 0;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
	if (ret)
		goto out;

	while (1) {
		struct btrfs_qgroup *qgroup;

		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
			struct btrfs_qgroup_status_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_status_item);

			if (btrfs_qgroup_status_version(l, ptr) !=
			    BTRFS_QGROUP_STATUS_VERSION) {
				printk(KERN_ERR
				       "btrfs: old qgroup version, quota disabled\n");
				goto out;
			}
			if (btrfs_qgroup_status_generation(l, ptr) !=
			    fs_info->generation) {
				flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
				printk(KERN_ERR
				       "btrfs: qgroup generation mismatch, "
				       "marked as inconsistent\n");
			}
			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
									  ptr);
			/* FIXME read scan element */
			goto next1;
		}

		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
			goto next1;

		qgroup = find_qgroup_rb(fs_info, found_key.offset);
		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
			printk(KERN_ERR "btrfs: inconsistent qgroup config\n");
			flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		}
		if (!qgroup) {
			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				goto out;
			}
		}
		switch (found_key.type) {
		case BTRFS_QGROUP_INFO_KEY: {
			struct btrfs_qgroup_info_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_info_item);
			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
			/* generation currently unused */
			break;
		}
		case BTRFS_QGROUP_LIMIT_KEY: {
			struct btrfs_qgroup_limit_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_limit_item);
			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
			break;
		}
		}
next1:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
	btrfs_release_path(path);

	/*
	 * pass 2: read all qgroup relations
	 */
	key.objectid = 0;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
	if (ret)
		goto out;
	while (1) {
		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
			goto next2;

		if (found_key.objectid > found_key.offset) {
			/* parent <- member, not needed to build config */
			/* FIXME should we omit the key completely? */
			goto next2;
		}

		ret = add_relation_rb(fs_info, found_key.objectid,
				      found_key.offset);
		if (ret)
			goto out;
next2:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
out:
	fs_info->qgroup_flags |= flags;
	if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) {
		fs_info->quota_enabled = 0;
		fs_info->pending_quota_state = 0;
	}
	btrfs_free_path(path);

	return ret < 0 ? ret : 0;
}
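
/*
 * Informal sketch of the quota tree layout as used by the code in this
 * file (key triples are (objectid, type, offset)):
 *
 *	(0,   BTRFS_QGROUP_STATUS_KEY,   0)        global status item
 *	(0,   BTRFS_QGROUP_INFO_KEY,     qgroupid) usage counters
 *	(0,   BTRFS_QGROUP_LIMIT_KEY,    qgroupid) limits
 *	(src, BTRFS_QGROUP_RELATION_KEY, dst)      one item per direction
 *	                                           of each relation
 *
 * Pass 1 above therefore finds the status, info and limit items in one
 * contiguous objectid-0 range, and pass 2 only processes the half of
 * each relation pair with objectid <= offset; the mirrored item is
 * redundant for rebuilding the in-memory config.
 */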

/*
 * Called from close_ctree() and open_ctree(), both single-threaded, and
 * from btrfs_quota_disable(), which holds qgroup_lock. Cleans up the
 * in-memory structures.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *list;

	while ((n = rb_first(&fs_info->qgroup_tree))) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		rb_erase(n, &fs_info->qgroup_tree);

		WARN_ON(!list_empty(&qgroup->dirty));

		while (!list_empty(&qgroup->groups)) {
			list = list_first_entry(&qgroup->groups,
						struct btrfs_qgroup_list,
						next_group);
			list_del(&list->next_group);
			list_del(&list->next_member);
			kfree(list);
		}

		while (!list_empty(&qgroup->members)) {
			list = list_first_entry(&qgroup->members,
						struct btrfs_qgroup_list,
						next_member);
			list_del(&list->next_group);
			list_del(&list->next_member);
			kfree(list);
		}
		kfree(qgroup);
	}
}

static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *quota_root,
				    u64 src, u64 dst)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);

	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_relation_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *quota_root,
				    u64 src, u64 dst)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
out:
	btrfs_free_path(path);
	return ret;
}

static int add_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_qgroup_info_item *qgroup_info;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_info));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);

	btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_limit));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);

	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
	if (ret)
		goto out;

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root, u64 qgroupid,
				    u64 flags, u64 max_rfer, u64 max_excl,
				    u64 rsv_rfer, u64 rsv_excl)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_LIMIT_KEY;
	key.offset = qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_limit = btrfs_item_ptr(l, slot,
				      struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(l, qgroup_limit, flags);
	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, max_rfer);
	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, max_excl);
	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, rsv_rfer);
	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, rsv_excl);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_qgroup *qgroup)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_info_item *qgroup_info;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_info = btrfs_item_ptr(l, slot,
				     struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_status_item *ptr;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
	/* XXX scan */

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Called from btrfs_quota_disable(). Must not be called with qgroup_lock
 * held, as the b-tree operations below can block.
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	if (!root)
		return -EINVAL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		key.objectid = 0;
		key.offset = 0;
		key.type = 0;

		path->leave_spinning = 1;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		} else if (ret < 0) {
			break;
		}

		ret = btrfs_del_item(trans, root, path);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}
	ret = 0;
out:
	root->fs_info->pending_quota_state = 0;
	btrfs_free_path(path);
	return ret;
}
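
/*
 * Note on the enable/disable lifecycle implemented below: enabling
 * creates the quota tree with its status item and publishes it under
 * qgroup_lock, but only sets pending_quota_state; quota_enabled itself
 * flips in btrfs_run_qgroups() at commit time, so accounting starts on
 * a transaction boundary. Disabling clears the in-memory state
 * immediately and then deletes the quota tree.
 */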

int btrfs_quota_enable(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_path *path = NULL;
	struct btrfs_qgroup_status_item *ptr;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret = 0;

	spin_lock(&fs_info->qgroup_lock);
	if (fs_info->quota_root) {
		fs_info->pending_quota_state = 1;
		spin_unlock(&fs_info->qgroup_lock);
		goto out;
	}
	spin_unlock(&fs_info->qgroup_lock);

	/*
	 * initially create the quota tree
	 */
	quota_root = btrfs_create_tree(trans, fs_info,
				       BTRFS_QUOTA_TREE_OBJECTID);
	if (IS_ERR(quota_root)) {
		ret = PTR_ERR(quota_root);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*ptr));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	ptr = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
				BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_scan(leaf, ptr, 0);

	btrfs_mark_buffer_dirty(leaf);

	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_root = quota_root;
	fs_info->pending_quota_state = 1;
	spin_unlock(&fs_info->qgroup_lock);
out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_quota_disable(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *quota_root;
	int ret = 0;

	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_enabled = 0;
	fs_info->pending_quota_state = 0;
	quota_root = fs_info->quota_root;
	fs_info->quota_root = NULL;
	btrfs_free_qgroup_config(fs_info);
	spin_unlock(&fs_info->qgroup_lock);

	if (!quota_root)
		return -EINVAL;

	ret = btrfs_clean_quota_tree(trans, quota_root);
	if (ret)
		goto out;

	ret = btrfs_del_root(trans, tree_root, &quota_root->root_key);
	if (ret)
		goto out;

	list_del(&quota_root->dirty_list);

	btrfs_tree_lock(quota_root->node);
	clean_tree_block(trans, tree_root, quota_root->node);
	btrfs_tree_unlock(quota_root->node);
	btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);

	free_extent_buffer(quota_root->node);
	free_extent_buffer(quota_root->commit_root);
	kfree(quota_root);
out:
	return ret;
}

int btrfs_quota_rescan(struct btrfs_fs_info *fs_info)
{
	/* FIXME */
	return 0;
}

int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
	struct btrfs_root *quota_root;
	int ret = 0;

	quota_root = fs_info->quota_root;
	if (!quota_root)
		return -EINVAL;

	ret = add_qgroup_relation_item(trans, quota_root, src, dst);
	if (ret)
		return ret;

	ret = add_qgroup_relation_item(trans, quota_root, dst, src);
	if (ret) {
		del_qgroup_relation_item(trans, quota_root, src, dst);
		return ret;
	}

	spin_lock(&fs_info->qgroup_lock);
	ret = add_relation_rb(quota_root->fs_info, src, dst);
	spin_unlock(&fs_info->qgroup_lock);

	return ret;
}
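
/*
 * Hypothetical caller of the relation API above, for orientation only
 * (error handling elided; the item count passed to
 * btrfs_start_transaction is an assumption, not a rule):
 *
 *	trans = btrfs_start_transaction(fs_info->tree_root, 2);
 *	ret = btrfs_add_qgroup_relation(trans, fs_info, src, dst);
 *	btrfs_end_transaction(trans, fs_info->tree_root);
 */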

int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
	struct btrfs_root *quota_root;
	int ret = 0;
	int err;

	quota_root = fs_info->quota_root;
	if (!quota_root)
		return -EINVAL;

	ret = del_qgroup_relation_item(trans, quota_root, src, dst);
	err = del_qgroup_relation_item(trans, quota_root, dst, src);
	if (err && !ret)
		ret = err;

	spin_lock(&fs_info->qgroup_lock);
	del_relation_rb(fs_info, src, dst);
	spin_unlock(&fs_info->qgroup_lock);

	return ret;
}

int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info, u64 qgroupid, char *name)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;

	quota_root = fs_info->quota_root;
	if (!quota_root)
		return -EINVAL;

	ret = add_qgroup_item(trans, quota_root, qgroupid);

	spin_lock(&fs_info->qgroup_lock);
	qgroup = add_qgroup_rb(fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);

	if (IS_ERR(qgroup))
		ret = PTR_ERR(qgroup);

	return ret;
}

int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_root *quota_root;
	int ret = 0;

	quota_root = fs_info->quota_root;
	if (!quota_root)
		return -EINVAL;

	ret = del_qgroup_item(trans, quota_root, qgroupid);

	spin_lock(&fs_info->qgroup_lock);
	del_qgroup_rb(quota_root->fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);

	return ret;
}

int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info, u64 qgroupid,
		       struct btrfs_qgroup_limit *limit)
{
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;

	if (!quota_root)
		return -EINVAL;

	ret = update_qgroup_limit_item(trans, quota_root, qgroupid,
				       limit->flags, limit->max_rfer,
				       limit->max_excl, limit->rsv_rfer,
				       limit->rsv_excl);
	if (ret) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		printk(KERN_INFO "btrfs: unable to update quota limit for %llu\n",
		       (unsigned long long)qgroupid);
	}

	spin_lock(&fs_info->qgroup_lock);

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto unlock;
	}
	qgroup->lim_flags = limit->flags;
	qgroup->max_rfer = limit->max_rfer;
	qgroup->max_excl = limit->max_excl;
	qgroup->rsv_rfer = limit->rsv_rfer;
	qgroup->rsv_excl = limit->rsv_excl;

unlock:
	spin_unlock(&fs_info->qgroup_lock);

	return ret;
}

static void qgroup_dirty(struct btrfs_fs_info *fs_info,
			 struct btrfs_qgroup *qgroup)
{
	if (list_empty(&qgroup->dirty))
		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}

/*
 * btrfs_qgroup_record_ref is called when a ref is added or deleted. It puts
 * the modification into a list that's later used by btrfs_end_transaction to
 * pass the recorded modifications on to btrfs_qgroup_account_ref.
 */
int btrfs_qgroup_record_ref(struct btrfs_trans_handle *trans,
			    struct btrfs_delayed_ref_node *node,
			    struct btrfs_delayed_extent_op *extent_op)
{
	struct qgroup_update *u;

	BUG_ON(!trans->delayed_ref_elem.seq);
	u = kmalloc(sizeof(*u), GFP_NOFS);
	if (!u)
		return -ENOMEM;

	u->node = node;
	u->extent_op = extent_op;
	list_add_tail(&u->list, &trans->qgroup_ref_list);

	return 0;
}
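
/*
 * Minimal sketch of the consumer side (it lives outside this file and is
 * paraphrased here, not quoted): before a transaction commits, every
 * recorded update is replayed exactly once against the qgroup counters:
 *
 *	while (!list_empty(&trans->qgroup_ref_list)) {
 *		u = list_first_entry(&trans->qgroup_ref_list,
 *				     struct qgroup_update, list);
 *		btrfs_qgroup_account_ref(trans, fs_info, u->node,
 *					 u->extent_op);
 *		list_del(&u->list);
 *		kfree(u);
 *	}
 */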

/*
 * btrfs_qgroup_account_ref is called for every ref that is added to or
 * deleted from the fs. First, all roots referencing the extent are
 * searched, then the space is accounted to the different roots. The
 * accounting algorithm works in 3 steps documented inline.
 */
int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info,
			     struct btrfs_delayed_ref_node *node,
			     struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key ins;
	struct btrfs_root *quota_root;
	u64 ref_root;
	struct btrfs_qgroup *qgroup;
	struct ulist_node *unode;
	struct ulist *roots = NULL;
	struct ulist *tmp = NULL;
	struct ulist_iterator uiter;
	u64 seq;
	int ret = 0;
	int sgn;

	if (!fs_info->quota_enabled)
		return 0;

	BUG_ON(!fs_info->quota_root);

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    node->type == BTRFS_SHARED_BLOCK_REF_KEY) {
		struct btrfs_delayed_tree_ref *ref;
		ref = btrfs_delayed_node_to_tree_ref(node);
		ref_root = ref->root;
	} else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
		   node->type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_delayed_data_ref *ref;
		ref = btrfs_delayed_node_to_data_ref(node);
		ref_root = ref->root;
	} else {
		BUG();
	}

	if (!is_fstree(ref_root)) {
		/*
		 * non-fs-trees are not being accounted
		 */
		return 0;
	}

	switch (node->action) {
	case BTRFS_ADD_DELAYED_REF:
	case BTRFS_ADD_DELAYED_EXTENT:
		sgn = 1;
		break;
	case BTRFS_DROP_DELAYED_REF:
		sgn = -1;
		break;
	case BTRFS_UPDATE_DELAYED_HEAD:
		return 0;
	default:
		BUG();
	}

	/*
	 * the delayed ref sequence number we pass depends on the direction of
	 * the operation. for add operations, we pass (node->seq - 1) to skip
	 * the delayed ref's current sequence number, because we need the state
	 * of the tree before the add operation. for delete operations, we pass
	 * (node->seq) to include the delayed ref's current sequence number,
	 * because we need the state of the tree after the delete operation.
	 */
	ret = btrfs_find_all_roots(trans, fs_info, node->bytenr,
				   sgn > 0 ? node->seq - 1 : node->seq, &roots);
	if (ret < 0)
		goto out;

	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root)
		goto unlock;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto unlock;

	/*
	 * step 1: for each old ref, visit all nodes once and inc refcnt
	 */
	tmp = ulist_alloc(GFP_ATOMIC);
	if (!tmp) {
		ret = -ENOMEM;
		goto unlock;
	}
	seq = fs_info->qgroup_seq;
	fs_info->qgroup_seq += roots->nnodes + 1; /* max refcnt */

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(roots, &uiter))) {
		struct ulist_node *tmp_unode;
		struct ulist_iterator tmp_uiter;
		struct btrfs_qgroup *qg;

		qg = find_qgroup_rb(fs_info, unode->val);
		if (!qg)
			continue;

		ulist_reinit(tmp);
		/* XXX id not needed */
		ulist_add(tmp, qg->qgroupid, (unsigned long)qg, GFP_ATOMIC);
		ULIST_ITER_INIT(&tmp_uiter);
		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
			struct btrfs_qgroup_list *glist;

			qg = (struct btrfs_qgroup *)tmp_unode->aux;
			if (qg->refcnt < seq)
				qg->refcnt = seq + 1;
			else
				++qg->refcnt;

			list_for_each_entry(glist, &qg->groups, next_group) {
				ulist_add(tmp, glist->group->qgroupid,
					  (unsigned long)glist->group,
					  GFP_ATOMIC);
			}
		}
	}

	/*
	 * step 2: walk from the new root
	 */
	ulist_reinit(tmp);
	ulist_add(tmp, qgroup->qgroupid, (unsigned long)qgroup, GFP_ATOMIC);
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(tmp, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = (struct btrfs_qgroup *)unode->aux;
		if (qg->refcnt < seq) {
			/* not visited by step 1 */
			qg->rfer += sgn * node->num_bytes;
			qg->rfer_cmpr += sgn * node->num_bytes;
			if (roots->nnodes == 0) {
				qg->excl += sgn * node->num_bytes;
				qg->excl_cmpr += sgn * node->num_bytes;
			}
			qgroup_dirty(fs_info, qg);
		}
		WARN_ON(qg->tag >= seq);
		qg->tag = seq;

		list_for_each_entry(glist, &qg->groups, next_group) {
			ulist_add(tmp, glist->group->qgroupid,
				  (unsigned long)glist->group, GFP_ATOMIC);
		}
	}

	/*
	 * step 3: walk again from old refs
	 */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(roots, &uiter))) {
		struct btrfs_qgroup *qg;
		struct ulist_node *tmp_unode;
		struct ulist_iterator tmp_uiter;

		qg = find_qgroup_rb(fs_info, unode->val);
		if (!qg)
			continue;

		ulist_reinit(tmp);
		ulist_add(tmp, qg->qgroupid, (unsigned long)qg, GFP_ATOMIC);
		ULIST_ITER_INIT(&tmp_uiter);
		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
			struct btrfs_qgroup_list *glist;

			qg = (struct btrfs_qgroup *)tmp_unode->aux;
			if (qg->tag == seq)
				continue;

			if (qg->refcnt - seq == roots->nnodes) {
				qg->excl -= sgn * node->num_bytes;
				qg->excl_cmpr -= sgn * node->num_bytes;
				qgroup_dirty(fs_info, qg);
			}

			list_for_each_entry(glist, &qg->groups, next_group) {
				ulist_add(tmp, glist->group->qgroupid,
					  (unsigned long)glist->group,
					  GFP_ATOMIC);
			}
		}
	}
	ret = 0;
unlock:
	spin_unlock(&fs_info->qgroup_lock);
out:
	ulist_free(roots);
	ulist_free(tmp);

	return ret;
}
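
/*
 * Worked example for the three steps above, with illustrative numbers:
 * extent X (num_bytes = 4096) is referenced by roots A and B, both
 * members of parent qgroup P, and B drops its ref (sgn = -1). The root
 * search reflects the state after the drop, so roots = {A}, nnodes = 1.
 *
 * step 1: walking {A} upwards sets refcnt(A) = refcnt(P) = seq + 1.
 *
 * step 2: walking up from B (the ref_root): refcnt(B) < seq, so B was
 *	   not seen in step 1 and its rfer drops by 4096; P was seen
 *	   (it still reaches X through A) and stays untouched. Both get
 *	   tagged with seq.
 *
 * step 3: walking {A} and its parents again: P carries the tag and is
 *	   skipped; A is untagged and refcnt(A) - seq == nnodes, i.e.
 *	   all remaining roots reach X through A, so A's excl grows by
 *	   4096 (excl -= sgn * num_bytes with sgn = -1).
 */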

/*
 * called from commit_transaction. Writes all changed qgroups to disk.
 */
int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
		      struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root = fs_info->quota_root;
	int ret = 0;

	if (!quota_root)
		goto out;

	fs_info->quota_enabled = fs_info->pending_quota_state;

	spin_lock(&fs_info->qgroup_lock);
	while (!list_empty(&fs_info->dirty_qgroups)) {
		struct btrfs_qgroup *qgroup;
		qgroup = list_first_entry(&fs_info->dirty_qgroups,
					  struct btrfs_qgroup, dirty);
		list_del_init(&qgroup->dirty);
		spin_unlock(&fs_info->qgroup_lock);
		ret = update_qgroup_info_item(trans, quota_root, qgroup);
		if (ret)
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		spin_lock(&fs_info->qgroup_lock);
	}
	if (fs_info->quota_enabled)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
	else
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);

	ret = update_qgroup_status_item(trans, fs_info, quota_root);
	if (ret)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;

out:
	return ret;
}
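
/*
 * Layout of the variable part of struct btrfs_qgroup_inherit as consumed
 * by btrfs_qgroup_inherit() below: a u64 array follows the fixed struct,
 * holding num_qgroups ids of groups the new qgroup becomes member of,
 * then num_ref_copies (src, dst) pairs whose referenced counts are
 * copied, then num_excl_copies pairs for the exclusive counts:
 *
 *	u64 *i_qgroups = (u64 *)(inherit + 1);
 *
 *	| qgroupid x num_qgroups | src, dst x num_ref_copies |
 *	| src, dst x num_excl_copies |
 */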

/*
 * copy the accounting information between qgroups. This is necessary when a
 * snapshot or a subvolume is created
 */
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
			 struct btrfs_qgroup_inherit *inherit)
{
	int ret = 0;
	int i;
	u64 *i_qgroups;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_qgroup *srcgroup;
	struct btrfs_qgroup *dstgroup;
	u32 level_size = 0;

	if (!fs_info->quota_enabled)
		return 0;

	if (!quota_root)
		return -EINVAL;

	/*
	 * create a tracking group for the subvol itself
	 */
	ret = add_qgroup_item(trans, quota_root, objectid);
	if (ret)
		goto out;

	if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
		ret = update_qgroup_limit_item(trans, quota_root, objectid,
					       inherit->lim.flags,
					       inherit->lim.max_rfer,
					       inherit->lim.max_excl,
					       inherit->lim.rsv_rfer,
					       inherit->lim.rsv_excl);
		if (ret)
			goto out;
	}

	if (srcid) {
		struct btrfs_root *srcroot;
		struct btrfs_key srckey;
		int srcroot_level;

		srckey.objectid = srcid;
		srckey.type = BTRFS_ROOT_ITEM_KEY;
		srckey.offset = (u64)-1;
		srcroot = btrfs_read_fs_root_no_name(fs_info, &srckey);
		if (IS_ERR(srcroot)) {
			ret = PTR_ERR(srcroot);
			goto out;
		}

		rcu_read_lock();
		srcroot_level = btrfs_header_level(srcroot->node);
		level_size = btrfs_level_size(srcroot, srcroot_level);
		rcu_read_unlock();
	}

	/*
	 * add qgroup to all inherited groups
	 */
	if (inherit) {
		i_qgroups = (u64 *)(inherit + 1);
		for (i = 0; i < inherit->num_qgroups; ++i) {
			ret = add_qgroup_relation_item(trans, quota_root,
						       objectid, *i_qgroups);
			if (ret)
				goto out;
			ret = add_qgroup_relation_item(trans, quota_root,
						       *i_qgroups, objectid);
			if (ret)
				goto out;
			++i_qgroups;
		}
	}

	spin_lock(&fs_info->qgroup_lock);

	dstgroup = add_qgroup_rb(fs_info, objectid);
	if (IS_ERR(dstgroup)) {
		ret = PTR_ERR(dstgroup);
		goto unlock;
	}

	if (srcid) {
		srcgroup = find_qgroup_rb(fs_info, srcid);
		if (!srcgroup) {
			ret = -EINVAL;
			goto unlock;
		}
		dstgroup->rfer = srcgroup->rfer - level_size;
		dstgroup->rfer_cmpr = srcgroup->rfer_cmpr - level_size;
		srcgroup->excl = level_size;
		srcgroup->excl_cmpr = level_size;
		qgroup_dirty(fs_info, dstgroup);
		qgroup_dirty(fs_info, srcgroup);
	}

	/* a missing inherit structure is fine; we're done in that case */
	if (!inherit)
		goto unlock;

	i_qgroups = (u64 *)(inherit + 1);
	for (i = 0; i < inherit->num_qgroups; ++i) {
		ret = add_relation_rb(quota_root->fs_info, objectid,
				      *i_qgroups);
		if (ret)
			goto unlock;
		++i_qgroups;
	}

	for (i = 0; i < inherit->num_ref_copies; ++i) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;

		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);

		if (!src || !dst) {
			ret = -EINVAL;
			goto unlock;
		}

		dst->rfer = src->rfer - level_size;
		dst->rfer_cmpr = src->rfer_cmpr - level_size;
		i_qgroups += 2;
	}
	for (i = 0; i < inherit->num_excl_copies; ++i) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;

		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);

		if (!src || !dst) {
			ret = -EINVAL;
			goto unlock;
		}

		dst->excl = src->excl + level_size;
		dst->excl_cmpr = src->excl_cmpr + level_size;
		i_qgroups += 2;
	}

unlock:
	spin_unlock(&fs_info->qgroup_lock);
out:
	return ret;
}
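
/*
 * Worked example for the level_size arithmetic in btrfs_qgroup_inherit(),
 * with illustrative numbers: snapshotting a subvol whose qgroup shows
 * rfer = 1 GiB with a 16 KiB tree block size starts the snapshot at
 * rfer = 1 GiB - 16 KiB: everything is shared except the root node, of
 * which each tree has its own copy. By the same reasoning the source
 * keeps exactly one tree block exclusive (excl = 16 KiB). The snapshot's
 * own new root node is presumably picked up through the normal
 * delayed-ref accounting path.
 */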

/*
 * reserve some space for a qgroup and all its parents. The reservation takes
 * place with start_transaction or dealloc_reserve, similar to ENOSPC
 * accounting. If not enough space is available, -EDQUOT is returned.
 * We assume that the requested space is new for all qgroups.
 */
int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 ref_root = root->root_key.objectid;
	int ret = 0;
	struct ulist *ulist = NULL;
	struct ulist_node *unode;
	struct ulist_iterator uiter;

	if (!is_fstree(ref_root))
		return 0;

	if (num_bytes == 0)
		return 0;

	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	/*
	 * in a first step, we check all affected qgroups if any limits would
	 * be exceeded
	 */
	ulist = ulist_alloc(GFP_ATOMIC);
	if (!ulist) {
		ret = -ENOMEM;
		goto out;
	}
	ulist_add(ulist, qgroup->qgroupid, (unsigned long)qgroup, GFP_ATOMIC);
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = (struct btrfs_qgroup *)unode->aux;

		if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
		    qg->reserved + qg->rfer + num_bytes >
		    qg->max_rfer)
			ret = -EDQUOT;

		if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
		    qg->reserved + qg->excl + num_bytes >
		    qg->max_excl)
			ret = -EDQUOT;

		list_for_each_entry(glist, &qg->groups, next_group) {
			ulist_add(ulist, glist->group->qgroupid,
				  (unsigned long)glist->group, GFP_ATOMIC);
		}
	}
	if (ret)
		goto out;

	/*
	 * no limits exceeded, now record the reservation into all qgroups
	 */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(ulist, &uiter))) {
		struct btrfs_qgroup *qg;

		qg = (struct btrfs_qgroup *)unode->aux;

		qg->reserved += num_bytes;
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
	ulist_free(ulist);

	return ret;
}

void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct ulist *ulist = NULL;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	u64 ref_root = root->root_key.objectid;

	if (!is_fstree(ref_root))
		return;

	if (num_bytes == 0)
		return;

	spin_lock(&fs_info->qgroup_lock);

	quota_root = fs_info->quota_root;
	if (!quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	ulist = ulist_alloc(GFP_ATOMIC);
	if (!ulist) {
		/* nothing we can do here; the reservation is leaked */
		goto out;
	}
	ulist_add(ulist, qgroup->qgroupid, (unsigned long)qgroup, GFP_ATOMIC);
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = (struct btrfs_qgroup *)unode->aux;

		qg->reserved -= num_bytes;

		list_for_each_entry(glist, &qg->groups, next_group) {
			ulist_add(ulist, glist->group->qgroupid,
				  (unsigned long)glist->group, GFP_ATOMIC);
		}
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
	ulist_free(ulist);
}
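
/*
 * Worked example for the limit checks in btrfs_qgroup_reserve(), with
 * illustrative numbers: a qgroup with BTRFS_QGROUP_LIMIT_MAX_RFER set,
 * max_rfer = 1 MiB, rfer = 768 KiB and reserved = 128 KiB can still
 * reserve up to 128 KiB; a request for 256 KiB fails with -EDQUOT
 * because 128K + 768K + 256K > 1M. The same check runs for every parent
 * on the way up, so the tightest limit in the hierarchy wins. Each
 * successful reservation is expected to be balanced by a later
 * btrfs_qgroup_free() with the same num_bytes.
 */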

void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
{
	if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
		return;
	printk(KERN_ERR "btrfs: qgroups not uptodate in trans handle %p: list is%s empty, seq is %llu\n",
	       trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
	       trans->delayed_ref_elem.seq);
	BUG();
}