1 /* 2 * Copyright (C) 2008 Red Hat. All rights reserved. 3 * 4 * This program is free software; you can redistribute it and/or 5 * modify it under the terms of the GNU General Public 6 * License v2 as published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope that it will be useful, 9 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 * General Public License for more details. 12 * 13 * You should have received a copy of the GNU General Public 14 * License along with this program; if not, write to the 15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 16 * Boston, MA 021110-1307, USA. 17 */ 18 19 #include <linux/pagemap.h> 20 #include <linux/sched.h> 21 #include <linux/slab.h> 22 #include <linux/math64.h> 23 #include <linux/ratelimit.h> 24 #include "ctree.h" 25 #include "free-space-cache.h" 26 #include "transaction.h" 27 #include "disk-io.h" 28 #include "extent_io.h" 29 #include "inode-map.h" 30 31 #define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8) 32 #define MAX_CACHE_BYTES_PER_GIG (32 * 1024) 33 34 static int link_free_space(struct btrfs_free_space_ctl *ctl, 35 struct btrfs_free_space *info); 36 static void unlink_free_space(struct btrfs_free_space_ctl *ctl, 37 struct btrfs_free_space *info); 38 39 static struct inode *__lookup_free_space_inode(struct btrfs_root *root, 40 struct btrfs_path *path, 41 u64 offset) 42 { 43 struct btrfs_key key; 44 struct btrfs_key location; 45 struct btrfs_disk_key disk_key; 46 struct btrfs_free_space_header *header; 47 struct extent_buffer *leaf; 48 struct inode *inode = NULL; 49 int ret; 50 51 key.objectid = BTRFS_FREE_SPACE_OBJECTID; 52 key.offset = offset; 53 key.type = 0; 54 55 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 56 if (ret < 0) 57 return ERR_PTR(ret); 58 if (ret > 0) { 59 btrfs_release_path(path); 60 return ERR_PTR(-ENOENT); 61 } 62 63 leaf = path->nodes[0]; 64 header = btrfs_item_ptr(leaf, path->slots[0], 65 struct btrfs_free_space_header); 66 btrfs_free_space_key(leaf, header, &disk_key); 67 btrfs_disk_key_to_cpu(&location, &disk_key); 68 btrfs_release_path(path); 69 70 inode = btrfs_iget(root->fs_info->sb, &location, root, NULL); 71 if (!inode) 72 return ERR_PTR(-ENOENT); 73 if (IS_ERR(inode)) 74 return inode; 75 if (is_bad_inode(inode)) { 76 iput(inode); 77 return ERR_PTR(-ENOENT); 78 } 79 80 mapping_set_gfp_mask(inode->i_mapping, 81 mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS); 82 83 return inode; 84 } 85 86 struct inode *lookup_free_space_inode(struct btrfs_root *root, 87 struct btrfs_block_group_cache 88 *block_group, struct btrfs_path *path) 89 { 90 struct inode *inode = NULL; 91 u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW; 92 93 spin_lock(&block_group->lock); 94 if (block_group->inode) 95 inode = igrab(block_group->inode); 96 spin_unlock(&block_group->lock); 97 if (inode) 98 return inode; 99 100 inode = __lookup_free_space_inode(root, path, 101 block_group->key.objectid); 102 if (IS_ERR(inode)) 103 return inode; 104 105 spin_lock(&block_group->lock); 106 if (!((BTRFS_I(inode)->flags & flags) == flags)) { 107 btrfs_info(root->fs_info, 108 "Old style space inode found, converting."); 109 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM | 110 BTRFS_INODE_NODATACOW; 111 block_group->disk_cache_state = BTRFS_DC_CLEAR; 112 } 113 114 if (!block_group->iref) { 115 block_group->inode = igrab(inode); 116 block_group->iref = 1; 117 } 118 spin_unlock(&block_group->lock); 119 120 return inode; 121 } 122 123 static int __create_free_space_inode(struct btrfs_root *root, 124 struct btrfs_trans_handle *trans, 125 struct btrfs_path *path, 126 u64 ino, u64 offset) 127 { 128 struct btrfs_key key; 129 struct btrfs_disk_key disk_key; 130 struct btrfs_free_space_header *header; 131 struct btrfs_inode_item *inode_item; 132 struct extent_buffer *leaf; 133 u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC; 134 int ret; 135 136 ret = btrfs_insert_empty_inode(trans, root, path, ino); 137 if (ret) 138 return ret; 139 140 /* We inline crc's for the free disk space cache */ 141 if (ino != BTRFS_FREE_INO_OBJECTID) 142 flags |= BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW; 143 144 leaf = path->nodes[0]; 145 inode_item = btrfs_item_ptr(leaf, path->slots[0], 146 struct btrfs_inode_item); 147 btrfs_item_key(leaf, &disk_key, path->slots[0]); 148 memset_extent_buffer(leaf, 0, (unsigned long)inode_item, 149 sizeof(*inode_item)); 150 btrfs_set_inode_generation(leaf, inode_item, trans->transid); 151 btrfs_set_inode_size(leaf, inode_item, 0); 152 btrfs_set_inode_nbytes(leaf, inode_item, 0); 153 btrfs_set_inode_uid(leaf, inode_item, 0); 154 btrfs_set_inode_gid(leaf, inode_item, 0); 155 btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600); 156 btrfs_set_inode_flags(leaf, inode_item, flags); 157 btrfs_set_inode_nlink(leaf, inode_item, 1); 158 btrfs_set_inode_transid(leaf, inode_item, trans->transid); 159 btrfs_set_inode_block_group(leaf, inode_item, offset); 160 btrfs_mark_buffer_dirty(leaf); 161 btrfs_release_path(path); 162 163 key.objectid = BTRFS_FREE_SPACE_OBJECTID; 164 key.offset = offset; 165 key.type = 0; 166 167 ret = btrfs_insert_empty_item(trans, root, path, &key, 168 sizeof(struct btrfs_free_space_header)); 169 if (ret < 0) { 170 btrfs_release_path(path); 171 return ret; 172 } 173 leaf = path->nodes[0]; 174 header = btrfs_item_ptr(leaf, path->slots[0], 175 struct btrfs_free_space_header); 176 memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header)); 177 btrfs_set_free_space_key(leaf, header, &disk_key); 178 btrfs_mark_buffer_dirty(leaf); 179 btrfs_release_path(path); 180 181 return 0; 182 } 183 184 int create_free_space_inode(struct btrfs_root *root, 185 struct btrfs_trans_handle *trans, 186 struct btrfs_block_group_cache *block_group, 187 struct btrfs_path *path) 188 { 189 int ret; 190 u64 ino; 191 192 ret = btrfs_find_free_objectid(root, &ino); 193 if (ret < 0) 194 return ret; 195 196 return __create_free_space_inode(root, trans, path, ino, 197 block_group->key.objectid); 198 } 199 200 int btrfs_check_trunc_cache_free_space(struct btrfs_root *root, 201 struct btrfs_block_rsv *rsv) 202 { 203 u64 needed_bytes; 204 int ret; 205 206 /* 1 for slack space, 1 for updating the inode */ 207 needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) + 208 btrfs_calc_trans_metadata_size(root, 1); 209 210 spin_lock(&rsv->lock); 211 if (rsv->reserved < needed_bytes) 212 ret = -ENOSPC; 213 else 214 ret = 0; 215 spin_unlock(&rsv->lock); 216 return ret; 217 } 218 219 int btrfs_truncate_free_space_cache(struct btrfs_root *root, 220 struct btrfs_trans_handle *trans, 221 struct inode *inode) 222 { 223 int ret = 0; 224 225 btrfs_i_size_write(inode, 0); 226 truncate_pagecache(inode, 0); 227 228 /* 229 * We don't need an orphan item because truncating the free space cache 230 * will never be split across transactions. 231 */ 232 ret = btrfs_truncate_inode_items(trans, root, inode, 233 0, BTRFS_EXTENT_DATA_KEY); 234 if (ret) { 235 btrfs_abort_transaction(trans, root, ret); 236 return ret; 237 } 238 239 ret = btrfs_update_inode(trans, root, inode); 240 if (ret) 241 btrfs_abort_transaction(trans, root, ret); 242 243 return ret; 244 } 245 246 static int readahead_cache(struct inode *inode) 247 { 248 struct file_ra_state *ra; 249 unsigned long last_index; 250 251 ra = kzalloc(sizeof(*ra), GFP_NOFS); 252 if (!ra) 253 return -ENOMEM; 254 255 file_ra_state_init(ra, inode->i_mapping); 256 last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; 257 258 page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index); 259 260 kfree(ra); 261 262 return 0; 263 } 264 265 struct io_ctl { 266 void *cur, *orig; 267 struct page *page; 268 struct page **pages; 269 struct btrfs_root *root; 270 unsigned long size; 271 int index; 272 int num_pages; 273 unsigned check_crcs:1; 274 }; 275 276 static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode, 277 struct btrfs_root *root) 278 { 279 memset(io_ctl, 0, sizeof(struct io_ctl)); 280 io_ctl->num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> 281 PAGE_CACHE_SHIFT; 282 io_ctl->pages = kzalloc(sizeof(struct page *) * io_ctl->num_pages, 283 GFP_NOFS); 284 if (!io_ctl->pages) 285 return -ENOMEM; 286 io_ctl->root = root; 287 if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID) 288 io_ctl->check_crcs = 1; 289 return 0; 290 } 291 292 static void io_ctl_free(struct io_ctl *io_ctl) 293 { 294 kfree(io_ctl->pages); 295 } 296 297 static void io_ctl_unmap_page(struct io_ctl *io_ctl) 298 { 299 if (io_ctl->cur) { 300 kunmap(io_ctl->page); 301 io_ctl->cur = NULL; 302 io_ctl->orig = NULL; 303 } 304 } 305 306 static void io_ctl_map_page(struct io_ctl *io_ctl, int clear) 307 { 308 ASSERT(io_ctl->index < io_ctl->num_pages); 309 io_ctl->page = io_ctl->pages[io_ctl->index++]; 310 io_ctl->cur = kmap(io_ctl->page); 311 io_ctl->orig = io_ctl->cur; 312 io_ctl->size = PAGE_CACHE_SIZE; 313 if (clear) 314 memset(io_ctl->cur, 0, PAGE_CACHE_SIZE); 315 } 316 317 static void io_ctl_drop_pages(struct io_ctl *io_ctl) 318 { 319 int i; 320 321 io_ctl_unmap_page(io_ctl); 322 323 for (i = 0; i < io_ctl->num_pages; i++) { 324 if (io_ctl->pages[i]) { 325 ClearPageChecked(io_ctl->pages[i]); 326 unlock_page(io_ctl->pages[i]); 327 page_cache_release(io_ctl->pages[i]); 328 } 329 } 330 } 331 332 static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode, 333 int uptodate) 334 { 335 struct page *page; 336 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); 337 int i; 338 339 for (i = 0; i < io_ctl->num_pages; i++) { 340 page = find_or_create_page(inode->i_mapping, i, mask); 341 if (!page) { 342 io_ctl_drop_pages(io_ctl); 343 return -ENOMEM; 344 } 345 io_ctl->pages[i] = page; 346 if (uptodate && !PageUptodate(page)) { 347 btrfs_readpage(NULL, page); 348 lock_page(page); 349 if (!PageUptodate(page)) { 350 btrfs_err(BTRFS_I(inode)->root->fs_info, 351 "error reading free space cache"); 352 io_ctl_drop_pages(io_ctl); 353 return -EIO; 354 } 355 } 356 } 357 358 for (i = 0; i < io_ctl->num_pages; i++) { 359 clear_page_dirty_for_io(io_ctl->pages[i]); 360 set_page_extent_mapped(io_ctl->pages[i]); 361 } 362 363 return 0; 364 } 365 366 static void io_ctl_set_generation(struct io_ctl *io_ctl, u64 generation) 367 { 368 __le64 *val; 369 370 io_ctl_map_page(io_ctl, 1); 371 372 /* 373 * Skip the csum areas. If we don't check crcs then we just have a 374 * 64bit chunk at the front of the first page. 375 */ 376 if (io_ctl->check_crcs) { 377 io_ctl->cur += (sizeof(u32) * io_ctl->num_pages); 378 io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages); 379 } else { 380 io_ctl->cur += sizeof(u64); 381 io_ctl->size -= sizeof(u64) * 2; 382 } 383 384 val = io_ctl->cur; 385 *val = cpu_to_le64(generation); 386 io_ctl->cur += sizeof(u64); 387 } 388 389 static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation) 390 { 391 __le64 *gen; 392 393 /* 394 * Skip the crc area. If we don't check crcs then we just have a 64bit 395 * chunk at the front of the first page. 396 */ 397 if (io_ctl->check_crcs) { 398 io_ctl->cur += sizeof(u32) * io_ctl->num_pages; 399 io_ctl->size -= sizeof(u64) + 400 (sizeof(u32) * io_ctl->num_pages); 401 } else { 402 io_ctl->cur += sizeof(u64); 403 io_ctl->size -= sizeof(u64) * 2; 404 } 405 406 gen = io_ctl->cur; 407 if (le64_to_cpu(*gen) != generation) { 408 printk_ratelimited(KERN_ERR "BTRFS: space cache generation " 409 "(%Lu) does not match inode (%Lu)\n", *gen, 410 generation); 411 io_ctl_unmap_page(io_ctl); 412 return -EIO; 413 } 414 io_ctl->cur += sizeof(u64); 415 return 0; 416 } 417 418 static void io_ctl_set_crc(struct io_ctl *io_ctl, int index) 419 { 420 u32 *tmp; 421 u32 crc = ~(u32)0; 422 unsigned offset = 0; 423 424 if (!io_ctl->check_crcs) { 425 io_ctl_unmap_page(io_ctl); 426 return; 427 } 428 429 if (index == 0) 430 offset = sizeof(u32) * io_ctl->num_pages; 431 432 crc = btrfs_csum_data(io_ctl->orig + offset, crc, 433 PAGE_CACHE_SIZE - offset); 434 btrfs_csum_final(crc, (char *)&crc); 435 io_ctl_unmap_page(io_ctl); 436 tmp = kmap(io_ctl->pages[0]); 437 tmp += index; 438 *tmp = crc; 439 kunmap(io_ctl->pages[0]); 440 } 441 442 static int io_ctl_check_crc(struct io_ctl *io_ctl, int index) 443 { 444 u32 *tmp, val; 445 u32 crc = ~(u32)0; 446 unsigned offset = 0; 447 448 if (!io_ctl->check_crcs) { 449 io_ctl_map_page(io_ctl, 0); 450 return 0; 451 } 452 453 if (index == 0) 454 offset = sizeof(u32) * io_ctl->num_pages; 455 456 tmp = kmap(io_ctl->pages[0]); 457 tmp += index; 458 val = *tmp; 459 kunmap(io_ctl->pages[0]); 460 461 io_ctl_map_page(io_ctl, 0); 462 crc = btrfs_csum_data(io_ctl->orig + offset, crc, 463 PAGE_CACHE_SIZE - offset); 464 btrfs_csum_final(crc, (char *)&crc); 465 if (val != crc) { 466 printk_ratelimited(KERN_ERR "BTRFS: csum mismatch on free " 467 "space cache\n"); 468 io_ctl_unmap_page(io_ctl); 469 return -EIO; 470 } 471 472 return 0; 473 } 474 475 static int io_ctl_add_entry(struct io_ctl *io_ctl, u64 offset, u64 bytes, 476 void *bitmap) 477 { 478 struct btrfs_free_space_entry *entry; 479 480 if (!io_ctl->cur) 481 return -ENOSPC; 482 483 entry = io_ctl->cur; 484 entry->offset = cpu_to_le64(offset); 485 entry->bytes = cpu_to_le64(bytes); 486 entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP : 487 BTRFS_FREE_SPACE_EXTENT; 488 io_ctl->cur += sizeof(struct btrfs_free_space_entry); 489 io_ctl->size -= sizeof(struct btrfs_free_space_entry); 490 491 if (io_ctl->size >= sizeof(struct btrfs_free_space_entry)) 492 return 0; 493 494 io_ctl_set_crc(io_ctl, io_ctl->index - 1); 495 496 /* No more pages to map */ 497 if (io_ctl->index >= io_ctl->num_pages) 498 return 0; 499 500 /* map the next page */ 501 io_ctl_map_page(io_ctl, 1); 502 return 0; 503 } 504 505 static int io_ctl_add_bitmap(struct io_ctl *io_ctl, void *bitmap) 506 { 507 if (!io_ctl->cur) 508 return -ENOSPC; 509 510 /* 511 * If we aren't at the start of the current page, unmap this one and 512 * map the next one if there is any left. 513 */ 514 if (io_ctl->cur != io_ctl->orig) { 515 io_ctl_set_crc(io_ctl, io_ctl->index - 1); 516 if (io_ctl->index >= io_ctl->num_pages) 517 return -ENOSPC; 518 io_ctl_map_page(io_ctl, 0); 519 } 520 521 memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE); 522 io_ctl_set_crc(io_ctl, io_ctl->index - 1); 523 if (io_ctl->index < io_ctl->num_pages) 524 io_ctl_map_page(io_ctl, 0); 525 return 0; 526 } 527 528 static void io_ctl_zero_remaining_pages(struct io_ctl *io_ctl) 529 { 530 /* 531 * If we're not on the boundary we know we've modified the page and we 532 * need to crc the page. 533 */ 534 if (io_ctl->cur != io_ctl->orig) 535 io_ctl_set_crc(io_ctl, io_ctl->index - 1); 536 else 537 io_ctl_unmap_page(io_ctl); 538 539 while (io_ctl->index < io_ctl->num_pages) { 540 io_ctl_map_page(io_ctl, 1); 541 io_ctl_set_crc(io_ctl, io_ctl->index - 1); 542 } 543 } 544 545 static int io_ctl_read_entry(struct io_ctl *io_ctl, 546 struct btrfs_free_space *entry, u8 *type) 547 { 548 struct btrfs_free_space_entry *e; 549 int ret; 550 551 if (!io_ctl->cur) { 552 ret = io_ctl_check_crc(io_ctl, io_ctl->index); 553 if (ret) 554 return ret; 555 } 556 557 e = io_ctl->cur; 558 entry->offset = le64_to_cpu(e->offset); 559 entry->bytes = le64_to_cpu(e->bytes); 560 *type = e->type; 561 io_ctl->cur += sizeof(struct btrfs_free_space_entry); 562 io_ctl->size -= sizeof(struct btrfs_free_space_entry); 563 564 if (io_ctl->size >= sizeof(struct btrfs_free_space_entry)) 565 return 0; 566 567 io_ctl_unmap_page(io_ctl); 568 569 return 0; 570 } 571 572 static int io_ctl_read_bitmap(struct io_ctl *io_ctl, 573 struct btrfs_free_space *entry) 574 { 575 int ret; 576 577 ret = io_ctl_check_crc(io_ctl, io_ctl->index); 578 if (ret) 579 return ret; 580 581 memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE); 582 io_ctl_unmap_page(io_ctl); 583 584 return 0; 585 } 586 587 /* 588 * Since we attach pinned extents after the fact we can have contiguous sections 589 * of free space that are split up in entries. This poses a problem with the 590 * tree logging stuff since it could have allocated across what appears to be 2 591 * entries since we would have merged the entries when adding the pinned extents 592 * back to the free space cache. So run through the space cache that we just 593 * loaded and merge contiguous entries. This will make the log replay stuff not 594 * blow up and it will make for nicer allocator behavior. 595 */ 596 static void merge_space_tree(struct btrfs_free_space_ctl *ctl) 597 { 598 struct btrfs_free_space *e, *prev = NULL; 599 struct rb_node *n; 600 601 again: 602 spin_lock(&ctl->tree_lock); 603 for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) { 604 e = rb_entry(n, struct btrfs_free_space, offset_index); 605 if (!prev) 606 goto next; 607 if (e->bitmap || prev->bitmap) 608 goto next; 609 if (prev->offset + prev->bytes == e->offset) { 610 unlink_free_space(ctl, prev); 611 unlink_free_space(ctl, e); 612 prev->bytes += e->bytes; 613 kmem_cache_free(btrfs_free_space_cachep, e); 614 link_free_space(ctl, prev); 615 prev = NULL; 616 spin_unlock(&ctl->tree_lock); 617 goto again; 618 } 619 next: 620 prev = e; 621 } 622 spin_unlock(&ctl->tree_lock); 623 } 624 625 static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, 626 struct btrfs_free_space_ctl *ctl, 627 struct btrfs_path *path, u64 offset) 628 { 629 struct btrfs_free_space_header *header; 630 struct extent_buffer *leaf; 631 struct io_ctl io_ctl; 632 struct btrfs_key key; 633 struct btrfs_free_space *e, *n; 634 struct list_head bitmaps; 635 u64 num_entries; 636 u64 num_bitmaps; 637 u64 generation; 638 u8 type; 639 int ret = 0; 640 641 INIT_LIST_HEAD(&bitmaps); 642 643 /* Nothing in the space cache, goodbye */ 644 if (!i_size_read(inode)) 645 return 0; 646 647 key.objectid = BTRFS_FREE_SPACE_OBJECTID; 648 key.offset = offset; 649 key.type = 0; 650 651 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 652 if (ret < 0) 653 return 0; 654 else if (ret > 0) { 655 btrfs_release_path(path); 656 return 0; 657 } 658 659 ret = -1; 660 661 leaf = path->nodes[0]; 662 header = btrfs_item_ptr(leaf, path->slots[0], 663 struct btrfs_free_space_header); 664 num_entries = btrfs_free_space_entries(leaf, header); 665 num_bitmaps = btrfs_free_space_bitmaps(leaf, header); 666 generation = btrfs_free_space_generation(leaf, header); 667 btrfs_release_path(path); 668 669 if (BTRFS_I(inode)->generation != generation) { 670 btrfs_err(root->fs_info, 671 "free space inode generation (%llu) " 672 "did not match free space cache generation (%llu)", 673 BTRFS_I(inode)->generation, generation); 674 return 0; 675 } 676 677 if (!num_entries) 678 return 0; 679 680 ret = io_ctl_init(&io_ctl, inode, root); 681 if (ret) 682 return ret; 683 684 ret = readahead_cache(inode); 685 if (ret) 686 goto out; 687 688 ret = io_ctl_prepare_pages(&io_ctl, inode, 1); 689 if (ret) 690 goto out; 691 692 ret = io_ctl_check_crc(&io_ctl, 0); 693 if (ret) 694 goto free_cache; 695 696 ret = io_ctl_check_generation(&io_ctl, generation); 697 if (ret) 698 goto free_cache; 699 700 while (num_entries) { 701 e = kmem_cache_zalloc(btrfs_free_space_cachep, 702 GFP_NOFS); 703 if (!e) 704 goto free_cache; 705 706 ret = io_ctl_read_entry(&io_ctl, e, &type); 707 if (ret) { 708 kmem_cache_free(btrfs_free_space_cachep, e); 709 goto free_cache; 710 } 711 712 if (!e->bytes) { 713 kmem_cache_free(btrfs_free_space_cachep, e); 714 goto free_cache; 715 } 716 717 if (type == BTRFS_FREE_SPACE_EXTENT) { 718 spin_lock(&ctl->tree_lock); 719 ret = link_free_space(ctl, e); 720 spin_unlock(&ctl->tree_lock); 721 if (ret) { 722 btrfs_err(root->fs_info, 723 "Duplicate entries in free space cache, dumping"); 724 kmem_cache_free(btrfs_free_space_cachep, e); 725 goto free_cache; 726 } 727 } else { 728 ASSERT(num_bitmaps); 729 num_bitmaps--; 730 e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); 731 if (!e->bitmap) { 732 kmem_cache_free( 733 btrfs_free_space_cachep, e); 734 goto free_cache; 735 } 736 spin_lock(&ctl->tree_lock); 737 ret = link_free_space(ctl, e); 738 ctl->total_bitmaps++; 739 ctl->op->recalc_thresholds(ctl); 740 spin_unlock(&ctl->tree_lock); 741 if (ret) { 742 btrfs_err(root->fs_info, 743 "Duplicate entries in free space cache, dumping"); 744 kmem_cache_free(btrfs_free_space_cachep, e); 745 goto free_cache; 746 } 747 list_add_tail(&e->list, &bitmaps); 748 } 749 750 num_entries--; 751 } 752 753 io_ctl_unmap_page(&io_ctl); 754 755 /* 756 * We add the bitmaps at the end of the entries in order that 757 * the bitmap entries are added to the cache. 758 */ 759 list_for_each_entry_safe(e, n, &bitmaps, list) { 760 list_del_init(&e->list); 761 ret = io_ctl_read_bitmap(&io_ctl, e); 762 if (ret) 763 goto free_cache; 764 } 765 766 io_ctl_drop_pages(&io_ctl); 767 merge_space_tree(ctl); 768 ret = 1; 769 out: 770 io_ctl_free(&io_ctl); 771 return ret; 772 free_cache: 773 io_ctl_drop_pages(&io_ctl); 774 __btrfs_remove_free_space_cache(ctl); 775 goto out; 776 } 777 778 int load_free_space_cache(struct btrfs_fs_info *fs_info, 779 struct btrfs_block_group_cache *block_group) 780 { 781 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 782 struct btrfs_root *root = fs_info->tree_root; 783 struct inode *inode; 784 struct btrfs_path *path; 785 int ret = 0; 786 bool matched; 787 u64 used = btrfs_block_group_used(&block_group->item); 788 789 /* 790 * If this block group has been marked to be cleared for one reason or 791 * another then we can't trust the on disk cache, so just return. 792 */ 793 spin_lock(&block_group->lock); 794 if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { 795 spin_unlock(&block_group->lock); 796 return 0; 797 } 798 spin_unlock(&block_group->lock); 799 800 path = btrfs_alloc_path(); 801 if (!path) 802 return 0; 803 path->search_commit_root = 1; 804 path->skip_locking = 1; 805 806 inode = lookup_free_space_inode(root, block_group, path); 807 if (IS_ERR(inode)) { 808 btrfs_free_path(path); 809 return 0; 810 } 811 812 /* We may have converted the inode and made the cache invalid. */ 813 spin_lock(&block_group->lock); 814 if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { 815 spin_unlock(&block_group->lock); 816 btrfs_free_path(path); 817 goto out; 818 } 819 spin_unlock(&block_group->lock); 820 821 ret = __load_free_space_cache(fs_info->tree_root, inode, ctl, 822 path, block_group->key.objectid); 823 btrfs_free_path(path); 824 if (ret <= 0) 825 goto out; 826 827 spin_lock(&ctl->tree_lock); 828 matched = (ctl->free_space == (block_group->key.offset - used - 829 block_group->bytes_super)); 830 spin_unlock(&ctl->tree_lock); 831 832 if (!matched) { 833 __btrfs_remove_free_space_cache(ctl); 834 btrfs_err(fs_info, "block group %llu has wrong amount of free space", 835 block_group->key.objectid); 836 ret = -1; 837 } 838 out: 839 if (ret < 0) { 840 /* This cache is bogus, make sure it gets cleared */ 841 spin_lock(&block_group->lock); 842 block_group->disk_cache_state = BTRFS_DC_CLEAR; 843 spin_unlock(&block_group->lock); 844 ret = 0; 845 846 btrfs_err(fs_info, "failed to load free space cache for block group %llu", 847 block_group->key.objectid); 848 } 849 850 iput(inode); 851 return ret; 852 } 853 854 /** 855 * __btrfs_write_out_cache - write out cached info to an inode 856 * @root - the root the inode belongs to 857 * @ctl - the free space cache we are going to write out 858 * @block_group - the block_group for this cache if it belongs to a block_group 859 * @trans - the trans handle 860 * @path - the path to use 861 * @offset - the offset for the key we'll insert 862 * 863 * This function writes out a free space cache struct to disk for quick recovery 864 * on mount. This will return 0 if it was successfull in writing the cache out, 865 * and -1 if it was not. 866 */ 867 static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, 868 struct btrfs_free_space_ctl *ctl, 869 struct btrfs_block_group_cache *block_group, 870 struct btrfs_trans_handle *trans, 871 struct btrfs_path *path, u64 offset) 872 { 873 struct btrfs_free_space_header *header; 874 struct extent_buffer *leaf; 875 struct rb_node *node; 876 struct list_head *pos, *n; 877 struct extent_state *cached_state = NULL; 878 struct btrfs_free_cluster *cluster = NULL; 879 struct extent_io_tree *unpin = NULL; 880 struct io_ctl io_ctl; 881 struct list_head bitmap_list; 882 struct btrfs_key key; 883 u64 start, extent_start, extent_end, len; 884 int entries = 0; 885 int bitmaps = 0; 886 int ret; 887 int err = -1; 888 889 INIT_LIST_HEAD(&bitmap_list); 890 891 if (!i_size_read(inode)) 892 return -1; 893 894 ret = io_ctl_init(&io_ctl, inode, root); 895 if (ret) 896 return -1; 897 898 /* Get the cluster for this block_group if it exists */ 899 if (block_group && !list_empty(&block_group->cluster_list)) 900 cluster = list_entry(block_group->cluster_list.next, 901 struct btrfs_free_cluster, 902 block_group_list); 903 904 /* Lock all pages first so we can lock the extent safely. */ 905 io_ctl_prepare_pages(&io_ctl, inode, 0); 906 907 lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, 908 0, &cached_state); 909 910 node = rb_first(&ctl->free_space_offset); 911 if (!node && cluster) { 912 node = rb_first(&cluster->root); 913 cluster = NULL; 914 } 915 916 /* Make sure we can fit our crcs into the first page */ 917 if (io_ctl.check_crcs && 918 (io_ctl.num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE) 919 goto out_nospc; 920 921 io_ctl_set_generation(&io_ctl, trans->transid); 922 923 /* Write out the extent entries */ 924 while (node) { 925 struct btrfs_free_space *e; 926 927 e = rb_entry(node, struct btrfs_free_space, offset_index); 928 entries++; 929 930 ret = io_ctl_add_entry(&io_ctl, e->offset, e->bytes, 931 e->bitmap); 932 if (ret) 933 goto out_nospc; 934 935 if (e->bitmap) { 936 list_add_tail(&e->list, &bitmap_list); 937 bitmaps++; 938 } 939 node = rb_next(node); 940 if (!node && cluster) { 941 node = rb_first(&cluster->root); 942 cluster = NULL; 943 } 944 } 945 946 /* 947 * We want to add any pinned extents to our free space cache 948 * so we don't leak the space 949 */ 950 951 /* 952 * We shouldn't have switched the pinned extents yet so this is the 953 * right one 954 */ 955 unpin = root->fs_info->pinned_extents; 956 957 if (block_group) 958 start = block_group->key.objectid; 959 960 while (block_group && (start < block_group->key.objectid + 961 block_group->key.offset)) { 962 ret = find_first_extent_bit(unpin, start, 963 &extent_start, &extent_end, 964 EXTENT_DIRTY, NULL); 965 if (ret) { 966 ret = 0; 967 break; 968 } 969 970 /* This pinned extent is out of our range */ 971 if (extent_start >= block_group->key.objectid + 972 block_group->key.offset) 973 break; 974 975 extent_start = max(extent_start, start); 976 extent_end = min(block_group->key.objectid + 977 block_group->key.offset, extent_end + 1); 978 len = extent_end - extent_start; 979 980 entries++; 981 ret = io_ctl_add_entry(&io_ctl, extent_start, len, NULL); 982 if (ret) 983 goto out_nospc; 984 985 start = extent_end; 986 } 987 988 /* Write out the bitmaps */ 989 list_for_each_safe(pos, n, &bitmap_list) { 990 struct btrfs_free_space *entry = 991 list_entry(pos, struct btrfs_free_space, list); 992 993 ret = io_ctl_add_bitmap(&io_ctl, entry->bitmap); 994 if (ret) 995 goto out_nospc; 996 list_del_init(&entry->list); 997 } 998 999 /* Zero out the rest of the pages just to make sure */ 1000 io_ctl_zero_remaining_pages(&io_ctl); 1001 1002 ret = btrfs_dirty_pages(root, inode, io_ctl.pages, io_ctl.num_pages, 1003 0, i_size_read(inode), &cached_state); 1004 io_ctl_drop_pages(&io_ctl); 1005 unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, 1006 i_size_read(inode) - 1, &cached_state, GFP_NOFS); 1007 1008 if (ret) 1009 goto out; 1010 1011 ret = btrfs_wait_ordered_range(inode, 0, (u64)-1); 1012 if (ret) { 1013 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1, 1014 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL, 1015 GFP_NOFS); 1016 goto out; 1017 } 1018 1019 key.objectid = BTRFS_FREE_SPACE_OBJECTID; 1020 key.offset = offset; 1021 key.type = 0; 1022 1023 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 1024 if (ret < 0) { 1025 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1, 1026 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL, 1027 GFP_NOFS); 1028 goto out; 1029 } 1030 leaf = path->nodes[0]; 1031 if (ret > 0) { 1032 struct btrfs_key found_key; 1033 ASSERT(path->slots[0]); 1034 path->slots[0]--; 1035 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 1036 if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID || 1037 found_key.offset != offset) { 1038 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, 1039 inode->i_size - 1, 1040 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, 1041 NULL, GFP_NOFS); 1042 btrfs_release_path(path); 1043 goto out; 1044 } 1045 } 1046 1047 BTRFS_I(inode)->generation = trans->transid; 1048 header = btrfs_item_ptr(leaf, path->slots[0], 1049 struct btrfs_free_space_header); 1050 btrfs_set_free_space_entries(leaf, header, entries); 1051 btrfs_set_free_space_bitmaps(leaf, header, bitmaps); 1052 btrfs_set_free_space_generation(leaf, header, trans->transid); 1053 btrfs_mark_buffer_dirty(leaf); 1054 btrfs_release_path(path); 1055 1056 err = 0; 1057 out: 1058 io_ctl_free(&io_ctl); 1059 if (err) { 1060 invalidate_inode_pages2(inode->i_mapping); 1061 BTRFS_I(inode)->generation = 0; 1062 } 1063 btrfs_update_inode(trans, root, inode); 1064 return err; 1065 1066 out_nospc: 1067 list_for_each_safe(pos, n, &bitmap_list) { 1068 struct btrfs_free_space *entry = 1069 list_entry(pos, struct btrfs_free_space, list); 1070 list_del_init(&entry->list); 1071 } 1072 io_ctl_drop_pages(&io_ctl); 1073 unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, 1074 i_size_read(inode) - 1, &cached_state, GFP_NOFS); 1075 goto out; 1076 } 1077 1078 int btrfs_write_out_cache(struct btrfs_root *root, 1079 struct btrfs_trans_handle *trans, 1080 struct btrfs_block_group_cache *block_group, 1081 struct btrfs_path *path) 1082 { 1083 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 1084 struct inode *inode; 1085 int ret = 0; 1086 1087 root = root->fs_info->tree_root; 1088 1089 spin_lock(&block_group->lock); 1090 if (block_group->disk_cache_state < BTRFS_DC_SETUP) { 1091 spin_unlock(&block_group->lock); 1092 return 0; 1093 } 1094 spin_unlock(&block_group->lock); 1095 1096 inode = lookup_free_space_inode(root, block_group, path); 1097 if (IS_ERR(inode)) 1098 return 0; 1099 1100 ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans, 1101 path, block_group->key.objectid); 1102 if (ret) { 1103 spin_lock(&block_group->lock); 1104 block_group->disk_cache_state = BTRFS_DC_ERROR; 1105 spin_unlock(&block_group->lock); 1106 ret = 0; 1107 #ifdef DEBUG 1108 btrfs_err(root->fs_info, 1109 "failed to write free space cache for block group %llu", 1110 block_group->key.objectid); 1111 #endif 1112 } 1113 1114 iput(inode); 1115 return ret; 1116 } 1117 1118 static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit, 1119 u64 offset) 1120 { 1121 ASSERT(offset >= bitmap_start); 1122 offset -= bitmap_start; 1123 return (unsigned long)(div_u64(offset, unit)); 1124 } 1125 1126 static inline unsigned long bytes_to_bits(u64 bytes, u32 unit) 1127 { 1128 return (unsigned long)(div_u64(bytes, unit)); 1129 } 1130 1131 static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl, 1132 u64 offset) 1133 { 1134 u64 bitmap_start; 1135 u64 bytes_per_bitmap; 1136 1137 bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit; 1138 bitmap_start = offset - ctl->start; 1139 bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap); 1140 bitmap_start *= bytes_per_bitmap; 1141 bitmap_start += ctl->start; 1142 1143 return bitmap_start; 1144 } 1145 1146 static int tree_insert_offset(struct rb_root *root, u64 offset, 1147 struct rb_node *node, int bitmap) 1148 { 1149 struct rb_node **p = &root->rb_node; 1150 struct rb_node *parent = NULL; 1151 struct btrfs_free_space *info; 1152 1153 while (*p) { 1154 parent = *p; 1155 info = rb_entry(parent, struct btrfs_free_space, offset_index); 1156 1157 if (offset < info->offset) { 1158 p = &(*p)->rb_left; 1159 } else if (offset > info->offset) { 1160 p = &(*p)->rb_right; 1161 } else { 1162 /* 1163 * we could have a bitmap entry and an extent entry 1164 * share the same offset. If this is the case, we want 1165 * the extent entry to always be found first if we do a 1166 * linear search through the tree, since we want to have 1167 * the quickest allocation time, and allocating from an 1168 * extent is faster than allocating from a bitmap. So 1169 * if we're inserting a bitmap and we find an entry at 1170 * this offset, we want to go right, or after this entry 1171 * logically. If we are inserting an extent and we've 1172 * found a bitmap, we want to go left, or before 1173 * logically. 1174 */ 1175 if (bitmap) { 1176 if (info->bitmap) { 1177 WARN_ON_ONCE(1); 1178 return -EEXIST; 1179 } 1180 p = &(*p)->rb_right; 1181 } else { 1182 if (!info->bitmap) { 1183 WARN_ON_ONCE(1); 1184 return -EEXIST; 1185 } 1186 p = &(*p)->rb_left; 1187 } 1188 } 1189 } 1190 1191 rb_link_node(node, parent, p); 1192 rb_insert_color(node, root); 1193 1194 return 0; 1195 } 1196 1197 /* 1198 * searches the tree for the given offset. 1199 * 1200 * fuzzy - If this is set, then we are trying to make an allocation, and we just 1201 * want a section that has at least bytes size and comes at or after the given 1202 * offset. 1203 */ 1204 static struct btrfs_free_space * 1205 tree_search_offset(struct btrfs_free_space_ctl *ctl, 1206 u64 offset, int bitmap_only, int fuzzy) 1207 { 1208 struct rb_node *n = ctl->free_space_offset.rb_node; 1209 struct btrfs_free_space *entry, *prev = NULL; 1210 1211 /* find entry that is closest to the 'offset' */ 1212 while (1) { 1213 if (!n) { 1214 entry = NULL; 1215 break; 1216 } 1217 1218 entry = rb_entry(n, struct btrfs_free_space, offset_index); 1219 prev = entry; 1220 1221 if (offset < entry->offset) 1222 n = n->rb_left; 1223 else if (offset > entry->offset) 1224 n = n->rb_right; 1225 else 1226 break; 1227 } 1228 1229 if (bitmap_only) { 1230 if (!entry) 1231 return NULL; 1232 if (entry->bitmap) 1233 return entry; 1234 1235 /* 1236 * bitmap entry and extent entry may share same offset, 1237 * in that case, bitmap entry comes after extent entry. 1238 */ 1239 n = rb_next(n); 1240 if (!n) 1241 return NULL; 1242 entry = rb_entry(n, struct btrfs_free_space, offset_index); 1243 if (entry->offset != offset) 1244 return NULL; 1245 1246 WARN_ON(!entry->bitmap); 1247 return entry; 1248 } else if (entry) { 1249 if (entry->bitmap) { 1250 /* 1251 * if previous extent entry covers the offset, 1252 * we should return it instead of the bitmap entry 1253 */ 1254 n = rb_prev(&entry->offset_index); 1255 if (n) { 1256 prev = rb_entry(n, struct btrfs_free_space, 1257 offset_index); 1258 if (!prev->bitmap && 1259 prev->offset + prev->bytes > offset) 1260 entry = prev; 1261 } 1262 } 1263 return entry; 1264 } 1265 1266 if (!prev) 1267 return NULL; 1268 1269 /* find last entry before the 'offset' */ 1270 entry = prev; 1271 if (entry->offset > offset) { 1272 n = rb_prev(&entry->offset_index); 1273 if (n) { 1274 entry = rb_entry(n, struct btrfs_free_space, 1275 offset_index); 1276 ASSERT(entry->offset <= offset); 1277 } else { 1278 if (fuzzy) 1279 return entry; 1280 else 1281 return NULL; 1282 } 1283 } 1284 1285 if (entry->bitmap) { 1286 n = rb_prev(&entry->offset_index); 1287 if (n) { 1288 prev = rb_entry(n, struct btrfs_free_space, 1289 offset_index); 1290 if (!prev->bitmap && 1291 prev->offset + prev->bytes > offset) 1292 return prev; 1293 } 1294 if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset) 1295 return entry; 1296 } else if (entry->offset + entry->bytes > offset) 1297 return entry; 1298 1299 if (!fuzzy) 1300 return NULL; 1301 1302 while (1) { 1303 if (entry->bitmap) { 1304 if (entry->offset + BITS_PER_BITMAP * 1305 ctl->unit > offset) 1306 break; 1307 } else { 1308 if (entry->offset + entry->bytes > offset) 1309 break; 1310 } 1311 1312 n = rb_next(&entry->offset_index); 1313 if (!n) 1314 return NULL; 1315 entry = rb_entry(n, struct btrfs_free_space, offset_index); 1316 } 1317 return entry; 1318 } 1319 1320 static inline void 1321 __unlink_free_space(struct btrfs_free_space_ctl *ctl, 1322 struct btrfs_free_space *info) 1323 { 1324 rb_erase(&info->offset_index, &ctl->free_space_offset); 1325 ctl->free_extents--; 1326 } 1327 1328 static void unlink_free_space(struct btrfs_free_space_ctl *ctl, 1329 struct btrfs_free_space *info) 1330 { 1331 __unlink_free_space(ctl, info); 1332 ctl->free_space -= info->bytes; 1333 } 1334 1335 static int link_free_space(struct btrfs_free_space_ctl *ctl, 1336 struct btrfs_free_space *info) 1337 { 1338 int ret = 0; 1339 1340 ASSERT(info->bytes || info->bitmap); 1341 ret = tree_insert_offset(&ctl->free_space_offset, info->offset, 1342 &info->offset_index, (info->bitmap != NULL)); 1343 if (ret) 1344 return ret; 1345 1346 ctl->free_space += info->bytes; 1347 ctl->free_extents++; 1348 return ret; 1349 } 1350 1351 static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl) 1352 { 1353 struct btrfs_block_group_cache *block_group = ctl->private; 1354 u64 max_bytes; 1355 u64 bitmap_bytes; 1356 u64 extent_bytes; 1357 u64 size = block_group->key.offset; 1358 u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit; 1359 int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg); 1360 1361 max_bitmaps = max(max_bitmaps, 1); 1362 1363 ASSERT(ctl->total_bitmaps <= max_bitmaps); 1364 1365 /* 1366 * The goal is to keep the total amount of memory used per 1gb of space 1367 * at or below 32k, so we need to adjust how much memory we allow to be 1368 * used by extent based free space tracking 1369 */ 1370 if (size < 1024 * 1024 * 1024) 1371 max_bytes = MAX_CACHE_BYTES_PER_GIG; 1372 else 1373 max_bytes = MAX_CACHE_BYTES_PER_GIG * 1374 div64_u64(size, 1024 * 1024 * 1024); 1375 1376 /* 1377 * we want to account for 1 more bitmap than what we have so we can make 1378 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as 1379 * we add more bitmaps. 1380 */ 1381 bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE; 1382 1383 if (bitmap_bytes >= max_bytes) { 1384 ctl->extents_thresh = 0; 1385 return; 1386 } 1387 1388 /* 1389 * we want the extent entry threshold to always be at most 1/2 the maxw 1390 * bytes we can have, or whatever is less than that. 1391 */ 1392 extent_bytes = max_bytes - bitmap_bytes; 1393 extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2)); 1394 1395 ctl->extents_thresh = 1396 div64_u64(extent_bytes, (sizeof(struct btrfs_free_space))); 1397 } 1398 1399 static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl, 1400 struct btrfs_free_space *info, 1401 u64 offset, u64 bytes) 1402 { 1403 unsigned long start, count; 1404 1405 start = offset_to_bit(info->offset, ctl->unit, offset); 1406 count = bytes_to_bits(bytes, ctl->unit); 1407 ASSERT(start + count <= BITS_PER_BITMAP); 1408 1409 bitmap_clear(info->bitmap, start, count); 1410 1411 info->bytes -= bytes; 1412 } 1413 1414 static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl, 1415 struct btrfs_free_space *info, u64 offset, 1416 u64 bytes) 1417 { 1418 __bitmap_clear_bits(ctl, info, offset, bytes); 1419 ctl->free_space -= bytes; 1420 } 1421 1422 static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl, 1423 struct btrfs_free_space *info, u64 offset, 1424 u64 bytes) 1425 { 1426 unsigned long start, count; 1427 1428 start = offset_to_bit(info->offset, ctl->unit, offset); 1429 count = bytes_to_bits(bytes, ctl->unit); 1430 ASSERT(start + count <= BITS_PER_BITMAP); 1431 1432 bitmap_set(info->bitmap, start, count); 1433 1434 info->bytes += bytes; 1435 ctl->free_space += bytes; 1436 } 1437 1438 /* 1439 * If we can not find suitable extent, we will use bytes to record 1440 * the size of the max extent. 1441 */ 1442 static int search_bitmap(struct btrfs_free_space_ctl *ctl, 1443 struct btrfs_free_space *bitmap_info, u64 *offset, 1444 u64 *bytes) 1445 { 1446 unsigned long found_bits = 0; 1447 unsigned long max_bits = 0; 1448 unsigned long bits, i; 1449 unsigned long next_zero; 1450 unsigned long extent_bits; 1451 1452 i = offset_to_bit(bitmap_info->offset, ctl->unit, 1453 max_t(u64, *offset, bitmap_info->offset)); 1454 bits = bytes_to_bits(*bytes, ctl->unit); 1455 1456 for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) { 1457 next_zero = find_next_zero_bit(bitmap_info->bitmap, 1458 BITS_PER_BITMAP, i); 1459 extent_bits = next_zero - i; 1460 if (extent_bits >= bits) { 1461 found_bits = extent_bits; 1462 break; 1463 } else if (extent_bits > max_bits) { 1464 max_bits = extent_bits; 1465 } 1466 i = next_zero; 1467 } 1468 1469 if (found_bits) { 1470 *offset = (u64)(i * ctl->unit) + bitmap_info->offset; 1471 *bytes = (u64)(found_bits) * ctl->unit; 1472 return 0; 1473 } 1474 1475 *bytes = (u64)(max_bits) * ctl->unit; 1476 return -1; 1477 } 1478 1479 /* Cache the size of the max extent in bytes */ 1480 static struct btrfs_free_space * 1481 find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes, 1482 unsigned long align, u64 *max_extent_size) 1483 { 1484 struct btrfs_free_space *entry; 1485 struct rb_node *node; 1486 u64 tmp; 1487 u64 align_off; 1488 int ret; 1489 1490 if (!ctl->free_space_offset.rb_node) 1491 goto out; 1492 1493 entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1); 1494 if (!entry) 1495 goto out; 1496 1497 for (node = &entry->offset_index; node; node = rb_next(node)) { 1498 entry = rb_entry(node, struct btrfs_free_space, offset_index); 1499 if (entry->bytes < *bytes) { 1500 if (entry->bytes > *max_extent_size) 1501 *max_extent_size = entry->bytes; 1502 continue; 1503 } 1504 1505 /* make sure the space returned is big enough 1506 * to match our requested alignment 1507 */ 1508 if (*bytes >= align) { 1509 tmp = entry->offset - ctl->start + align - 1; 1510 do_div(tmp, align); 1511 tmp = tmp * align + ctl->start; 1512 align_off = tmp - entry->offset; 1513 } else { 1514 align_off = 0; 1515 tmp = entry->offset; 1516 } 1517 1518 if (entry->bytes < *bytes + align_off) { 1519 if (entry->bytes > *max_extent_size) 1520 *max_extent_size = entry->bytes; 1521 continue; 1522 } 1523 1524 if (entry->bitmap) { 1525 u64 size = *bytes; 1526 1527 ret = search_bitmap(ctl, entry, &tmp, &size); 1528 if (!ret) { 1529 *offset = tmp; 1530 *bytes = size; 1531 return entry; 1532 } else if (size > *max_extent_size) { 1533 *max_extent_size = size; 1534 } 1535 continue; 1536 } 1537 1538 *offset = tmp; 1539 *bytes = entry->bytes - align_off; 1540 return entry; 1541 } 1542 out: 1543 return NULL; 1544 } 1545 1546 static void add_new_bitmap(struct btrfs_free_space_ctl *ctl, 1547 struct btrfs_free_space *info, u64 offset) 1548 { 1549 info->offset = offset_to_bitmap(ctl, offset); 1550 info->bytes = 0; 1551 INIT_LIST_HEAD(&info->list); 1552 link_free_space(ctl, info); 1553 ctl->total_bitmaps++; 1554 1555 ctl->op->recalc_thresholds(ctl); 1556 } 1557 1558 static void free_bitmap(struct btrfs_free_space_ctl *ctl, 1559 struct btrfs_free_space *bitmap_info) 1560 { 1561 unlink_free_space(ctl, bitmap_info); 1562 kfree(bitmap_info->bitmap); 1563 kmem_cache_free(btrfs_free_space_cachep, bitmap_info); 1564 ctl->total_bitmaps--; 1565 ctl->op->recalc_thresholds(ctl); 1566 } 1567 1568 static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl, 1569 struct btrfs_free_space *bitmap_info, 1570 u64 *offset, u64 *bytes) 1571 { 1572 u64 end; 1573 u64 search_start, search_bytes; 1574 int ret; 1575 1576 again: 1577 end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1; 1578 1579 /* 1580 * We need to search for bits in this bitmap. We could only cover some 1581 * of the extent in this bitmap thanks to how we add space, so we need 1582 * to search for as much as it as we can and clear that amount, and then 1583 * go searching for the next bit. 1584 */ 1585 search_start = *offset; 1586 search_bytes = ctl->unit; 1587 search_bytes = min(search_bytes, end - search_start + 1); 1588 ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes); 1589 if (ret < 0 || search_start != *offset) 1590 return -EINVAL; 1591 1592 /* We may have found more bits than what we need */ 1593 search_bytes = min(search_bytes, *bytes); 1594 1595 /* Cannot clear past the end of the bitmap */ 1596 search_bytes = min(search_bytes, end - search_start + 1); 1597 1598 bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes); 1599 *offset += search_bytes; 1600 *bytes -= search_bytes; 1601 1602 if (*bytes) { 1603 struct rb_node *next = rb_next(&bitmap_info->offset_index); 1604 if (!bitmap_info->bytes) 1605 free_bitmap(ctl, bitmap_info); 1606 1607 /* 1608 * no entry after this bitmap, but we still have bytes to 1609 * remove, so something has gone wrong. 1610 */ 1611 if (!next) 1612 return -EINVAL; 1613 1614 bitmap_info = rb_entry(next, struct btrfs_free_space, 1615 offset_index); 1616 1617 /* 1618 * if the next entry isn't a bitmap we need to return to let the 1619 * extent stuff do its work. 1620 */ 1621 if (!bitmap_info->bitmap) 1622 return -EAGAIN; 1623 1624 /* 1625 * Ok the next item is a bitmap, but it may not actually hold 1626 * the information for the rest of this free space stuff, so 1627 * look for it, and if we don't find it return so we can try 1628 * everything over again. 1629 */ 1630 search_start = *offset; 1631 search_bytes = ctl->unit; 1632 ret = search_bitmap(ctl, bitmap_info, &search_start, 1633 &search_bytes); 1634 if (ret < 0 || search_start != *offset) 1635 return -EAGAIN; 1636 1637 goto again; 1638 } else if (!bitmap_info->bytes) 1639 free_bitmap(ctl, bitmap_info); 1640 1641 return 0; 1642 } 1643 1644 static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl, 1645 struct btrfs_free_space *info, u64 offset, 1646 u64 bytes) 1647 { 1648 u64 bytes_to_set = 0; 1649 u64 end; 1650 1651 end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit); 1652 1653 bytes_to_set = min(end - offset, bytes); 1654 1655 bitmap_set_bits(ctl, info, offset, bytes_to_set); 1656 1657 return bytes_to_set; 1658 1659 } 1660 1661 static bool use_bitmap(struct btrfs_free_space_ctl *ctl, 1662 struct btrfs_free_space *info) 1663 { 1664 struct btrfs_block_group_cache *block_group = ctl->private; 1665 1666 /* 1667 * If we are below the extents threshold then we can add this as an 1668 * extent, and don't have to deal with the bitmap 1669 */ 1670 if (ctl->free_extents < ctl->extents_thresh) { 1671 /* 1672 * If this block group has some small extents we don't want to 1673 * use up all of our free slots in the cache with them, we want 1674 * to reserve them to larger extents, however if we have plent 1675 * of cache left then go ahead an dadd them, no sense in adding 1676 * the overhead of a bitmap if we don't have to. 1677 */ 1678 if (info->bytes <= block_group->sectorsize * 4) { 1679 if (ctl->free_extents * 2 <= ctl->extents_thresh) 1680 return false; 1681 } else { 1682 return false; 1683 } 1684 } 1685 1686 /* 1687 * The original block groups from mkfs can be really small, like 8 1688 * megabytes, so don't bother with a bitmap for those entries. However 1689 * some block groups can be smaller than what a bitmap would cover but 1690 * are still large enough that they could overflow the 32k memory limit, 1691 * so allow those block groups to still be allowed to have a bitmap 1692 * entry. 1693 */ 1694 if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset) 1695 return false; 1696 1697 return true; 1698 } 1699 1700 static struct btrfs_free_space_op free_space_op = { 1701 .recalc_thresholds = recalculate_thresholds, 1702 .use_bitmap = use_bitmap, 1703 }; 1704 1705 static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl, 1706 struct btrfs_free_space *info) 1707 { 1708 struct btrfs_free_space *bitmap_info; 1709 struct btrfs_block_group_cache *block_group = NULL; 1710 int added = 0; 1711 u64 bytes, offset, bytes_added; 1712 int ret; 1713 1714 bytes = info->bytes; 1715 offset = info->offset; 1716 1717 if (!ctl->op->use_bitmap(ctl, info)) 1718 return 0; 1719 1720 if (ctl->op == &free_space_op) 1721 block_group = ctl->private; 1722 again: 1723 /* 1724 * Since we link bitmaps right into the cluster we need to see if we 1725 * have a cluster here, and if so and it has our bitmap we need to add 1726 * the free space to that bitmap. 1727 */ 1728 if (block_group && !list_empty(&block_group->cluster_list)) { 1729 struct btrfs_free_cluster *cluster; 1730 struct rb_node *node; 1731 struct btrfs_free_space *entry; 1732 1733 cluster = list_entry(block_group->cluster_list.next, 1734 struct btrfs_free_cluster, 1735 block_group_list); 1736 spin_lock(&cluster->lock); 1737 node = rb_first(&cluster->root); 1738 if (!node) { 1739 spin_unlock(&cluster->lock); 1740 goto no_cluster_bitmap; 1741 } 1742 1743 entry = rb_entry(node, struct btrfs_free_space, offset_index); 1744 if (!entry->bitmap) { 1745 spin_unlock(&cluster->lock); 1746 goto no_cluster_bitmap; 1747 } 1748 1749 if (entry->offset == offset_to_bitmap(ctl, offset)) { 1750 bytes_added = add_bytes_to_bitmap(ctl, entry, 1751 offset, bytes); 1752 bytes -= bytes_added; 1753 offset += bytes_added; 1754 } 1755 spin_unlock(&cluster->lock); 1756 if (!bytes) { 1757 ret = 1; 1758 goto out; 1759 } 1760 } 1761 1762 no_cluster_bitmap: 1763 bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 1764 1, 0); 1765 if (!bitmap_info) { 1766 ASSERT(added == 0); 1767 goto new_bitmap; 1768 } 1769 1770 bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes); 1771 bytes -= bytes_added; 1772 offset += bytes_added; 1773 added = 0; 1774 1775 if (!bytes) { 1776 ret = 1; 1777 goto out; 1778 } else 1779 goto again; 1780 1781 new_bitmap: 1782 if (info && info->bitmap) { 1783 add_new_bitmap(ctl, info, offset); 1784 added = 1; 1785 info = NULL; 1786 goto again; 1787 } else { 1788 spin_unlock(&ctl->tree_lock); 1789 1790 /* no pre-allocated info, allocate a new one */ 1791 if (!info) { 1792 info = kmem_cache_zalloc(btrfs_free_space_cachep, 1793 GFP_NOFS); 1794 if (!info) { 1795 spin_lock(&ctl->tree_lock); 1796 ret = -ENOMEM; 1797 goto out; 1798 } 1799 } 1800 1801 /* allocate the bitmap */ 1802 info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); 1803 spin_lock(&ctl->tree_lock); 1804 if (!info->bitmap) { 1805 ret = -ENOMEM; 1806 goto out; 1807 } 1808 goto again; 1809 } 1810 1811 out: 1812 if (info) { 1813 if (info->bitmap) 1814 kfree(info->bitmap); 1815 kmem_cache_free(btrfs_free_space_cachep, info); 1816 } 1817 1818 return ret; 1819 } 1820 1821 static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl, 1822 struct btrfs_free_space *info, bool update_stat) 1823 { 1824 struct btrfs_free_space *left_info; 1825 struct btrfs_free_space *right_info; 1826 bool merged = false; 1827 u64 offset = info->offset; 1828 u64 bytes = info->bytes; 1829 1830 /* 1831 * first we want to see if there is free space adjacent to the range we 1832 * are adding, if there is remove that struct and add a new one to 1833 * cover the entire range 1834 */ 1835 right_info = tree_search_offset(ctl, offset + bytes, 0, 0); 1836 if (right_info && rb_prev(&right_info->offset_index)) 1837 left_info = rb_entry(rb_prev(&right_info->offset_index), 1838 struct btrfs_free_space, offset_index); 1839 else 1840 left_info = tree_search_offset(ctl, offset - 1, 0, 0); 1841 1842 if (right_info && !right_info->bitmap) { 1843 if (update_stat) 1844 unlink_free_space(ctl, right_info); 1845 else 1846 __unlink_free_space(ctl, right_info); 1847 info->bytes += right_info->bytes; 1848 kmem_cache_free(btrfs_free_space_cachep, right_info); 1849 merged = true; 1850 } 1851 1852 if (left_info && !left_info->bitmap && 1853 left_info->offset + left_info->bytes == offset) { 1854 if (update_stat) 1855 unlink_free_space(ctl, left_info); 1856 else 1857 __unlink_free_space(ctl, left_info); 1858 info->offset = left_info->offset; 1859 info->bytes += left_info->bytes; 1860 kmem_cache_free(btrfs_free_space_cachep, left_info); 1861 merged = true; 1862 } 1863 1864 return merged; 1865 } 1866 1867 int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl, 1868 u64 offset, u64 bytes) 1869 { 1870 struct btrfs_free_space *info; 1871 int ret = 0; 1872 1873 info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS); 1874 if (!info) 1875 return -ENOMEM; 1876 1877 info->offset = offset; 1878 info->bytes = bytes; 1879 1880 spin_lock(&ctl->tree_lock); 1881 1882 if (try_merge_free_space(ctl, info, true)) 1883 goto link; 1884 1885 /* 1886 * There was no extent directly to the left or right of this new 1887 * extent then we know we're going to have to allocate a new extent, so 1888 * before we do that see if we need to drop this into a bitmap 1889 */ 1890 ret = insert_into_bitmap(ctl, info); 1891 if (ret < 0) { 1892 goto out; 1893 } else if (ret) { 1894 ret = 0; 1895 goto out; 1896 } 1897 link: 1898 ret = link_free_space(ctl, info); 1899 if (ret) 1900 kmem_cache_free(btrfs_free_space_cachep, info); 1901 out: 1902 spin_unlock(&ctl->tree_lock); 1903 1904 if (ret) { 1905 printk(KERN_CRIT "BTRFS: unable to add free space :%d\n", ret); 1906 ASSERT(ret != -EEXIST); 1907 } 1908 1909 return ret; 1910 } 1911 1912 int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, 1913 u64 offset, u64 bytes) 1914 { 1915 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 1916 struct btrfs_free_space *info; 1917 int ret; 1918 bool re_search = false; 1919 1920 spin_lock(&ctl->tree_lock); 1921 1922 again: 1923 ret = 0; 1924 if (!bytes) 1925 goto out_lock; 1926 1927 info = tree_search_offset(ctl, offset, 0, 0); 1928 if (!info) { 1929 /* 1930 * oops didn't find an extent that matched the space we wanted 1931 * to remove, look for a bitmap instead 1932 */ 1933 info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 1934 1, 0); 1935 if (!info) { 1936 /* 1937 * If we found a partial bit of our free space in a 1938 * bitmap but then couldn't find the other part this may 1939 * be a problem, so WARN about it. 1940 */ 1941 WARN_ON(re_search); 1942 goto out_lock; 1943 } 1944 } 1945 1946 re_search = false; 1947 if (!info->bitmap) { 1948 unlink_free_space(ctl, info); 1949 if (offset == info->offset) { 1950 u64 to_free = min(bytes, info->bytes); 1951 1952 info->bytes -= to_free; 1953 info->offset += to_free; 1954 if (info->bytes) { 1955 ret = link_free_space(ctl, info); 1956 WARN_ON(ret); 1957 } else { 1958 kmem_cache_free(btrfs_free_space_cachep, info); 1959 } 1960 1961 offset += to_free; 1962 bytes -= to_free; 1963 goto again; 1964 } else { 1965 u64 old_end = info->bytes + info->offset; 1966 1967 info->bytes = offset - info->offset; 1968 ret = link_free_space(ctl, info); 1969 WARN_ON(ret); 1970 if (ret) 1971 goto out_lock; 1972 1973 /* Not enough bytes in this entry to satisfy us */ 1974 if (old_end < offset + bytes) { 1975 bytes -= old_end - offset; 1976 offset = old_end; 1977 goto again; 1978 } else if (old_end == offset + bytes) { 1979 /* all done */ 1980 goto out_lock; 1981 } 1982 spin_unlock(&ctl->tree_lock); 1983 1984 ret = btrfs_add_free_space(block_group, offset + bytes, 1985 old_end - (offset + bytes)); 1986 WARN_ON(ret); 1987 goto out; 1988 } 1989 } 1990 1991 ret = remove_from_bitmap(ctl, info, &offset, &bytes); 1992 if (ret == -EAGAIN) { 1993 re_search = true; 1994 goto again; 1995 } 1996 out_lock: 1997 spin_unlock(&ctl->tree_lock); 1998 out: 1999 return ret; 2000 } 2001 2002 void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, 2003 u64 bytes) 2004 { 2005 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2006 struct btrfs_free_space *info; 2007 struct rb_node *n; 2008 int count = 0; 2009 2010 for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) { 2011 info = rb_entry(n, struct btrfs_free_space, offset_index); 2012 if (info->bytes >= bytes && !block_group->ro) 2013 count++; 2014 btrfs_crit(block_group->fs_info, 2015 "entry offset %llu, bytes %llu, bitmap %s", 2016 info->offset, info->bytes, 2017 (info->bitmap) ? "yes" : "no"); 2018 } 2019 btrfs_info(block_group->fs_info, "block group has cluster?: %s", 2020 list_empty(&block_group->cluster_list) ? "no" : "yes"); 2021 btrfs_info(block_group->fs_info, 2022 "%d blocks of free space at or bigger than bytes is", count); 2023 } 2024 2025 void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group) 2026 { 2027 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2028 2029 spin_lock_init(&ctl->tree_lock); 2030 ctl->unit = block_group->sectorsize; 2031 ctl->start = block_group->key.objectid; 2032 ctl->private = block_group; 2033 ctl->op = &free_space_op; 2034 2035 /* 2036 * we only want to have 32k of ram per block group for keeping 2037 * track of free space, and if we pass 1/2 of that we want to 2038 * start converting things over to using bitmaps 2039 */ 2040 ctl->extents_thresh = ((1024 * 32) / 2) / 2041 sizeof(struct btrfs_free_space); 2042 } 2043 2044 /* 2045 * for a given cluster, put all of its extents back into the free 2046 * space cache. If the block group passed doesn't match the block group 2047 * pointed to by the cluster, someone else raced in and freed the 2048 * cluster already. In that case, we just return without changing anything 2049 */ 2050 static int 2051 __btrfs_return_cluster_to_free_space( 2052 struct btrfs_block_group_cache *block_group, 2053 struct btrfs_free_cluster *cluster) 2054 { 2055 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2056 struct btrfs_free_space *entry; 2057 struct rb_node *node; 2058 2059 spin_lock(&cluster->lock); 2060 if (cluster->block_group != block_group) 2061 goto out; 2062 2063 cluster->block_group = NULL; 2064 cluster->window_start = 0; 2065 list_del_init(&cluster->block_group_list); 2066 2067 node = rb_first(&cluster->root); 2068 while (node) { 2069 bool bitmap; 2070 2071 entry = rb_entry(node, struct btrfs_free_space, offset_index); 2072 node = rb_next(&entry->offset_index); 2073 rb_erase(&entry->offset_index, &cluster->root); 2074 2075 bitmap = (entry->bitmap != NULL); 2076 if (!bitmap) 2077 try_merge_free_space(ctl, entry, false); 2078 tree_insert_offset(&ctl->free_space_offset, 2079 entry->offset, &entry->offset_index, bitmap); 2080 } 2081 cluster->root = RB_ROOT; 2082 2083 out: 2084 spin_unlock(&cluster->lock); 2085 btrfs_put_block_group(block_group); 2086 return 0; 2087 } 2088 2089 static void __btrfs_remove_free_space_cache_locked( 2090 struct btrfs_free_space_ctl *ctl) 2091 { 2092 struct btrfs_free_space *info; 2093 struct rb_node *node; 2094 2095 while ((node = rb_last(&ctl->free_space_offset)) != NULL) { 2096 info = rb_entry(node, struct btrfs_free_space, offset_index); 2097 if (!info->bitmap) { 2098 unlink_free_space(ctl, info); 2099 kmem_cache_free(btrfs_free_space_cachep, info); 2100 } else { 2101 free_bitmap(ctl, info); 2102 } 2103 if (need_resched()) { 2104 spin_unlock(&ctl->tree_lock); 2105 cond_resched(); 2106 spin_lock(&ctl->tree_lock); 2107 } 2108 } 2109 } 2110 2111 void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl) 2112 { 2113 spin_lock(&ctl->tree_lock); 2114 __btrfs_remove_free_space_cache_locked(ctl); 2115 spin_unlock(&ctl->tree_lock); 2116 } 2117 2118 void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) 2119 { 2120 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2121 struct btrfs_free_cluster *cluster; 2122 struct list_head *head; 2123 2124 spin_lock(&ctl->tree_lock); 2125 while ((head = block_group->cluster_list.next) != 2126 &block_group->cluster_list) { 2127 cluster = list_entry(head, struct btrfs_free_cluster, 2128 block_group_list); 2129 2130 WARN_ON(cluster->block_group != block_group); 2131 __btrfs_return_cluster_to_free_space(block_group, cluster); 2132 if (need_resched()) { 2133 spin_unlock(&ctl->tree_lock); 2134 cond_resched(); 2135 spin_lock(&ctl->tree_lock); 2136 } 2137 } 2138 __btrfs_remove_free_space_cache_locked(ctl); 2139 spin_unlock(&ctl->tree_lock); 2140 2141 } 2142 2143 u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, 2144 u64 offset, u64 bytes, u64 empty_size, 2145 u64 *max_extent_size) 2146 { 2147 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2148 struct btrfs_free_space *entry = NULL; 2149 u64 bytes_search = bytes + empty_size; 2150 u64 ret = 0; 2151 u64 align_gap = 0; 2152 u64 align_gap_len = 0; 2153 2154 spin_lock(&ctl->tree_lock); 2155 entry = find_free_space(ctl, &offset, &bytes_search, 2156 block_group->full_stripe_len, max_extent_size); 2157 if (!entry) 2158 goto out; 2159 2160 ret = offset; 2161 if (entry->bitmap) { 2162 bitmap_clear_bits(ctl, entry, offset, bytes); 2163 if (!entry->bytes) 2164 free_bitmap(ctl, entry); 2165 } else { 2166 unlink_free_space(ctl, entry); 2167 align_gap_len = offset - entry->offset; 2168 align_gap = entry->offset; 2169 2170 entry->offset = offset + bytes; 2171 WARN_ON(entry->bytes < bytes + align_gap_len); 2172 2173 entry->bytes -= bytes + align_gap_len; 2174 if (!entry->bytes) 2175 kmem_cache_free(btrfs_free_space_cachep, entry); 2176 else 2177 link_free_space(ctl, entry); 2178 } 2179 out: 2180 spin_unlock(&ctl->tree_lock); 2181 2182 if (align_gap_len) 2183 __btrfs_add_free_space(ctl, align_gap, align_gap_len); 2184 return ret; 2185 } 2186 2187 /* 2188 * given a cluster, put all of its extents back into the free space 2189 * cache. If a block group is passed, this function will only free 2190 * a cluster that belongs to the passed block group. 2191 * 2192 * Otherwise, it'll get a reference on the block group pointed to by the 2193 * cluster and remove the cluster from it. 2194 */ 2195 int btrfs_return_cluster_to_free_space( 2196 struct btrfs_block_group_cache *block_group, 2197 struct btrfs_free_cluster *cluster) 2198 { 2199 struct btrfs_free_space_ctl *ctl; 2200 int ret; 2201 2202 /* first, get a safe pointer to the block group */ 2203 spin_lock(&cluster->lock); 2204 if (!block_group) { 2205 block_group = cluster->block_group; 2206 if (!block_group) { 2207 spin_unlock(&cluster->lock); 2208 return 0; 2209 } 2210 } else if (cluster->block_group != block_group) { 2211 /* someone else has already freed it don't redo their work */ 2212 spin_unlock(&cluster->lock); 2213 return 0; 2214 } 2215 atomic_inc(&block_group->count); 2216 spin_unlock(&cluster->lock); 2217 2218 ctl = block_group->free_space_ctl; 2219 2220 /* now return any extents the cluster had on it */ 2221 spin_lock(&ctl->tree_lock); 2222 ret = __btrfs_return_cluster_to_free_space(block_group, cluster); 2223 spin_unlock(&ctl->tree_lock); 2224 2225 /* finally drop our ref */ 2226 btrfs_put_block_group(block_group); 2227 return ret; 2228 } 2229 2230 static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, 2231 struct btrfs_free_cluster *cluster, 2232 struct btrfs_free_space *entry, 2233 u64 bytes, u64 min_start, 2234 u64 *max_extent_size) 2235 { 2236 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2237 int err; 2238 u64 search_start = cluster->window_start; 2239 u64 search_bytes = bytes; 2240 u64 ret = 0; 2241 2242 search_start = min_start; 2243 search_bytes = bytes; 2244 2245 err = search_bitmap(ctl, entry, &search_start, &search_bytes); 2246 if (err) { 2247 if (search_bytes > *max_extent_size) 2248 *max_extent_size = search_bytes; 2249 return 0; 2250 } 2251 2252 ret = search_start; 2253 __bitmap_clear_bits(ctl, entry, ret, bytes); 2254 2255 return ret; 2256 } 2257 2258 /* 2259 * given a cluster, try to allocate 'bytes' from it, returns 0 2260 * if it couldn't find anything suitably large, or a logical disk offset 2261 * if things worked out 2262 */ 2263 u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, 2264 struct btrfs_free_cluster *cluster, u64 bytes, 2265 u64 min_start, u64 *max_extent_size) 2266 { 2267 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2268 struct btrfs_free_space *entry = NULL; 2269 struct rb_node *node; 2270 u64 ret = 0; 2271 2272 spin_lock(&cluster->lock); 2273 if (bytes > cluster->max_size) 2274 goto out; 2275 2276 if (cluster->block_group != block_group) 2277 goto out; 2278 2279 node = rb_first(&cluster->root); 2280 if (!node) 2281 goto out; 2282 2283 entry = rb_entry(node, struct btrfs_free_space, offset_index); 2284 while (1) { 2285 if (entry->bytes < bytes && entry->bytes > *max_extent_size) 2286 *max_extent_size = entry->bytes; 2287 2288 if (entry->bytes < bytes || 2289 (!entry->bitmap && entry->offset < min_start)) { 2290 node = rb_next(&entry->offset_index); 2291 if (!node) 2292 break; 2293 entry = rb_entry(node, struct btrfs_free_space, 2294 offset_index); 2295 continue; 2296 } 2297 2298 if (entry->bitmap) { 2299 ret = btrfs_alloc_from_bitmap(block_group, 2300 cluster, entry, bytes, 2301 cluster->window_start, 2302 max_extent_size); 2303 if (ret == 0) { 2304 node = rb_next(&entry->offset_index); 2305 if (!node) 2306 break; 2307 entry = rb_entry(node, struct btrfs_free_space, 2308 offset_index); 2309 continue; 2310 } 2311 cluster->window_start += bytes; 2312 } else { 2313 ret = entry->offset; 2314 2315 entry->offset += bytes; 2316 entry->bytes -= bytes; 2317 } 2318 2319 if (entry->bytes == 0) 2320 rb_erase(&entry->offset_index, &cluster->root); 2321 break; 2322 } 2323 out: 2324 spin_unlock(&cluster->lock); 2325 2326 if (!ret) 2327 return 0; 2328 2329 spin_lock(&ctl->tree_lock); 2330 2331 ctl->free_space -= bytes; 2332 if (entry->bytes == 0) { 2333 ctl->free_extents--; 2334 if (entry->bitmap) { 2335 kfree(entry->bitmap); 2336 ctl->total_bitmaps--; 2337 ctl->op->recalc_thresholds(ctl); 2338 } 2339 kmem_cache_free(btrfs_free_space_cachep, entry); 2340 } 2341 2342 spin_unlock(&ctl->tree_lock); 2343 2344 return ret; 2345 } 2346 2347 static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group, 2348 struct btrfs_free_space *entry, 2349 struct btrfs_free_cluster *cluster, 2350 u64 offset, u64 bytes, 2351 u64 cont1_bytes, u64 min_bytes) 2352 { 2353 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2354 unsigned long next_zero; 2355 unsigned long i; 2356 unsigned long want_bits; 2357 unsigned long min_bits; 2358 unsigned long found_bits; 2359 unsigned long start = 0; 2360 unsigned long total_found = 0; 2361 int ret; 2362 2363 i = offset_to_bit(entry->offset, ctl->unit, 2364 max_t(u64, offset, entry->offset)); 2365 want_bits = bytes_to_bits(bytes, ctl->unit); 2366 min_bits = bytes_to_bits(min_bytes, ctl->unit); 2367 2368 again: 2369 found_bits = 0; 2370 for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) { 2371 next_zero = find_next_zero_bit(entry->bitmap, 2372 BITS_PER_BITMAP, i); 2373 if (next_zero - i >= min_bits) { 2374 found_bits = next_zero - i; 2375 break; 2376 } 2377 i = next_zero; 2378 } 2379 2380 if (!found_bits) 2381 return -ENOSPC; 2382 2383 if (!total_found) { 2384 start = i; 2385 cluster->max_size = 0; 2386 } 2387 2388 total_found += found_bits; 2389 2390 if (cluster->max_size < found_bits * ctl->unit) 2391 cluster->max_size = found_bits * ctl->unit; 2392 2393 if (total_found < want_bits || cluster->max_size < cont1_bytes) { 2394 i = next_zero + 1; 2395 goto again; 2396 } 2397 2398 cluster->window_start = start * ctl->unit + entry->offset; 2399 rb_erase(&entry->offset_index, &ctl->free_space_offset); 2400 ret = tree_insert_offset(&cluster->root, entry->offset, 2401 &entry->offset_index, 1); 2402 ASSERT(!ret); /* -EEXIST; Logic error */ 2403 2404 trace_btrfs_setup_cluster(block_group, cluster, 2405 total_found * ctl->unit, 1); 2406 return 0; 2407 } 2408 2409 /* 2410 * This searches the block group for just extents to fill the cluster with. 2411 * Try to find a cluster with at least bytes total bytes, at least one 2412 * extent of cont1_bytes, and other clusters of at least min_bytes. 2413 */ 2414 static noinline int 2415 setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, 2416 struct btrfs_free_cluster *cluster, 2417 struct list_head *bitmaps, u64 offset, u64 bytes, 2418 u64 cont1_bytes, u64 min_bytes) 2419 { 2420 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2421 struct btrfs_free_space *first = NULL; 2422 struct btrfs_free_space *entry = NULL; 2423 struct btrfs_free_space *last; 2424 struct rb_node *node; 2425 u64 window_free; 2426 u64 max_extent; 2427 u64 total_size = 0; 2428 2429 entry = tree_search_offset(ctl, offset, 0, 1); 2430 if (!entry) 2431 return -ENOSPC; 2432 2433 /* 2434 * We don't want bitmaps, so just move along until we find a normal 2435 * extent entry. 2436 */ 2437 while (entry->bitmap || entry->bytes < min_bytes) { 2438 if (entry->bitmap && list_empty(&entry->list)) 2439 list_add_tail(&entry->list, bitmaps); 2440 node = rb_next(&entry->offset_index); 2441 if (!node) 2442 return -ENOSPC; 2443 entry = rb_entry(node, struct btrfs_free_space, offset_index); 2444 } 2445 2446 window_free = entry->bytes; 2447 max_extent = entry->bytes; 2448 first = entry; 2449 last = entry; 2450 2451 for (node = rb_next(&entry->offset_index); node; 2452 node = rb_next(&entry->offset_index)) { 2453 entry = rb_entry(node, struct btrfs_free_space, offset_index); 2454 2455 if (entry->bitmap) { 2456 if (list_empty(&entry->list)) 2457 list_add_tail(&entry->list, bitmaps); 2458 continue; 2459 } 2460 2461 if (entry->bytes < min_bytes) 2462 continue; 2463 2464 last = entry; 2465 window_free += entry->bytes; 2466 if (entry->bytes > max_extent) 2467 max_extent = entry->bytes; 2468 } 2469 2470 if (window_free < bytes || max_extent < cont1_bytes) 2471 return -ENOSPC; 2472 2473 cluster->window_start = first->offset; 2474 2475 node = &first->offset_index; 2476 2477 /* 2478 * now we've found our entries, pull them out of the free space 2479 * cache and put them into the cluster rbtree 2480 */ 2481 do { 2482 int ret; 2483 2484 entry = rb_entry(node, struct btrfs_free_space, offset_index); 2485 node = rb_next(&entry->offset_index); 2486 if (entry->bitmap || entry->bytes < min_bytes) 2487 continue; 2488 2489 rb_erase(&entry->offset_index, &ctl->free_space_offset); 2490 ret = tree_insert_offset(&cluster->root, entry->offset, 2491 &entry->offset_index, 0); 2492 total_size += entry->bytes; 2493 ASSERT(!ret); /* -EEXIST; Logic error */ 2494 } while (node && entry != last); 2495 2496 cluster->max_size = max_extent; 2497 trace_btrfs_setup_cluster(block_group, cluster, total_size, 0); 2498 return 0; 2499 } 2500 2501 /* 2502 * This specifically looks for bitmaps that may work in the cluster, we assume 2503 * that we have already failed to find extents that will work. 2504 */ 2505 static noinline int 2506 setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, 2507 struct btrfs_free_cluster *cluster, 2508 struct list_head *bitmaps, u64 offset, u64 bytes, 2509 u64 cont1_bytes, u64 min_bytes) 2510 { 2511 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2512 struct btrfs_free_space *entry; 2513 int ret = -ENOSPC; 2514 u64 bitmap_offset = offset_to_bitmap(ctl, offset); 2515 2516 if (ctl->total_bitmaps == 0) 2517 return -ENOSPC; 2518 2519 /* 2520 * The bitmap that covers offset won't be in the list unless offset 2521 * is just its start offset. 2522 */ 2523 entry = list_first_entry(bitmaps, struct btrfs_free_space, list); 2524 if (entry->offset != bitmap_offset) { 2525 entry = tree_search_offset(ctl, bitmap_offset, 1, 0); 2526 if (entry && list_empty(&entry->list)) 2527 list_add(&entry->list, bitmaps); 2528 } 2529 2530 list_for_each_entry(entry, bitmaps, list) { 2531 if (entry->bytes < bytes) 2532 continue; 2533 ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset, 2534 bytes, cont1_bytes, min_bytes); 2535 if (!ret) 2536 return 0; 2537 } 2538 2539 /* 2540 * The bitmaps list has all the bitmaps that record free space 2541 * starting after offset, so no more search is required. 2542 */ 2543 return -ENOSPC; 2544 } 2545 2546 /* 2547 * here we try to find a cluster of blocks in a block group. The goal 2548 * is to find at least bytes+empty_size. 2549 * We might not find them all in one contiguous area. 2550 * 2551 * returns zero and sets up cluster if things worked out, otherwise 2552 * it returns -enospc 2553 */ 2554 int btrfs_find_space_cluster(struct btrfs_root *root, 2555 struct btrfs_block_group_cache *block_group, 2556 struct btrfs_free_cluster *cluster, 2557 u64 offset, u64 bytes, u64 empty_size) 2558 { 2559 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2560 struct btrfs_free_space *entry, *tmp; 2561 LIST_HEAD(bitmaps); 2562 u64 min_bytes; 2563 u64 cont1_bytes; 2564 int ret; 2565 2566 /* 2567 * Choose the minimum extent size we'll require for this 2568 * cluster. For SSD_SPREAD, don't allow any fragmentation. 2569 * For metadata, allow allocates with smaller extents. For 2570 * data, keep it dense. 2571 */ 2572 if (btrfs_test_opt(root, SSD_SPREAD)) { 2573 cont1_bytes = min_bytes = bytes + empty_size; 2574 } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) { 2575 cont1_bytes = bytes; 2576 min_bytes = block_group->sectorsize; 2577 } else { 2578 cont1_bytes = max(bytes, (bytes + empty_size) >> 2); 2579 min_bytes = block_group->sectorsize; 2580 } 2581 2582 spin_lock(&ctl->tree_lock); 2583 2584 /* 2585 * If we know we don't have enough space to make a cluster don't even 2586 * bother doing all the work to try and find one. 2587 */ 2588 if (ctl->free_space < bytes) { 2589 spin_unlock(&ctl->tree_lock); 2590 return -ENOSPC; 2591 } 2592 2593 spin_lock(&cluster->lock); 2594 2595 /* someone already found a cluster, hooray */ 2596 if (cluster->block_group) { 2597 ret = 0; 2598 goto out; 2599 } 2600 2601 trace_btrfs_find_cluster(block_group, offset, bytes, empty_size, 2602 min_bytes); 2603 2604 INIT_LIST_HEAD(&bitmaps); 2605 ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset, 2606 bytes + empty_size, 2607 cont1_bytes, min_bytes); 2608 if (ret) 2609 ret = setup_cluster_bitmap(block_group, cluster, &bitmaps, 2610 offset, bytes + empty_size, 2611 cont1_bytes, min_bytes); 2612 2613 /* Clear our temporary list */ 2614 list_for_each_entry_safe(entry, tmp, &bitmaps, list) 2615 list_del_init(&entry->list); 2616 2617 if (!ret) { 2618 atomic_inc(&block_group->count); 2619 list_add_tail(&cluster->block_group_list, 2620 &block_group->cluster_list); 2621 cluster->block_group = block_group; 2622 } else { 2623 trace_btrfs_failed_cluster_setup(block_group); 2624 } 2625 out: 2626 spin_unlock(&cluster->lock); 2627 spin_unlock(&ctl->tree_lock); 2628 2629 return ret; 2630 } 2631 2632 /* 2633 * simple code to zero out a cluster 2634 */ 2635 void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster) 2636 { 2637 spin_lock_init(&cluster->lock); 2638 spin_lock_init(&cluster->refill_lock); 2639 cluster->root = RB_ROOT; 2640 cluster->max_size = 0; 2641 INIT_LIST_HEAD(&cluster->block_group_list); 2642 cluster->block_group = NULL; 2643 } 2644 2645 static int do_trimming(struct btrfs_block_group_cache *block_group, 2646 u64 *total_trimmed, u64 start, u64 bytes, 2647 u64 reserved_start, u64 reserved_bytes) 2648 { 2649 struct btrfs_space_info *space_info = block_group->space_info; 2650 struct btrfs_fs_info *fs_info = block_group->fs_info; 2651 int ret; 2652 int update = 0; 2653 u64 trimmed = 0; 2654 2655 spin_lock(&space_info->lock); 2656 spin_lock(&block_group->lock); 2657 if (!block_group->ro) { 2658 block_group->reserved += reserved_bytes; 2659 space_info->bytes_reserved += reserved_bytes; 2660 update = 1; 2661 } 2662 spin_unlock(&block_group->lock); 2663 spin_unlock(&space_info->lock); 2664 2665 ret = btrfs_error_discard_extent(fs_info->extent_root, 2666 start, bytes, &trimmed); 2667 if (!ret) 2668 *total_trimmed += trimmed; 2669 2670 btrfs_add_free_space(block_group, reserved_start, reserved_bytes); 2671 2672 if (update) { 2673 spin_lock(&space_info->lock); 2674 spin_lock(&block_group->lock); 2675 if (block_group->ro) 2676 space_info->bytes_readonly += reserved_bytes; 2677 block_group->reserved -= reserved_bytes; 2678 space_info->bytes_reserved -= reserved_bytes; 2679 spin_unlock(&space_info->lock); 2680 spin_unlock(&block_group->lock); 2681 } 2682 2683 return ret; 2684 } 2685 2686 static int trim_no_bitmap(struct btrfs_block_group_cache *block_group, 2687 u64 *total_trimmed, u64 start, u64 end, u64 minlen) 2688 { 2689 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2690 struct btrfs_free_space *entry; 2691 struct rb_node *node; 2692 int ret = 0; 2693 u64 extent_start; 2694 u64 extent_bytes; 2695 u64 bytes; 2696 2697 while (start < end) { 2698 spin_lock(&ctl->tree_lock); 2699 2700 if (ctl->free_space < minlen) { 2701 spin_unlock(&ctl->tree_lock); 2702 break; 2703 } 2704 2705 entry = tree_search_offset(ctl, start, 0, 1); 2706 if (!entry) { 2707 spin_unlock(&ctl->tree_lock); 2708 break; 2709 } 2710 2711 /* skip bitmaps */ 2712 while (entry->bitmap) { 2713 node = rb_next(&entry->offset_index); 2714 if (!node) { 2715 spin_unlock(&ctl->tree_lock); 2716 goto out; 2717 } 2718 entry = rb_entry(node, struct btrfs_free_space, 2719 offset_index); 2720 } 2721 2722 if (entry->offset >= end) { 2723 spin_unlock(&ctl->tree_lock); 2724 break; 2725 } 2726 2727 extent_start = entry->offset; 2728 extent_bytes = entry->bytes; 2729 start = max(start, extent_start); 2730 bytes = min(extent_start + extent_bytes, end) - start; 2731 if (bytes < minlen) { 2732 spin_unlock(&ctl->tree_lock); 2733 goto next; 2734 } 2735 2736 unlink_free_space(ctl, entry); 2737 kmem_cache_free(btrfs_free_space_cachep, entry); 2738 2739 spin_unlock(&ctl->tree_lock); 2740 2741 ret = do_trimming(block_group, total_trimmed, start, bytes, 2742 extent_start, extent_bytes); 2743 if (ret) 2744 break; 2745 next: 2746 start += bytes; 2747 2748 if (fatal_signal_pending(current)) { 2749 ret = -ERESTARTSYS; 2750 break; 2751 } 2752 2753 cond_resched(); 2754 } 2755 out: 2756 return ret; 2757 } 2758 2759 static int trim_bitmaps(struct btrfs_block_group_cache *block_group, 2760 u64 *total_trimmed, u64 start, u64 end, u64 minlen) 2761 { 2762 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2763 struct btrfs_free_space *entry; 2764 int ret = 0; 2765 int ret2; 2766 u64 bytes; 2767 u64 offset = offset_to_bitmap(ctl, start); 2768 2769 while (offset < end) { 2770 bool next_bitmap = false; 2771 2772 spin_lock(&ctl->tree_lock); 2773 2774 if (ctl->free_space < minlen) { 2775 spin_unlock(&ctl->tree_lock); 2776 break; 2777 } 2778 2779 entry = tree_search_offset(ctl, offset, 1, 0); 2780 if (!entry) { 2781 spin_unlock(&ctl->tree_lock); 2782 next_bitmap = true; 2783 goto next; 2784 } 2785 2786 bytes = minlen; 2787 ret2 = search_bitmap(ctl, entry, &start, &bytes); 2788 if (ret2 || start >= end) { 2789 spin_unlock(&ctl->tree_lock); 2790 next_bitmap = true; 2791 goto next; 2792 } 2793 2794 bytes = min(bytes, end - start); 2795 if (bytes < minlen) { 2796 spin_unlock(&ctl->tree_lock); 2797 goto next; 2798 } 2799 2800 bitmap_clear_bits(ctl, entry, start, bytes); 2801 if (entry->bytes == 0) 2802 free_bitmap(ctl, entry); 2803 2804 spin_unlock(&ctl->tree_lock); 2805 2806 ret = do_trimming(block_group, total_trimmed, start, bytes, 2807 start, bytes); 2808 if (ret) 2809 break; 2810 next: 2811 if (next_bitmap) { 2812 offset += BITS_PER_BITMAP * ctl->unit; 2813 } else { 2814 start += bytes; 2815 if (start >= offset + BITS_PER_BITMAP * ctl->unit) 2816 offset += BITS_PER_BITMAP * ctl->unit; 2817 } 2818 2819 if (fatal_signal_pending(current)) { 2820 ret = -ERESTARTSYS; 2821 break; 2822 } 2823 2824 cond_resched(); 2825 } 2826 2827 return ret; 2828 } 2829 2830 int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, 2831 u64 *trimmed, u64 start, u64 end, u64 minlen) 2832 { 2833 int ret; 2834 2835 *trimmed = 0; 2836 2837 ret = trim_no_bitmap(block_group, trimmed, start, end, minlen); 2838 if (ret) 2839 return ret; 2840 2841 ret = trim_bitmaps(block_group, trimmed, start, end, minlen); 2842 2843 return ret; 2844 } 2845 2846 /* 2847 * Find the left-most item in the cache tree, and then return the 2848 * smallest inode number in the item. 2849 * 2850 * Note: the returned inode number may not be the smallest one in 2851 * the tree, if the left-most item is a bitmap. 2852 */ 2853 u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root) 2854 { 2855 struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl; 2856 struct btrfs_free_space *entry = NULL; 2857 u64 ino = 0; 2858 2859 spin_lock(&ctl->tree_lock); 2860 2861 if (RB_EMPTY_ROOT(&ctl->free_space_offset)) 2862 goto out; 2863 2864 entry = rb_entry(rb_first(&ctl->free_space_offset), 2865 struct btrfs_free_space, offset_index); 2866 2867 if (!entry->bitmap) { 2868 ino = entry->offset; 2869 2870 unlink_free_space(ctl, entry); 2871 entry->offset++; 2872 entry->bytes--; 2873 if (!entry->bytes) 2874 kmem_cache_free(btrfs_free_space_cachep, entry); 2875 else 2876 link_free_space(ctl, entry); 2877 } else { 2878 u64 offset = 0; 2879 u64 count = 1; 2880 int ret; 2881 2882 ret = search_bitmap(ctl, entry, &offset, &count); 2883 /* Logic error; Should be empty if it can't find anything */ 2884 ASSERT(!ret); 2885 2886 ino = offset; 2887 bitmap_clear_bits(ctl, entry, offset, 1); 2888 if (entry->bytes == 0) 2889 free_bitmap(ctl, entry); 2890 } 2891 out: 2892 spin_unlock(&ctl->tree_lock); 2893 2894 return ino; 2895 } 2896 2897 struct inode *lookup_free_ino_inode(struct btrfs_root *root, 2898 struct btrfs_path *path) 2899 { 2900 struct inode *inode = NULL; 2901 2902 spin_lock(&root->cache_lock); 2903 if (root->cache_inode) 2904 inode = igrab(root->cache_inode); 2905 spin_unlock(&root->cache_lock); 2906 if (inode) 2907 return inode; 2908 2909 inode = __lookup_free_space_inode(root, path, 0); 2910 if (IS_ERR(inode)) 2911 return inode; 2912 2913 spin_lock(&root->cache_lock); 2914 if (!btrfs_fs_closing(root->fs_info)) 2915 root->cache_inode = igrab(inode); 2916 spin_unlock(&root->cache_lock); 2917 2918 return inode; 2919 } 2920 2921 int create_free_ino_inode(struct btrfs_root *root, 2922 struct btrfs_trans_handle *trans, 2923 struct btrfs_path *path) 2924 { 2925 return __create_free_space_inode(root, trans, path, 2926 BTRFS_FREE_INO_OBJECTID, 0); 2927 } 2928 2929 int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root) 2930 { 2931 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; 2932 struct btrfs_path *path; 2933 struct inode *inode; 2934 int ret = 0; 2935 u64 root_gen = btrfs_root_generation(&root->root_item); 2936 2937 if (!btrfs_test_opt(root, INODE_MAP_CACHE)) 2938 return 0; 2939 2940 /* 2941 * If we're unmounting then just return, since this does a search on the 2942 * normal root and not the commit root and we could deadlock. 2943 */ 2944 if (btrfs_fs_closing(fs_info)) 2945 return 0; 2946 2947 path = btrfs_alloc_path(); 2948 if (!path) 2949 return 0; 2950 2951 inode = lookup_free_ino_inode(root, path); 2952 if (IS_ERR(inode)) 2953 goto out; 2954 2955 if (root_gen != BTRFS_I(inode)->generation) 2956 goto out_put; 2957 2958 ret = __load_free_space_cache(root, inode, ctl, path, 0); 2959 2960 if (ret < 0) 2961 btrfs_err(fs_info, 2962 "failed to load free ino cache for root %llu", 2963 root->root_key.objectid); 2964 out_put: 2965 iput(inode); 2966 out: 2967 btrfs_free_path(path); 2968 return ret; 2969 } 2970 2971 int btrfs_write_out_ino_cache(struct btrfs_root *root, 2972 struct btrfs_trans_handle *trans, 2973 struct btrfs_path *path, 2974 struct inode *inode) 2975 { 2976 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; 2977 int ret; 2978 2979 if (!btrfs_test_opt(root, INODE_MAP_CACHE)) 2980 return 0; 2981 2982 ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0); 2983 if (ret) { 2984 btrfs_delalloc_release_metadata(inode, inode->i_size); 2985 #ifdef DEBUG 2986 btrfs_err(root->fs_info, 2987 "failed to write free ino cache for root %llu", 2988 root->root_key.objectid); 2989 #endif 2990 } 2991 2992 return ret; 2993 } 2994 2995 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 2996 /* 2997 * Use this if you need to make a bitmap or extent entry specifically, it 2998 * doesn't do any of the merging that add_free_space does, this acts a lot like 2999 * how the free space cache loading stuff works, so you can get really weird 3000 * configurations. 3001 */ 3002 int test_add_free_space_entry(struct btrfs_block_group_cache *cache, 3003 u64 offset, u64 bytes, bool bitmap) 3004 { 3005 struct btrfs_free_space_ctl *ctl = cache->free_space_ctl; 3006 struct btrfs_free_space *info = NULL, *bitmap_info; 3007 void *map = NULL; 3008 u64 bytes_added; 3009 int ret; 3010 3011 again: 3012 if (!info) { 3013 info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS); 3014 if (!info) 3015 return -ENOMEM; 3016 } 3017 3018 if (!bitmap) { 3019 spin_lock(&ctl->tree_lock); 3020 info->offset = offset; 3021 info->bytes = bytes; 3022 ret = link_free_space(ctl, info); 3023 spin_unlock(&ctl->tree_lock); 3024 if (ret) 3025 kmem_cache_free(btrfs_free_space_cachep, info); 3026 return ret; 3027 } 3028 3029 if (!map) { 3030 map = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); 3031 if (!map) { 3032 kmem_cache_free(btrfs_free_space_cachep, info); 3033 return -ENOMEM; 3034 } 3035 } 3036 3037 spin_lock(&ctl->tree_lock); 3038 bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 3039 1, 0); 3040 if (!bitmap_info) { 3041 info->bitmap = map; 3042 map = NULL; 3043 add_new_bitmap(ctl, info, offset); 3044 bitmap_info = info; 3045 } 3046 3047 bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes); 3048 bytes -= bytes_added; 3049 offset += bytes_added; 3050 spin_unlock(&ctl->tree_lock); 3051 3052 if (bytes) 3053 goto again; 3054 3055 if (map) 3056 kfree(map); 3057 return 0; 3058 } 3059 3060 /* 3061 * Checks to see if the given range is in the free space cache. This is really 3062 * just used to check the absence of space, so if there is free space in the 3063 * range at all we will return 1. 3064 */ 3065 int test_check_exists(struct btrfs_block_group_cache *cache, 3066 u64 offset, u64 bytes) 3067 { 3068 struct btrfs_free_space_ctl *ctl = cache->free_space_ctl; 3069 struct btrfs_free_space *info; 3070 int ret = 0; 3071 3072 spin_lock(&ctl->tree_lock); 3073 info = tree_search_offset(ctl, offset, 0, 0); 3074 if (!info) { 3075 info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 3076 1, 0); 3077 if (!info) 3078 goto out; 3079 } 3080 3081 have_info: 3082 if (info->bitmap) { 3083 u64 bit_off, bit_bytes; 3084 struct rb_node *n; 3085 struct btrfs_free_space *tmp; 3086 3087 bit_off = offset; 3088 bit_bytes = ctl->unit; 3089 ret = search_bitmap(ctl, info, &bit_off, &bit_bytes); 3090 if (!ret) { 3091 if (bit_off == offset) { 3092 ret = 1; 3093 goto out; 3094 } else if (bit_off > offset && 3095 offset + bytes > bit_off) { 3096 ret = 1; 3097 goto out; 3098 } 3099 } 3100 3101 n = rb_prev(&info->offset_index); 3102 while (n) { 3103 tmp = rb_entry(n, struct btrfs_free_space, 3104 offset_index); 3105 if (tmp->offset + tmp->bytes < offset) 3106 break; 3107 if (offset + bytes < tmp->offset) { 3108 n = rb_prev(&info->offset_index); 3109 continue; 3110 } 3111 info = tmp; 3112 goto have_info; 3113 } 3114 3115 n = rb_next(&info->offset_index); 3116 while (n) { 3117 tmp = rb_entry(n, struct btrfs_free_space, 3118 offset_index); 3119 if (offset + bytes < tmp->offset) 3120 break; 3121 if (tmp->offset + tmp->bytes < offset) { 3122 n = rb_next(&info->offset_index); 3123 continue; 3124 } 3125 info = tmp; 3126 goto have_info; 3127 } 3128 3129 goto out; 3130 } 3131 3132 if (info->offset == offset) { 3133 ret = 1; 3134 goto out; 3135 } 3136 3137 if (offset > info->offset && offset < info->offset + info->bytes) 3138 ret = 1; 3139 out: 3140 spin_unlock(&ctl->tree_lock); 3141 return ret; 3142 } 3143 #endif /* CONFIG_BTRFS_FS_RUN_SANITY_TESTS */ 3144