/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

/* end of the extent in the file, clamped to (u64)-1 on overflow */
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}

/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static void ordered_data_tree_panic(struct inode *inode, int errno,
				    u64 offset)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset "
		    "%llu\n", offset);
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}
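/*
 * Worked example for __tree_search() (illustrative): with ordered
 * extents covering [0,4K) and [8K,12K), a search for offset 4K finds no
 * entry containing it, returns NULL, and leaves *prev_ret pointing at
 * the [0,4K) node -- the closest entry that ends at or before the
 * offset -- so tree_search() below can fall back to it.  When nothing
 * ends at or before the offset, prev is left at the first entry after
 * it instead.
 */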
/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

/*
 * look for the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int dio, int compress_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) &&
	    !(type == BTRFS_ORDERED_NOCOW))
		entry->csum_bytes_left = disk_len;
	entry->disk_len = disk_len;
	entry->bytes_left = len;
	entry->inode = igrab(inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

	/* one ref for the tree */
	atomic_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);
	INIT_LIST_HEAD(&entry->log_list);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		ordered_data_tree_panic(inode, -EEXIST, file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&root->fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root,
			      &root->fs_info->ordered_roots);
		spin_unlock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	return 0;
}

int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 1,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  compress_type);
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct inode *inode,
			   struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	WARN_ON(entry->csum_bytes_left < sum->len);
	entry->csum_bytes_left -= sum->len;
	if (entry->csum_bytes_left == 0)
		wake_up(&entry->wait);
	spin_unlock_irq(&tree->lock);
}
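/*
 * Usage sketch for the wrappers above (illustrative; the real callers
 * live in inode.c): a plain COW write path that has just reserved a
 * disk extent records it before submitting IO, roughly
 *
 *	ret = btrfs_add_ordered_extent(inode, file_offset, disk_start,
 *				       num_bytes, num_bytes, 0);
 *	if (ret)
 *		goto out_free_reserve;
 *
 * disk_start and num_bytes are assumed to come from the extent
 * allocator, and out_free_reserve is a hypothetical error label.  Type
 * 0 sets no flag bit (that is what the IO_DONE/COMPLETE check in
 * __btrfs_add_ordered_extent is for); nocow and prealloc writes pass
 * BTRFS_ORDERED_NOCOW or BTRFS_ORDERED_PREALLOC instead, and compressed
 * writes use the _compress variant with disk_len smaller than len.
 */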
/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 *file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size, entry->file_offset +
		      entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		printk(KERN_CRIT "bad ordering dec_start %llu end %llu\n",
		       dec_start, dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       entry->bytes_left, to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}
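/*
 * Illustrative walk (an assumed caller, modelled on the direct-IO endio
 * path): completing IO for [start, start + len) may span several
 * ordered extents, so a caller advances through the range with the
 * updated file_offset:
 *
 *	u64 cur = start;
 *	struct btrfs_ordered_extent *ordered;
 *
 *	while (cur < start + len) {
 *		u64 last = cur;
 *		ordered = NULL;
 *		if (btrfs_dec_test_first_ordered_pending(inode, &ordered,
 *							 &cur,
 *							 start + len - cur,
 *							 1))
 *			btrfs_put_ordered_extent(ordered);
 *		if (cur == last)
 *			break;
 *	}
 *
 * A return of 1 happens at most once per ordered extent (the IO_DONE
 * test_and_set_bit), and *cached then carries a reference the caller
 * must drop; a real caller would finish the extent before putting it.
 * The cur == last check stops the loop once no ordered extent covers
 * the offset.
 */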
/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       entry->bytes_left, io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/* Needs to either be called under a log transaction or the log_mutex */
void btrfs_get_logged_extents(struct btrfs_root *log, struct inode *inode)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_ordered_extent *ordered;
	struct rb_node *n;
	int index = log->log_transid % 2;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		spin_lock(&log->log_extents_lock[index]);
		if (list_empty(&ordered->log_list)) {
			list_add_tail(&ordered->log_list,
				      &log->logged_list[index]);
			atomic_inc(&ordered->refs);
		}
		spin_unlock(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&tree->lock);
}

void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		spin_unlock_irq(&log->log_extents_lock[index]);
		wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
						   &ordered->flags));
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}

void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		spin_unlock_irq(&log->log_extents_lock[index]);
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}
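/*
 * Lifecycle note: btrfs_get_logged_extents() takes one reference per
 * ordered extent it puts on logged_list[index].  Exactly one of
 * btrfs_wait_logged_extents() or btrfs_free_logged_extents() must run
 * later for that transid to empty the list and drop those references,
 * otherwise the ordered extents (and the inodes they pin) are leaked.
 */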
/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (atomic_dec_and_test(&entry->refs)) {
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct rb_node *node;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(inode, entry);

	/*
	 * we have no more ordered extents for this inode and
	 * no dirty pages.  We can safely remove it from the
	 * list of ordered extents
	 */
	if (RB_EMPTY_ROOT(&tree->tree) &&
	    !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		spin_lock(&root->fs_info->ordered_root_lock);
		list_del_init(&BTRFS_I(inode)->ordered_operations);
		spin_unlock(&root->fs_info->ordered_root_lock);
	}

	if (!root->nr_ordered_extents) {
		spin_lock(&root->fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
}

static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered->inode, ordered, 1);
	complete(&ordered->completion);
}
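/*
 * btrfs_run_ordered_extent_work() above runs on a flush_workers thread:
 * it forces one extent out with btrfs_start_ordered_extent(..., 1) and
 * then signals ->completion, which is what lets
 * btrfs_wait_ordered_extents() below queue a whole batch first and
 * wait_for_completion() on each item afterwards.
 */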
/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr)
{
	struct list_head splice, works;
	struct btrfs_ordered_extent *ordered, *next;
	int count = 0;

	INIT_LIST_HEAD(&splice);
	INIT_LIST_HEAD(&works);

	mutex_lock(&root->fs_info->ordered_operations_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);
		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		atomic_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		ordered->flush_work.func = btrfs_run_ordered_extent_work;
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_worker(&root->fs_info->flush_workers,
				   &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != -1)
			nr--;
		count++;
	}
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->fs_info->ordered_operations_mutex);

	return count;
}

void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_root *root;
	struct list_head splice;
	int done;

	INIT_LIST_HEAD(&splice);

	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr);
		btrfs_put_fs_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != -1) {
			nr -= done;
			WARN_ON(nr < 0);
		}
	}
	spin_unlock(&fs_info->ordered_root_lock);
}
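/*
 * Usage sketch (illustrative): a caller that needs every ordered extent
 * on the filesystem flushed can pass nr == -1 for "no limit":
 *
 *	btrfs_wait_ordered_roots(fs_info, -1);
 *
 * Both loops above only decrement nr when it is not -1, so -1 keeps
 * them running until the lists drain, while a positive nr bounds how
 * many extents are waited on.
 */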
/*
 * this is used during transaction commit to write all the inodes
 * added to the ordered operation list.  These files must be fully on
 * disk before the transaction commits.
 *
 * we have two modes here, one is to just start the IO via filemap_flush
 * and the other is to wait for all the io.  When we wait, we have an
 * extra check to make sure the ordered operation list really is empty
 * before we return
 */
int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, int wait)
{
	struct btrfs_inode *btrfs_inode;
	struct inode *inode;
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct list_head splice;
	struct list_head works;
	struct btrfs_delalloc_work *work, *next;
	int ret = 0;

	INIT_LIST_HEAD(&splice);
	INIT_LIST_HEAD(&works);

	mutex_lock(&root->fs_info->ordered_extent_flush_mutex);
	spin_lock(&root->fs_info->ordered_root_lock);
	list_splice_init(&cur_trans->ordered_operations, &splice);
	while (!list_empty(&splice)) {
		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
					 ordered_operations);
		inode = &btrfs_inode->vfs_inode;

		list_del_init(&btrfs_inode->ordered_operations);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(inode);
		if (!inode)
			continue;

		if (!wait)
			list_add_tail(&BTRFS_I(inode)->ordered_operations,
				      &cur_trans->ordered_operations);
		spin_unlock(&root->fs_info->ordered_root_lock);

		work = btrfs_alloc_delalloc_work(inode, wait, 1);
		if (!work) {
			spin_lock(&root->fs_info->ordered_root_lock);
			if (list_empty(&BTRFS_I(inode)->ordered_operations))
				list_add_tail(&btrfs_inode->ordered_operations,
					      &splice);
			list_splice_tail(&splice,
					 &cur_trans->ordered_operations);
			spin_unlock(&root->fs_info->ordered_root_lock);
			ret = -ENOMEM;
			goto out;
		}
		list_add_tail(&work->list, &works);
		btrfs_queue_worker(&root->fs_info->flush_workers,
				   &work->work);

		cond_resched();
		spin_lock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->fs_info->ordered_root_lock);
out:
	list_for_each_entry_safe(work, next, &works, list) {
		list_del_init(&work->list);
		btrfs_wait_and_free_delalloc_work(work);
	}
	mutex_unlock(&root->fs_info->ordered_extent_flush_mutex);
	return ret;
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}
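/*
 * Example pairing with the lookup helpers below (illustrative, assumed
 * caller): code that finds an ordered extent in its way can force the
 * IO and block on it, then drop the lookup reference:
 *
 *	ordered = btrfs_lookup_ordered_extent(inode, page_offset(page));
 *	if (ordered) {
 *		btrfs_start_ordered_extent(inode, ordered, 1);
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */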
/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	ret = filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
	if (ret)
		return ret;
	/*
	 * So with compression we will find and lock a dirty page and clear the
	 * first one as dirty, setup an async extent, and immediately return
	 * with the entire range locked but with nobody actually marked with
	 * writeback.  So we can't just filemap_write_and_wait_range() and
	 * expect it to work since it will just kick off a thread to do the
	 * actual work.  So we need to call filemap_fdatawrite_range _again_
	 * since it will wait on the page lock, which won't be unlocked until
	 * after the pages have been marked as writeback and so we're good to go
	 * from there.  We have to do this otherwise we'll miss the ordered
	 * extents and that results in badness.  Please Josef, do not think you
	 * know better and pull this out at some point in the future, it is
	 * right and you are wrong.
	 */
	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
		     &BTRFS_I(inode)->runtime_flags)) {
		ret = filemap_fdatawrite_range(inode->i_mapping, start,
					       orig_end);
		if (ret)
			return ret;
	}
	ret = filemap_fdatawait_range(inode->i_mapping, start, orig_end);
	if (ret)
		return ret;

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len < start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (ret || end == 0 || end == start)
			break;
		end--;
	}
	return ret;
}

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}
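/*
 * Sketch of the DIO pattern btrfs_lookup_ordered_range() below serves
 * (an assumed caller, loosely modelled on the direct-IO code in
 * inode.c): after locking an extent range, re-check the whole range for
 * ordered extents and back off if any overlap:
 *
 *	ordered = btrfs_lookup_ordered_range(inode, lockstart,
 *					     lockend - lockstart + 1);
 *	if (ordered) {
 *		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
 *		btrfs_start_ordered_extent(inode, ordered, 1);
 *		btrfs_put_ordered_extent(ordered);
 *		goto again;
 *	}
 *
 * where again retakes the extent lock before looking once more.
 */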
/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
							u64 file_offset,
							u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		atomic_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size = i_size_read(inode);
	struct rb_node *node;
	struct rb_node *prev = NULL;
	struct btrfs_ordered_extent *test;
	int ret = 1;

	spin_lock_irq(&tree->lock);
	if (ordered) {
		offset = entry_end(ordered);
		if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags))
			offset = min(offset,
				     ordered->file_offset +
				     ordered->truncated_len);
	} else {
		offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);
	}
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/* truncate file */
	if (disk_i_size > i_size) {
		BTRFS_I(inode)->disk_i_size = i_size;
		ret = 0;
		goto out;
	}

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size == i_size)
		goto out;

	/*
	 * We still need to update disk_i_size if outstanding_isize is greater
	 * than disk_i_size.
	 */
	if (offset <= disk_i_size &&
	    (!ordered || ordered->outstanding_isize <= disk_i_size))
		goto out;

	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving ordered struct,
		 * so there should be no ordered struct covering this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
	for (; node; node = rb_prev(node)) {
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		/* We treat this entry as if it doesn't exist */
		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
			continue;
		if (test->file_offset + test->len <= disk_i_size)
			break;
		if (test->file_offset >= i_size)
			break;
		if (entry_end(test) > disk_i_size) {
			/*
			 * we don't update disk_i_size now, so record this
			 * undealt i_size.  Otherwise we will not know the
			 * real i_size.
			 */
			if (test->outstanding_isize < offset)
				test->outstanding_isize = offset;
			if (ordered &&
			    ordered->outstanding_isize >
			    test->outstanding_isize)
				test->outstanding_isize =
					ordered->outstanding_isize;
			goto out;
		}
	}
	new_i_size = min_t(u64, offset, i_size);

	/*
	 * Some ordered extents may have completed before the current one, and
	 * we hold the real i_size in ->outstanding_isize.
	 */
	if (ordered && ordered->outstanding_isize > new_i_size)
		new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
	BTRFS_I(inode)->disk_i_size = new_i_size;
	ret = 0;
out:
	/*
	 * We need to do this because we can't remove ordered extents until
	 * after the i_disk_size has been updated and then the inode has been
	 * updated to reflect the change, so we need to tell anybody who finds
	 * this ordered extent that we've already done all the real work, we
	 * just haven't completed all the other work.
	 */
	if (ordered)
		set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
	spin_unlock_irq(&tree->lock);
	return ret;
}
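/*
 * Worked example for the i_size logic above (illustrative): with
 * i_size = 16K, disk_i_size = 0, and ordered extents A [0,4K) and
 * B [4K,8K), finishing B first finds A still pending below it, so B
 * only records outstanding_isize = 8K on A and leaves disk_i_size
 * untouched.  When A finishes, nothing pending remains below it, and
 * because A now carries outstanding_isize = 8K, disk_i_size jumps
 * straight to min(8K, i_size) = 8K rather than stopping at 4K.
 */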
This is used because we allow pages to 1037 * be reclaimed before their checksum is actually put into the btree 1038 */ 1039 int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, 1040 u32 *sum, int len) 1041 { 1042 struct btrfs_ordered_sum *ordered_sum; 1043 struct btrfs_ordered_extent *ordered; 1044 struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree; 1045 unsigned long num_sectors; 1046 unsigned long i; 1047 u32 sectorsize = BTRFS_I(inode)->root->sectorsize; 1048 int index = 0; 1049 1050 ordered = btrfs_lookup_ordered_extent(inode, offset); 1051 if (!ordered) 1052 return 0; 1053 1054 spin_lock_irq(&tree->lock); 1055 list_for_each_entry_reverse(ordered_sum, &ordered->list, list) { 1056 if (disk_bytenr >= ordered_sum->bytenr && 1057 disk_bytenr < ordered_sum->bytenr + ordered_sum->len) { 1058 i = (disk_bytenr - ordered_sum->bytenr) >> 1059 inode->i_sb->s_blocksize_bits; 1060 num_sectors = ordered_sum->len >> 1061 inode->i_sb->s_blocksize_bits; 1062 num_sectors = min_t(int, len - index, num_sectors - i); 1063 memcpy(sum + index, ordered_sum->sums + i, 1064 num_sectors); 1065 1066 index += (int)num_sectors; 1067 if (index == len) 1068 goto out; 1069 disk_bytenr += num_sectors * sectorsize; 1070 } 1071 } 1072 out: 1073 spin_unlock_irq(&tree->lock); 1074 btrfs_put_ordered_extent(ordered); 1075 return index; 1076 } 1077 1078 1079 /* 1080 * add a given inode to the list of inodes that must be fully on 1081 * disk before a transaction commit finishes. 1082 * 1083 * This basically gives us the ext3 style data=ordered mode, and it is mostly 1084 * used to make sure renamed files are fully on disk. 1085 * 1086 * It is a noop if the inode is already fully on disk. 1087 * 1088 * If trans is not null, we'll do a friendly check for a transaction that 1089 * is already flushing things and force the IO down ourselves. 1090 */ 1091 void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans, 1092 struct btrfs_root *root, struct inode *inode) 1093 { 1094 struct btrfs_transaction *cur_trans = trans->transaction; 1095 u64 last_mod; 1096 1097 last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans); 1098 1099 /* 1100 * if this file hasn't been changed since the last transaction 1101 * commit, we can safely return without doing anything 1102 */ 1103 if (last_mod <= root->fs_info->last_trans_committed) 1104 return; 1105 1106 spin_lock(&root->fs_info->ordered_root_lock); 1107 if (list_empty(&BTRFS_I(inode)->ordered_operations)) { 1108 list_add_tail(&BTRFS_I(inode)->ordered_operations, 1109 &cur_trans->ordered_operations); 1110 } 1111 spin_unlock(&root->fs_info->ordered_root_lock); 1112 } 1113 1114 int __init ordered_data_init(void) 1115 { 1116 btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent", 1117 sizeof(struct btrfs_ordered_extent), 0, 1118 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, 1119 NULL); 1120 if (!btrfs_ordered_extent_cache) 1121 return -ENOMEM; 1122 1123 return 0; 1124 } 1125 1126 void ordered_data_exit(void) 1127 { 1128 if (btrfs_ordered_extent_cache) 1129 kmem_cache_destroy(btrfs_ordered_extent_cache); 1130 } 1131