/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"

/* returns the byte offset just past this extent, saturating to (u64)-1
 * on overflow
 */
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}

/* insert a node keyed by file_offset into the rb-tree.  If an existing
 * entry already covers file_offset, that node is returned instead and
 * nothing is inserted.
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/* find the node covering file_offset.  If none covers it, return NULL
 * and set *prev_ret to the nearest entry so the caller can fall back
 * on it.
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

/* returns 1 if file_offset falls inside the byte range covered by entry */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

/* look for a given offset in the tree, caching the last hit to avoid
 * repeated rb-tree walks for adjacent lookups
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}
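
/*
 * Illustrative sketch of the lookup semantics above (not code from this
 * file): the rb-tree is keyed by byte ranges rather than single offsets,
 * so any offset inside an extent finds that extent.  Assuming one entry
 * covering bytes [4096, 8192) has been inserted:
 *
 *	tree_search(tree, 4096);   finds the entry (its first byte)
 *	tree_search(tree, 8191);   finds the entry (its last byte)
 *	tree_search(tree, 8192);   no entry covers this offset, so the
 *				   nearest neighbour is returned instead
 *
 * Callers use offset_in_entry() to tell a covering hit apart from a
 * nearest-neighbour fallback.
 */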

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * This also sets the EXTENT_ORDERED bit on the range in the inode.
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kzalloc(sizeof(*entry), GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	mutex_lock(&tree->mutex);
	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	/* one ref for the tree */
	atomic_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);

	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		atomic_inc(&entry->refs);
	}
	set_extent_ordered(&BTRFS_I(inode)->io_tree, file_offset,
			   entry_end(entry) - 1, GFP_NOFS);

	mutex_unlock(&tree->mutex);
	/* overlapping ordered extents are a logic error */
	BUG_ON(node);
	return 0;
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiple extents.
 */
int btrfs_add_ordered_sum(struct inode *inode,
			  struct btrfs_ordered_extent *entry,
			  struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	mutex_lock(&tree->mutex);
	list_add_tail(&sum->list, &entry->list);
	mutex_unlock(&tree->mutex);
	return 0;
}
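
/*
 * Illustrative sketch of how the write path is expected to tie these two
 * calls together (a hypothetical caller, not code from this file): the
 * ordered extent is created when the extent is reserved, and the sums
 * computed at bio submission time are attached to it so they can reach
 * the btree once the IO completes:
 *
 *	ret = btrfs_add_ordered_extent(inode, file_offset, disk_start, len);
 *	if (ret)
 *		return ret;
 *	...compute csums while building the bio...
 *	ordered = btrfs_lookup_ordered_extent(inode, file_offset);
 *	btrfs_add_ordered_sum(inode, ordered, sum);
 *	btrfs_put_ordered_extent(ordered);
 */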

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   u64 file_offset, u64 io_size)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	mutex_lock(&tree->mutex);
	clear_extent_ordered(io_tree, file_offset, file_offset + io_size - 1,
			     GFP_NOFS);
	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	ret = test_range_bit(io_tree, entry->file_offset,
			     entry->file_offset + entry->len - 1,
			     EXTENT_ORDERED, 0);
	if (ret == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
out:
	mutex_unlock(&tree->mutex);
	return ret == 0;
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	if (atomic_dec_and_test(&entry->refs)) {
		/* free any checksums still attached to this extent */
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kfree(entry);
	}
	return 0;
}

/*
 * remove an ordered extent from the tree.  No references are dropped,
 * but any waiters on this extent are woken up.
 */
int btrfs_remove_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;

	tree = &BTRFS_I(inode)->ordered_tree;
	mutex_lock(&tree->mutex);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	mutex_unlock(&tree->mutex);
	wake_up(&entry->wait);
	return 0;
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is set, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for pdflush to find them
	 */
	btrfs_fdatawrite_range(inode->i_mapping, start, end, WB_SYNC_NONE);
	if (wait)
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
}
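
/*
 * Illustrative sketch of the completion side (a hypothetical end-io path,
 * not code from this file): when writeback on a range finishes,
 * btrfs_dec_test_ordered_pending() reports whether the whole ordered
 * extent is done; only then are the metadata inserted and the extent
 * removed and released:
 *
 *	if (btrfs_dec_test_ordered_pending(inode, start, len)) {
 *		...insert the file extent item and csums into the btree...
 *		btrfs_remove_ordered_extent(inode, ordered);
 *		btrfs_put_ordered_extent(ordered);   drops the tree's ref
 *	}
 */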

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	u64 end;
	u64 orig_end;
	u64 wait_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}
	wait_end = orig_end;
again:
	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	btrfs_fdatawrite_range(inode->i_mapping, start, orig_end, WB_SYNC_NONE);

	btrfs_wait_on_page_writeback_range(inode->i_mapping,
					   start >> PAGE_CACHE_SHIFT,
					   orig_end >> PAGE_CACHE_SHIFT);

	/* walk the ordered extents in the range from the highest offset
	 * down, waiting on each one in turn
	 */
	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len < start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	if (test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
			   EXTENT_ORDERED | EXTENT_DELALLOC, 0)) {
		printk(KERN_ERR "inode %lu still ordered or delalloc after "
		       "wait %llu %llu\n", inode->i_ino,
		       (unsigned long long)start,
		       (unsigned long long)orig_end);
		goto again;
	}
}

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	mutex_lock(&tree->mutex);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		atomic_inc(&entry->refs);
out:
	mutex_unlock(&tree->mutex);
	return entry;
}

/*
 * lookup and return the ordered extent covering 'file_offset', or the
 * nearest entry in the tree if nothing covers it.  NULL is returned if
 * the tree is empty.  A reference is taken on the entry returned.
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	mutex_lock(&tree->mutex);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	atomic_inc(&entry->refs);
out:
	mutex_unlock(&tree->mutex);
	return entry;
}
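
/*
 * Illustrative note on reference counting (a hypothetical caller, not
 * code from this file): both lookup helpers above take a reference on
 * the entry they return, so every successful lookup must be balanced
 * with btrfs_put_ordered_extent():
 *
 *	ordered = btrfs_lookup_ordered_extent(inode, file_offset);
 *	if (ordered) {
 *		...inspect ordered->start, ordered->len...
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */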

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size_test;
	struct rb_node *node;
	struct btrfs_ordered_extent *test;

	mutex_lock(&tree->mutex);
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size >= inode->i_size ||
	    ordered->file_offset + ordered->len <= disk_i_size) {
		goto out;
	}

	/*
	 * we can't update the disk_i_size if there are delalloc bytes
	 * between disk_i_size and this ordered extent
	 */
	if (test_range_bit(io_tree, disk_i_size,
			   ordered->file_offset + ordered->len - 1,
			   EXTENT_DELALLOC, 0)) {
		goto out;
	}
	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	node = &ordered->rb_node;
	while (1) {
		node = rb_prev(node);
		if (!node)
			break;
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (test->file_offset + test->len <= disk_i_size)
			break;
		if (test->file_offset >= inode->i_size)
			break;
		if (test->file_offset >= disk_i_size)
			goto out;
	}
	new_i_size = min_t(u64, entry_end(ordered), i_size_read(inode));

	/*
	 * at this point, we know we can safely update i_size to at least
	 * the offset from this ordered extent.  But, we need to
	 * walk forward and see if ios from higher up in the file have
	 * finished.
	 */
	node = rb_next(&ordered->rb_node);
	i_size_test = 0;
	if (node) {
		/*
		 * do we have an area where IO might have finished
		 * between our ordered extent and the next one?
		 */
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (test->file_offset > entry_end(ordered))
			i_size_test = test->file_offset - 1;
	} else {
		i_size_test = i_size_read(inode);
	}

	/*
	 * i_size_test is the end of a region after this ordered
	 * extent where there are no ordered extents.  As long as there
	 * are no delalloc bytes in this area, it is safe to update
	 * disk_i_size to the end of the region.
	 */
	if (i_size_test > entry_end(ordered) &&
	    !test_range_bit(io_tree, entry_end(ordered), i_size_test,
			    EXTENT_DELALLOC, 0)) {
		new_i_size = min_t(u64, i_size_test, i_size_read(inode));
	}
	BTRFS_I(inode)->disk_i_size = new_i_size;
out:
	mutex_unlock(&tree->mutex);
	return 0;
}
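
/*
 * Worked example for the update above (illustrative numbers only): with
 * disk_i_size == 0, i_size == 12288, and pending ordered extents
 * A [0, 4096) and B [8192, 12288):
 *
 *	finishing B first leaves disk_i_size at 0, because the backward
 *	walk hits A, which still ends beyond disk_i_size;
 *
 *	finishing A advances disk_i_size to 4096, or to 8191 when the gap
 *	[4096, 8192) carries no delalloc bytes, since the forward walk
 *	sets i_size_test to B->file_offset - 1.
 */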

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u32 *sum)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_sector_sum *sector_sums;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	struct list_head *cur;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
	int ret = 1;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 1;

	mutex_lock(&tree->mutex);
	list_for_each_prev(cur, &ordered->list) {
		ordered_sum = list_entry(cur, struct btrfs_ordered_sum, list);
		if (offset >= ordered_sum->file_offset) {
			num_sectors = ordered_sum->len / sectorsize;
			sector_sums = ordered_sum->sums;
			for (i = 0; i < num_sectors; i++) {
				if (sector_sums[i].offset == offset) {
					*sum = sector_sums[i].sum;
					ret = 0;
					goto out;
				}
			}
		}
	}
out:
	mutex_unlock(&tree->mutex);
	/* drop the reference taken by the lookup above */
	btrfs_put_ordered_extent(ordered);
	return ret;
}
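
/*
 * Illustrative sketch of the intended fallback on the read side (a
 * hypothetical caller, not code from this file):
 *
 *	u32 csum;
 *
 *	if (btrfs_find_ordered_sum(inode, offset, &csum) == 0) {
 *		...the csum has not reached the btree yet, use it directly...
 *	} else {
 *		...fall back to looking the csum up in the btree...
 *	}
 */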

/**
 * taken from mm/filemap.c because it isn't exported
 *
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * @start:	offset in bytes where the range starts
 * @end:	offset in bytes where the range ends (inclusive)
 * @sync_mode:	enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */
int btrfs_fdatawrite_range(struct address_space *mapping, loff_t start,
			   loff_t end, int sync_mode)
{
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = mapping->nrpages * 2,
		.range_start = start,
		.range_end = end,
		.for_writepages = 1,
	};
	return btrfs_writepages(mapping, &wbc);
}

/**
 * taken from mm/filemap.c because it isn't exported
 *
 * wait_on_page_writeback_range - wait for writeback to complete
 * @mapping:	target address_space
 * @start:	beginning page index
 * @end:	ending page index
 *
 * Wait for writeback to complete against pages indexed by start->end
 * inclusive
 */
int btrfs_wait_on_page_writeback_range(struct address_space *mapping,
				       pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	int nr_pages;
	int ret = 0;
	pgoff_t index;

	if (end < start)
		return 0;

	pagevec_init(&pvec, 0);
	index = start;
	while ((index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (page->index > end)
				continue;

			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	/* Check for outstanding write errors */
	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;

	return ret;
}