/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
#include <linux/pagevec.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"

struct compressed_bio {
	/* number of bios pending for this compressed extent */
	atomic_t pending_bios;

	/* the pages with the compressed data on them */
	struct page **compressed_pages;

	/* inode that owns this data */
	struct inode *inode;

	/* starting offset in the inode for our pages */
	u64 start;

	/* number of bytes in the inode we're working on */
	unsigned long len;

	/* number of bytes on disk */
	unsigned long compressed_len;

	/* number of compressed pages in the array */
	unsigned long nr_pages;

	/* IO errors */
	int errors;
	int mirror_num;

	/* for reads, this is the bio we are copying the data into */
	struct bio *orig_bio;

	/*
	 * the start of a variable length array of checksums only
	 * used by reads
	 */
	u32 sums;
};

static inline int compressed_bio_size(struct btrfs_root *root,
				      unsigned long disk_size)
{
	u16 csum_size = btrfs_super_csum_size(&root->fs_info->super_copy);
	return sizeof(struct compressed_bio) +
		((disk_size + root->sectorsize - 1) / root->sectorsize) *
		csum_size;
}

static struct bio *compressed_bio_alloc(struct block_device *bdev,
					u64 first_byte, gfp_t gfp_flags)
{
	struct bio *bio;
	int nr_vecs;

	nr_vecs = bio_get_nr_vecs(bdev);
	bio = bio_alloc(gfp_flags, nr_vecs);

	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio->bi_size = 0;
		bio->bi_bdev = bdev;
		bio->bi_sector = first_byte >> 9;
	}
	return bio;
}

static int check_compressed_csum(struct inode *inode,
				 struct compressed_bio *cb,
				 u64 disk_start)
{
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page *page;
	unsigned long i;
	char *kaddr;
	u32 csum;
	u32 *cb_sum = &cb->sums;

	if (btrfs_test_flag(inode, NODATASUM))
		return 0;

	for (i = 0; i < cb->nr_pages; i++) {
		page = cb->compressed_pages[i];
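		/* csum one compressed page at a time and compare it
		 * against the sum stored in the cb's variable length
		 * sums array
		 */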
		csum = ~(u32)0;

		kaddr = kmap_atomic(page, KM_USER0);
		csum = btrfs_csum_data(root, kaddr, csum, PAGE_CACHE_SIZE);
		btrfs_csum_final(csum, (char *)&csum);
		kunmap_atomic(kaddr, KM_USER0);

		if (csum != *cb_sum) {
			printk(KERN_INFO "btrfs csum failed ino %lu "
			       "extent %llu csum %u "
			       "wanted %u mirror %d\n", inode->i_ino,
			       (unsigned long long)disk_start,
			       csum, *cb_sum, cb->mirror_num);
			ret = -EIO;
			goto fail;
		}
		cb_sum++;

	}
	ret = 0;
fail:
	return ret;
}

/* when we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally
 *
 * The compressed pages are freed here, and it must be run
 * in process context
 */
static void end_compressed_bio_read(struct bio *bio, int err)
{
	struct extent_io_tree *tree;
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;
	int ret;

	if (err)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!atomic_dec_and_test(&cb->pending_bios))
		goto out;

	inode = cb->inode;
	ret = check_compressed_csum(inode, cb, (u64)bio->bi_sector << 9);
	if (ret)
		goto csum_failed;

	/* ok, we're the last bio for this extent, let's start
	 * the decompression.
	 */
	tree = &BTRFS_I(inode)->io_tree;
	ret = btrfs_zlib_decompress_biovec(cb->compressed_pages,
					   cb->start,
					   cb->orig_bio->bi_io_vec,
					   cb->orig_bio->bi_vcnt,
					   cb->compressed_len);
csum_failed:
	if (ret)
		cb->errors = 1;

	/* release the compressed pages */
	index = 0;
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		page_cache_release(page);
	}

	/* do io completion on the original bio */
	if (cb->errors) {
		bio_io_error(cb->orig_bio);
	} else {
		int bio_index = 0;
		struct bio_vec *bvec = cb->orig_bio->bi_io_vec;

		/*
		 * we have verified the checksum already, set page
		 * checked so the end_io handlers know about it
		 */
		while (bio_index < cb->orig_bio->bi_vcnt) {
			SetPageChecked(bvec->bv_page);
			bvec++;
			bio_index++;
		}
		bio_endio(cb->orig_bio, 0);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline int end_compressed_writeback(struct inode *inode, u64 start,
					     unsigned long ram_size)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = (start + ram_size - 1) >> PAGE_CACHE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int ret;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			end_page_writeback(pages[i]);
			page_cache_release(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* the inode may be gone now */
	return 0;
}

/*
 * do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio, int err)
{
	struct extent_io_tree *tree;
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;

	if (err)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!atomic_dec_and_test(&cb->pending_bios))
		goto out;

	/* ok, we're the last bio for this extent, step one is to
	 * call back into the FS and do all the end_io operations
	 */
	inode = cb->inode;
	tree = &BTRFS_I(inode)->io_tree;
	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
					 cb->start,
					 cb->start + cb->len - 1,
					 NULL, 1);
	cb->compressed_pages[0]->mapping = NULL;

	end_compressed_writeback(inode, cb->start, cb->len);
	/* note, our inode could be gone now */

	/*
	 * release the compressed pages, these came from alloc_page and
	 * are not attached to the inode at all
	 */
	index = 0;
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		page_cache_release(page);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
int btrfs_submit_compressed_write(struct inode *inode, u64 start,
				  unsigned long len, u64 disk_start,
				  unsigned long compressed_len,
				  struct page **compressed_pages,
				  unsigned long nr_pages)
{
	struct bio *bio = NULL;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct compressed_bio *cb;
	unsigned long bytes_left;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int page_index = 0;
	struct page *page;
	u64 first_byte = disk_start;
	struct block_device *bdev;
	int ret;

	WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1));
	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
	atomic_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->start = start;
	cb->len = len;
	cb->mirror_num = 0;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = compressed_len;
	cb->orig_bio = NULL;
	cb->nr_pages = nr_pages;

	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

	bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
	bio->bi_private = cb;
	bio->bi_end_io = end_compressed_bio_write;
	atomic_inc(&cb->pending_bios);

	/* create and submit bios for the compressed pages */
	bytes_left = compressed_len;
	for (page_index = 0; page_index < cb->nr_pages; page_index++) {
		page = compressed_pages[page_index];
		page->mapping = inode->i_mapping;
		if (bio->bi_size)
			ret = io_tree->ops->merge_bio_hook(page, 0,
							   PAGE_CACHE_SIZE,
							   bio, 0);
		else
			ret = 0;

		page->mapping = NULL;
		if (ret || bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) <
		    PAGE_CACHE_SIZE) {
			bio_get(bio);

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			atomic_inc(&cb->pending_bios);
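			/* punt the end_io work off to the helper threads so
			 * it can run in process context, csum the data in
			 * this bio, then map it down to the device
			 */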
			ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
			BUG_ON(ret);

			ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
			BUG_ON(ret);

			ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
			BUG_ON(ret);

			bio_put(bio);

			bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
			bio->bi_private = cb;
			bio->bi_end_io = end_compressed_bio_write;
			bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
		}
		if (bytes_left < PAGE_CACHE_SIZE) {
			printk("bytes left %lu compress len %lu nr %lu\n",
			       bytes_left, cb->compressed_len, cb->nr_pages);
		}
		bytes_left -= PAGE_CACHE_SIZE;
		first_byte += PAGE_CACHE_SIZE;
		cond_resched();
	}
	bio_get(bio);

	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
	BUG_ON(ret);

	ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
	BUG_ON(ret);

	ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
	BUG_ON(ret);

	bio_put(bio);
	return 0;
}

static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
	unsigned long end_index;
	unsigned long page_index;
	u64 last_offset;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	unsigned long nr_pages = 0;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	u64 end;
	int misses = 0;

	page = cb->orig_bio->bi_io_vec[cb->orig_bio->bi_vcnt - 1].bv_page;
	last_offset = (page_offset(page) + PAGE_CACHE_SIZE);
	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;

	pagevec_init(&pvec, 0);
	while (last_offset < compressed_end) {
		page_index = last_offset >> PAGE_CACHE_SHIFT;

		if (page_index > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, page_index);
		rcu_read_unlock();
		if (page) {
			misses++;
			if (misses > 4)
				break;
			goto next;
		}

		page = alloc_page(mapping_gfp_mask(mapping) | GFP_NOFS);
		if (!page)
			break;

		page->index = page_index;
		/*
		 * what we want to do here is call add_to_page_cache_lru,
		 * but that isn't exported, so we reproduce it here
		 */
		if (add_to_page_cache(page, mapping,
				      page->index, GFP_NOFS)) {
			page_cache_release(page);
			goto next;
		}

		/* open coding of lru_cache_add, also not exported */
		page_cache_get(page);
		if (!pagevec_add(&pvec, page))
			__pagevec_lru_add_file(&pvec);

		end = last_offset + PAGE_CACHE_SIZE - 1;
		/*
		 * at this point, we have a locked page in the page cache
		 * for these bytes in the file.  But, we have to make
		 * sure they map to this compressed extent on disk.
		 */
		set_page_extent_mapped(page);
		lock_extent(tree, last_offset, end, GFP_NOFS);
		spin_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, last_offset,
					   PAGE_CACHE_SIZE);
		spin_unlock(&em_tree->lock);

		if (!em || last_offset < em->start ||
		    (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, last_offset, end, GFP_NOFS);
			unlock_page(page);
			page_cache_release(page);
			break;
		}
		free_extent_map(em);

		if (page->index == end_index) {
			char *userpage;
			size_t zero_offset = isize & (PAGE_CACHE_SIZE - 1);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_CACHE_SIZE - zero_offset;
				userpage = kmap_atomic(page, KM_USER0);
				memset(userpage + zero_offset, 0, zeros);
				flush_dcache_page(page);
				kunmap_atomic(userpage, KM_USER0);
			}
		}

		ret = bio_add_page(cb->orig_bio, page,
				   PAGE_CACHE_SIZE, 0);

		if (ret == PAGE_CACHE_SIZE) {
			nr_pages++;
			page_cache_release(page);
		} else {
			unlock_extent(tree, last_offset, end, GFP_NOFS);
			unlock_page(page);
			page_cache_release(page);
			break;
		}
next:
		last_offset += PAGE_CACHE_SIZE;
	}
	if (pagevec_count(&pvec))
		__pagevec_lru_add_file(&pvec);
	return 0;
}

/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 * bio->bi_vcnt is a count of pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	struct extent_io_tree *tree;
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
	unsigned long compressed_len;
	unsigned long nr_pages;
	unsigned long page_index;
	struct page *page;
	struct block_device *bdev;
	struct bio *comp_bio;
	u64 cur_disk_byte = (u64)bio->bi_sector << 9;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	int ret;
	u32 *sums;

	tree = &BTRFS_I(inode)->io_tree;
	em_tree = &BTRFS_I(inode)->extent_tree;

	/* we need the actual starting offset of this extent in the file */
	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree,
				   page_offset(bio->bi_io_vec->bv_page),
				   PAGE_CACHE_SIZE);
	spin_unlock(&em_tree->lock);

	compressed_len = em->block_len;
	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
	atomic_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->mirror_num = mirror_num;
	sums = &cb->sums;

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	free_extent_map(em);
	em = NULL;

	cb->len = uncompressed_len;
	cb->compressed_len = compressed_len;
	cb->orig_bio = bio;

	nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) /
		   PAGE_CACHE_SIZE;
	cb->compressed_pages = kmalloc(sizeof(struct page *) * nr_pages,
				       GFP_NOFS);
	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

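	/* allocate pages to hold the compressed data read in from disk */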
	for (page_index = 0; page_index < nr_pages; page_index++) {
		cb->compressed_pages[page_index] = alloc_page(GFP_NOFS |
							      __GFP_HIGHMEM);
	}
	cb->nr_pages = nr_pages;

	add_ra_bio_pages(inode, em_start + em_len, cb);

	/* include any pages we added in add_ra_bio_pages */
	uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
	cb->len = uncompressed_len;

	comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
	comp_bio->bi_private = cb;
	comp_bio->bi_end_io = end_compressed_bio_read;
	atomic_inc(&cb->pending_bios);

	for (page_index = 0; page_index < nr_pages; page_index++) {
		page = cb->compressed_pages[page_index];
		page->mapping = inode->i_mapping;
		page->index = em_start >> PAGE_CACHE_SHIFT;

		if (comp_bio->bi_size)
			ret = tree->ops->merge_bio_hook(page, 0,
							PAGE_CACHE_SIZE,
							comp_bio, 0);
		else
			ret = 0;

		page->mapping = NULL;
		if (ret || bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0) <
		    PAGE_CACHE_SIZE) {
			bio_get(comp_bio);

			ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
			BUG_ON(ret);

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			atomic_inc(&cb->pending_bios);

			if (!btrfs_test_flag(inode, NODATASUM)) {
				btrfs_lookup_bio_sums(root, inode, comp_bio,
						      sums);
			}
			sums += (comp_bio->bi_size + root->sectorsize - 1) /
				root->sectorsize;

			ret = btrfs_map_bio(root, READ, comp_bio,
					    mirror_num, 0);
			BUG_ON(ret);

			bio_put(comp_bio);

			comp_bio = compressed_bio_alloc(bdev, cur_disk_byte,
							GFP_NOFS);
			comp_bio->bi_private = cb;
			comp_bio->bi_end_io = end_compressed_bio_read;

			bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0);
		}
		cur_disk_byte += PAGE_CACHE_SIZE;
	}
	bio_get(comp_bio);

	ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
	BUG_ON(ret);

	if (!btrfs_test_flag(inode, NODATASUM))
		btrfs_lookup_bio_sums(root, inode, comp_bio, sums);

	ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0);
	BUG_ON(ret);

	bio_put(comp_bio);
	return 0;
}