/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
#include <linux/pagevec.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"

struct compressed_bio {
	/* number of bios pending for this compressed extent */
	atomic_t pending_bios;

	/* the pages with the compressed data on them */
	struct page **compressed_pages;

	/* inode that owns this data */
	struct inode *inode;

	/* starting offset in the inode for our pages */
	u64 start;

	/* number of bytes in the inode we're working on */
	unsigned long len;

	/* number of bytes on disk */
	unsigned long compressed_len;

	/* number of compressed pages in the array */
	unsigned long nr_pages;

	/* IO errors */
	int errors;
	int mirror_num;

	/* for reads, this is the bio we are copying the data into */
	struct bio *orig_bio;

	/*
	 * the start of a variable length array of checksums only
	 * used by reads
	 */
	u32 sums;
};

static inline int compressed_bio_size(struct btrfs_root *root,
				      unsigned long disk_size)
{
	u16 csum_size = btrfs_super_csum_size(&root->fs_info->super_copy);
	return sizeof(struct compressed_bio) +
		((disk_size + root->sectorsize - 1) / root->sectorsize) *
		csum_size;
}

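/*
 * Example with illustrative numbers: with 4K sectors and 4-byte crc32c
 * checksums, a 64K compressed extent covers 16 sectors, so
 * compressed_bio_size() returns sizeof(struct compressed_bio) plus
 * 16 * 4 bytes.  Those per-sector checksums live in the variable
 * length array that starts at the sums field above.
 */
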
static struct bio *compressed_bio_alloc(struct block_device *bdev,
					u64 first_byte, gfp_t gfp_flags)
{
	struct bio *bio;
	int nr_vecs;

	nr_vecs = bio_get_nr_vecs(bdev);
	bio = bio_alloc(gfp_flags, nr_vecs);

	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio->bi_size = 0;
		bio->bi_bdev = bdev;
		bio->bi_sector = first_byte >> 9;
	}
	return bio;
}

static int check_compressed_csum(struct inode *inode,
				 struct compressed_bio *cb,
				 u64 disk_start)
{
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page *page;
	unsigned long i;
	char *kaddr;
	u32 csum;
	u32 *cb_sum = &cb->sums;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
		return 0;

	for (i = 0; i < cb->nr_pages; i++) {
		page = cb->compressed_pages[i];
		csum = ~(u32)0;

		kaddr = kmap_atomic(page, KM_USER0);
		csum = btrfs_csum_data(root, kaddr, csum, PAGE_CACHE_SIZE);
		btrfs_csum_final(csum, (char *)&csum);
		kunmap_atomic(kaddr, KM_USER0);

		if (csum != *cb_sum) {
			printk(KERN_INFO "btrfs csum failed ino %lu "
			       "extent %llu csum %u "
			       "wanted %u mirror %d\n", inode->i_ino,
			       (unsigned long long)disk_start,
			       csum, *cb_sum, cb->mirror_num);
			ret = -EIO;
			goto fail;
		}
		cb_sum++;
	}
	ret = 0;
fail:
	return ret;
}

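/*
 * Both the read and write completion paths below use the same scheme:
 * cb->pending_bios holds one reference for every bio submitted against
 * the compressed extent, and every end_io call drops one.  The bio that
 * drops the last reference (atomic_dec_and_test) does the real
 * completion work; all the others simply put their bio and return.
 */
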
/* when we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally
 *
 * The compressed pages are freed here, and it must be run
 * in process context
 */
static void end_compressed_bio_read(struct bio *bio, int err)
{
	struct extent_io_tree *tree;
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;
	int ret;

	if (err)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!atomic_dec_and_test(&cb->pending_bios))
		goto out;

	inode = cb->inode;
	ret = check_compressed_csum(inode, cb, (u64)bio->bi_sector << 9);
	if (ret)
		goto csum_failed;

	/* ok, we're the last bio for this extent, let's start
	 * the decompression.
	 */
	tree = &BTRFS_I(inode)->io_tree;
	ret = btrfs_zlib_decompress_biovec(cb->compressed_pages,
					   cb->start,
					   cb->orig_bio->bi_io_vec,
					   cb->orig_bio->bi_vcnt,
					   cb->compressed_len);
csum_failed:
	if (ret)
		cb->errors = 1;

	/* release the compressed pages */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		page_cache_release(page);
	}

	/* do io completion on the original bio */
	if (cb->errors) {
		bio_io_error(cb->orig_bio);
	} else {
		int bio_index = 0;
		struct bio_vec *bvec = cb->orig_bio->bi_io_vec;

		/*
		 * we have verified the checksum already, set page
		 * checked so the end_io handlers know about it
		 */
		while (bio_index < cb->orig_bio->bi_vcnt) {
			SetPageChecked(bvec->bv_page);
			bvec++;
			bio_index++;
		}
		bio_endio(cb->orig_bio, 0);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline int end_compressed_writeback(struct inode *inode, u64 start,
					     unsigned long ram_size)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = (start + ram_size - 1) >> PAGE_CACHE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int ret;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			end_page_writeback(pages[i]);
			page_cache_release(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* the inode may be gone now */
	return 0;
}

/*
 * do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio, int err)
{
	struct extent_io_tree *tree;
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;

	if (err)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!atomic_dec_and_test(&cb->pending_bios))
		goto out;

	/* ok, we're the last bio for this extent, step one is to
	 * call back into the FS and do all the end_io operations
	 */
	inode = cb->inode;
	tree = &BTRFS_I(inode)->io_tree;
	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
					 cb->start,
					 cb->start + cb->len - 1,
					 NULL, 1);
	cb->compressed_pages[0]->mapping = NULL;

	end_compressed_writeback(inode, cb->start, cb->len);
	/* note, our inode could be gone now */

	/*
	 * release the compressed pages, these came from alloc_page and
	 * are not attached to the inode at all
	 */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		page_cache_release(page);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

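/*
 * Both end_io handlers above do work that can't happen in interrupt
 * context (decompression, metadata updates via the end_io hooks, page
 * frees).  The submit paths below therefore push every bio through
 * btrfs_bio_wq_end_io() first, so the handlers are run by the endio
 * worker threads in process context, as the comments above require.
 */
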
/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
int btrfs_submit_compressed_write(struct inode *inode, u64 start,
				  unsigned long len, u64 disk_start,
				  unsigned long compressed_len,
				  struct page **compressed_pages,
				  unsigned long nr_pages)
{
	struct bio *bio = NULL;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct compressed_bio *cb;
	unsigned long bytes_left;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int page_index = 0;
	struct page *page;
	u64 first_byte = disk_start;
	struct block_device *bdev;
	int ret;

	WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1));
	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
	atomic_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->start = start;
	cb->len = len;
	cb->mirror_num = 0;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = compressed_len;
	cb->orig_bio = NULL;
	cb->nr_pages = nr_pages;

	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

	bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
	bio->bi_private = cb;
	bio->bi_end_io = end_compressed_bio_write;
	atomic_inc(&cb->pending_bios);

	/* create and submit bios for the compressed pages */
	bytes_left = compressed_len;
	for (page_index = 0; page_index < cb->nr_pages; page_index++) {
		page = compressed_pages[page_index];
		page->mapping = inode->i_mapping;
		if (bio->bi_size)
			ret = io_tree->ops->merge_bio_hook(page, 0,
							   PAGE_CACHE_SIZE,
							   bio, 0);
		else
			ret = 0;

		page->mapping = NULL;
		if (ret || bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) <
		    PAGE_CACHE_SIZE) {
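			/*
			 * take an extra reference on the bio: btrfs_map_bio
			 * may complete (and drop) it before we return, and
			 * we still bio_put() it ourselves below.
			 */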
			bio_get(bio);

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			atomic_inc(&cb->pending_bios);
			ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
			BUG_ON(ret);

			ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
			BUG_ON(ret);

			ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
			BUG_ON(ret);

			bio_put(bio);

			bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
			bio->bi_private = cb;
			bio->bi_end_io = end_compressed_bio_write;
			bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
		}
		if (bytes_left < PAGE_CACHE_SIZE) {
			printk(KERN_INFO "bytes left %lu compress len %lu nr %lu\n",
			       bytes_left, cb->compressed_len, cb->nr_pages);
		}
		bytes_left -= PAGE_CACHE_SIZE;
		first_byte += PAGE_CACHE_SIZE;
		cond_resched();
	}
	bio_get(bio);

	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
	BUG_ON(ret);

	ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
	BUG_ON(ret);

	ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
	BUG_ON(ret);

	bio_put(bio);
	return 0;
}

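/*
 * Readahead helper for compressed reads: the whole compressed extent has
 * to be read and decompressed anyway, so try to add the file pages that
 * come after the original bio (up to the end of the extent) to
 * cb->orig_bio.  A single decompression pass can then fill those pages
 * as well.  Pages that are already in the page cache, or that turn out
 * not to map to this compressed extent, end the readahead early.
 */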
static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
	unsigned long end_index;
	unsigned long page_index;
	u64 last_offset;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	unsigned long nr_pages = 0;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	u64 end;
	int misses = 0;

	page = cb->orig_bio->bi_io_vec[cb->orig_bio->bi_vcnt - 1].bv_page;
	last_offset = (page_offset(page) + PAGE_CACHE_SIZE);
	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;

	pagevec_init(&pvec, 0);
	while (last_offset < compressed_end) {
		page_index = last_offset >> PAGE_CACHE_SHIFT;

		if (page_index > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, page_index);
		rcu_read_unlock();
		if (page) {
			misses++;
			if (misses > 4)
				break;
			goto next;
		}

		page = alloc_page(mapping_gfp_mask(mapping) | GFP_NOFS);
		if (!page)
			break;

		page->index = page_index;
		/*
		 * what we want to do here is call add_to_page_cache_lru,
		 * but that isn't exported, so we reproduce it here
		 */
		if (add_to_page_cache(page, mapping,
				      page->index, GFP_NOFS)) {
			page_cache_release(page);
			goto next;
		}

		/* open coding of lru_cache_add, also not exported */
		page_cache_get(page);
		if (!pagevec_add(&pvec, page))
			__pagevec_lru_add_file(&pvec);

		end = last_offset + PAGE_CACHE_SIZE - 1;
		/*
		 * at this point, we have a locked page in the page cache
		 * for these bytes in the file.  But, we have to make
		 * sure they map to this compressed extent on disk.
		 */
		set_page_extent_mapped(page);
		lock_extent(tree, last_offset, end, GFP_NOFS);
		spin_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, last_offset,
					   PAGE_CACHE_SIZE);
		spin_unlock(&em_tree->lock);

		if (!em || last_offset < em->start ||
		    (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, last_offset, end, GFP_NOFS);
			unlock_page(page);
			page_cache_release(page);
			break;
		}
		free_extent_map(em);

		if (page->index == end_index) {
			char *userpage;
			size_t zero_offset = isize & (PAGE_CACHE_SIZE - 1);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_CACHE_SIZE - zero_offset;
				userpage = kmap_atomic(page, KM_USER0);
				memset(userpage + zero_offset, 0, zeros);
				flush_dcache_page(page);
				kunmap_atomic(userpage, KM_USER0);
			}
		}

		ret = bio_add_page(cb->orig_bio, page,
				   PAGE_CACHE_SIZE, 0);

		if (ret == PAGE_CACHE_SIZE) {
			nr_pages++;
			page_cache_release(page);
		} else {
			unlock_extent(tree, last_offset, end, GFP_NOFS);
			unlock_page(page);
			page_cache_release(page);
			break;
		}
next:
		last_offset += PAGE_CACHE_SIZE;
	}
	if (pagevec_count(&pvec))
		__pagevec_lru_add_file(&pvec);
	return 0;
}

/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 * bio->bi_vcnt is a count of pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	struct extent_io_tree *tree;
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
	unsigned long compressed_len;
	unsigned long nr_pages;
	unsigned long page_index;
	struct page *page;
	struct block_device *bdev;
	struct bio *comp_bio;
	u64 cur_disk_byte = (u64)bio->bi_sector << 9;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	int ret;
	u32 *sums;

	tree = &BTRFS_I(inode)->io_tree;
	em_tree = &BTRFS_I(inode)->extent_tree;

	/* we need the actual starting offset of this extent in the file */
	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree,
				   page_offset(bio->bi_io_vec->bv_page),
				   PAGE_CACHE_SIZE);
	spin_unlock(&em_tree->lock);

	compressed_len = em->block_len;
	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
	atomic_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->mirror_num = mirror_num;
	sums = &cb->sums;

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	free_extent_map(em);
	em = NULL;

	cb->len = uncompressed_len;
	cb->compressed_len = compressed_len;
	cb->orig_bio = bio;

	nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) /
				 PAGE_CACHE_SIZE;
	cb->compressed_pages = kmalloc(sizeof(struct page *) * nr_pages,
				       GFP_NOFS);
	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

	for (page_index = 0; page_index < nr_pages; page_index++) {
		cb->compressed_pages[page_index] = alloc_page(GFP_NOFS |
							      __GFP_HIGHMEM);
	}
	cb->nr_pages = nr_pages;

	add_ra_bio_pages(inode, em_start + em_len, cb);

	/* include any pages we added in add_ra_bio_pages */
	uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
	cb->len = uncompressed_len;

	comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
	comp_bio->bi_private = cb;
	comp_bio->bi_end_io = end_compressed_bio_read;
	atomic_inc(&cb->pending_bios);

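	/*
	 * build read bios for the compressed pages.  Whenever the block
	 * layer (or the merge hook) refuses another page, submit the bio
	 * with the slice of the checksum array that covers its sectors
	 * and start a fresh bio for the remaining pages.
	 */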
	for (page_index = 0; page_index < nr_pages; page_index++) {
		page = cb->compressed_pages[page_index];
		page->mapping = inode->i_mapping;
		page->index = em_start >> PAGE_CACHE_SHIFT;

		if (comp_bio->bi_size)
			ret = tree->ops->merge_bio_hook(page, 0,
							PAGE_CACHE_SIZE,
							comp_bio, 0);
		else
			ret = 0;

		page->mapping = NULL;
		if (ret || bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0) <
		    PAGE_CACHE_SIZE) {
			bio_get(comp_bio);

			ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
			BUG_ON(ret);

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			atomic_inc(&cb->pending_bios);

			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
				btrfs_lookup_bio_sums(root, inode, comp_bio,
						      sums);
			}
			sums += (comp_bio->bi_size + root->sectorsize - 1) /
				root->sectorsize;

			ret = btrfs_map_bio(root, READ, comp_bio,
					    mirror_num, 0);
			BUG_ON(ret);

			bio_put(comp_bio);

			comp_bio = compressed_bio_alloc(bdev, cur_disk_byte,
							GFP_NOFS);
			comp_bio->bi_private = cb;
			comp_bio->bi_end_io = end_compressed_bio_read;

			bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0);
		}
		cur_disk_byte += PAGE_CACHE_SIZE;
	}
	bio_get(comp_bio);

	ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
	BUG_ON(ret);

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM))
		btrfs_lookup_bio_sums(root, inode, comp_bio, sums);

	ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0);
	BUG_ON(ret);

	bio_put(comp_bio);
	return 0;
}