/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/log2.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"

static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };

const char* btrfs_compress_type2str(enum btrfs_compression_type type)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB:
	case BTRFS_COMPRESS_LZO:
	case BTRFS_COMPRESS_ZSTD:
	case BTRFS_COMPRESS_NONE:
		return btrfs_compress_types[type];
	}

	return NULL;
}

static int btrfs_decompress_bio(struct compressed_bio *cb);

static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
				      unsigned long disk_size)
{
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	return sizeof(struct compressed_bio) +
		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
}

static int check_compressed_csum(struct btrfs_inode *inode,
				 struct compressed_bio *cb,
				 u64 disk_start)
{
	int ret;
	struct page *page;
	unsigned long i;
	char *kaddr;
	u32 csum;
	u32 *cb_sum = &cb->sums;

	if (inode->flags & BTRFS_INODE_NODATASUM)
		return 0;

	for (i = 0; i < cb->nr_pages; i++) {
		page = cb->compressed_pages[i];
		csum = ~(u32)0;

		kaddr = kmap_atomic(page);
		csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
		btrfs_csum_final(csum, (u8 *)&csum);
		kunmap_atomic(kaddr);

		if (csum != *cb_sum) {
			btrfs_print_data_csum_error(inode, disk_start, csum,
						    *cb_sum, cb->mirror_num);
			ret = -EIO;
			goto fail;
		}
		cb_sum++;
	}
	ret = 0;
fail:
	return ret;
}

/* when we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally
 *
 * The compressed pages are freed here, and it must be run
 * in process context
 */
static void end_compressed_bio_read(struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;
	unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
	int ret = 0;

	if (bio->bi_status)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/*
	 * Record the correct mirror_num in cb->orig_bio so that
	 * read-repair can work properly.
	 */
	ASSERT(btrfs_io_bio(cb->orig_bio));
	btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
	cb->mirror_num = mirror;

	/*
	 * Some IO in this cb has failed, just skip checksum as there
	 * is no way it could be correct.
	 */
	if (cb->errors == 1)
		goto csum_failed;

	inode = cb->inode;
	ret = check_compressed_csum(BTRFS_I(inode), cb,
				    (u64)bio->bi_iter.bi_sector << 9);
	if (ret)
		goto csum_failed;

	/* ok, we're the last bio for this extent, let's start
	 * the decompression.
	 */
	ret = btrfs_decompress_bio(cb);

csum_failed:
	if (ret)
		cb->errors = 1;

	/* release the compressed pages */
	index = 0;
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* do io completion on the original bio */
	if (cb->errors) {
		bio_io_error(cb->orig_bio);
	} else {
		int i;
		struct bio_vec *bvec;

		/*
		 * we have verified the checksum already, set page
		 * checked so the end_io handlers know about it
		 */
		ASSERT(!bio_flagged(bio, BIO_CLONED));
		bio_for_each_segment_all(bvec, cb->orig_bio, i)
			SetPageChecked(bvec->bv_page);

		bio_endio(cb->orig_bio);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(struct inode *inode,
					      const struct compressed_bio *cb)
{
	unsigned long index = cb->start >> PAGE_SHIFT;
	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int ret;

	if (cb->errors)
		mapping_set_error(inode->i_mapping, -EIO);

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
					    min_t(unsigned long,
						  nr_pages, ARRAY_SIZE(pages)),
					    pages);
		if (ret == 0) {
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			if (cb->errors)
				SetPageError(pages[i]);
			end_page_writeback(pages[i]);
			put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* the inode may be gone now */
}

/*
 * do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
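 *
 * Like the read completion above, only the last bio still pending for the
 * extent (tracked by cb->pending_bios) performs this cleanup.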
 */
static void end_compressed_bio_write(struct bio *bio)
{
	struct extent_io_tree *tree;
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;

	if (bio->bi_status)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/* ok, we're the last bio for this extent, step one is to
	 * call back into the FS and do all the end_io operations
	 */
	inode = cb->inode;
	tree = &BTRFS_I(inode)->io_tree;
	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
					 cb->start,
					 cb->start + cb->len - 1,
					 NULL,
					 bio->bi_status ?
					 BLK_STS_OK : BLK_STS_NOTSUPP);
	cb->compressed_pages[0]->mapping = NULL;

	end_compressed_writeback(inode, cb);
	/* note, our inode could be gone now */

	/*
	 * release the compressed pages, these came from alloc_page and
	 * are not attached to the inode at all
	 */
	index = 0;
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
					   unsigned long len, u64 disk_start,
					   unsigned long compressed_len,
					   struct page **compressed_pages,
					   unsigned long nr_pages,
					   unsigned int write_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct bio *bio = NULL;
	struct compressed_bio *cb;
	unsigned long bytes_left;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int pg_index = 0;
	struct page *page;
	u64 first_byte = disk_start;
	struct block_device *bdev;
	blk_status_t ret;
	int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	WARN_ON(start & ((u64)PAGE_SIZE - 1));
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		return BLK_STS_RESOURCE;
	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->start = start;
	cb->len = len;
	cb->mirror_num = 0;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = compressed_len;
	cb->orig_bio = NULL;
	cb->nr_pages = nr_pages;

	bdev = fs_info->fs_devices->latest_bdev;

	bio = btrfs_bio_alloc(bdev, first_byte);
	bio->bi_opf = REQ_OP_WRITE | write_flags;
	bio->bi_private = cb;
	bio->bi_end_io = end_compressed_bio_write;
	refcount_set(&cb->pending_bios, 1);

	/* create and submit bios for the compressed pages */
	bytes_left = compressed_len;
	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
		int submit = 0;

		page = compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		if (bio->bi_iter.bi_size)
			submit = io_tree->ops->merge_bio_hook(page, 0,
							      PAGE_SIZE,
							      bio, 0);

		page->mapping = NULL;
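		/*
		 * Submit the bio built so far if the merge hook asked us to,
		 * or if the page no longer fits in it; a fresh bio is started
		 * below and the page is added to that one instead.
		 */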
		if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			refcount_inc(&cb->pending_bios);
			ret = btrfs_bio_wq_end_io(fs_info, bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			if (!skip_sum) {
				ret = btrfs_csum_one_bio(inode, bio, start, 1);
				BUG_ON(ret); /* -ENOMEM */
			}

			ret = btrfs_map_bio(fs_info, bio, 0, 1);
			if (ret) {
				bio->bi_status = ret;
				bio_endio(bio);
			}

			bio = btrfs_bio_alloc(bdev, first_byte);
			bio->bi_opf = REQ_OP_WRITE | write_flags;
			bio->bi_private = cb;
			bio->bi_end_io = end_compressed_bio_write;
			bio_add_page(bio, page, PAGE_SIZE, 0);
		}
		if (bytes_left < PAGE_SIZE) {
			btrfs_info(fs_info,
				   "bytes left %lu compress len %lu nr %lu",
				   bytes_left, cb->compressed_len,
				   cb->nr_pages);
		}
		bytes_left -= PAGE_SIZE;
		first_byte += PAGE_SIZE;
		cond_resched();
	}

	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!skip_sum) {
		ret = btrfs_csum_one_bio(inode, bio, start, 1);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, bio, 0, 1);
	if (ret) {
		bio->bi_status = ret;
		bio_endio(bio);
	}

	return 0;
}

static u64 bio_end_offset(struct bio *bio)
{
	struct bio_vec *last = bio_last_bvec_all(bio);

	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
}

static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
	unsigned long end_index;
	unsigned long pg_index;
	u64 last_offset;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	unsigned long nr_pages = 0;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	u64 end;
	int misses = 0;

	last_offset = bio_end_offset(cb->orig_bio);
	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	while (last_offset < compressed_end) {
		pg_index = last_offset >> PAGE_SHIFT;

		if (pg_index > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, pg_index);
		rcu_read_unlock();
		if (page && !radix_tree_exceptional_entry(page)) {
			misses++;
			if (misses > 4)
				break;
			goto next;
		}

		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
								 ~__GFP_FS));
		if (!page)
			break;

		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
			put_page(page);
			goto next;
		}

		end = last_offset + PAGE_SIZE - 1;
		/*
		 * at this point, we have a locked page in the page cache
		 * for these bytes in the file.  But, we have to make
		 * sure they map to this compressed extent on disk.
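		 * If the page is not fully covered by the extent map, or the
		 * extent's disk start does not match the sector of the bio
		 * being built, the readahead stops here (checked right below).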
		 */
		set_page_extent_mapped(page);
		lock_extent(tree, last_offset, end);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, last_offset,
					   PAGE_SIZE);
		read_unlock(&em_tree->lock);

		if (!em || last_offset < em->start ||
		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
		free_extent_map(em);

		if (page->index == end_index) {
			char *userpage;
			size_t zero_offset = isize & (PAGE_SIZE - 1);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_SIZE - zero_offset;
				userpage = kmap_atomic(page);
				memset(userpage + zero_offset, 0, zeros);
				flush_dcache_page(page);
				kunmap_atomic(userpage);
			}
		}

		ret = bio_add_page(cb->orig_bio, page,
				   PAGE_SIZE, 0);

		if (ret == PAGE_SIZE) {
			nr_pages++;
			put_page(page);
		} else {
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
next:
		last_offset += PAGE_SIZE;
	}
	return 0;
}

/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
					  int mirror_num, unsigned long bio_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_io_tree *tree;
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	unsigned long compressed_len;
	unsigned long nr_pages;
	unsigned long pg_index;
	struct page *page;
	struct block_device *bdev;
	struct bio *comp_bio;
	u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	blk_status_t ret = BLK_STS_RESOURCE;
	int faili = 0;
	u32 *sums;

	tree = &BTRFS_I(inode)->io_tree;
	em_tree = &BTRFS_I(inode)->extent_tree;

	/* we need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree,
				   page_offset(bio_first_page_all(bio)),
				   PAGE_SIZE);
	read_unlock(&em_tree->lock);
	if (!em)
		return BLK_STS_IOERR;

	compressed_len = em->block_len;
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		goto out;

	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->mirror_num = mirror_num;
	sums = &cb->sums;

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	free_extent_map(em);
	em = NULL;

	cb->len = bio->bi_iter.bi_size;
	cb->compressed_len = compressed_len;
	cb->compress_type = extent_compress_type(bio_flags);
	cb->orig_bio = bio;

	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
				       GFP_NOFS);
	if (!cb->compressed_pages)
		goto fail1;

	bdev = fs_info->fs_devices->latest_bdev;

	for (pg_index = 0; pg_index < nr_pages;
	     pg_index++) {
		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
							    __GFP_HIGHMEM);
		if (!cb->compressed_pages[pg_index]) {
			faili = pg_index - 1;
			ret = BLK_STS_RESOURCE;
			goto fail2;
		}
	}
	faili = nr_pages - 1;
	cb->nr_pages = nr_pages;

	add_ra_bio_pages(inode, em_start + em_len, cb);

	/* include any pages we added in add_ra_bio_pages */
	cb->len = bio->bi_iter.bi_size;

	comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
	bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
	comp_bio->bi_private = cb;
	comp_bio->bi_end_io = end_compressed_bio_read;
	refcount_set(&cb->pending_bios, 1);

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		int submit = 0;

		page = cb->compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		page->index = em_start >> PAGE_SHIFT;

		if (comp_bio->bi_iter.bi_size)
			submit = tree->ops->merge_bio_hook(page, 0,
							   PAGE_SIZE,
							   comp_bio, 0);

		page->mapping = NULL;
		if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			refcount_inc(&cb->pending_bios);

			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
				ret = btrfs_lookup_bio_sums(inode, comp_bio,
							    sums);
				BUG_ON(ret); /* -ENOMEM */
			}
			sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
					     fs_info->sectorsize);

			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
			if (ret) {
				comp_bio->bi_status = ret;
				bio_endio(comp_bio);
			}

			comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
			bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
			comp_bio->bi_private = cb;
			comp_bio->bi_end_io = end_compressed_bio_read;

			bio_add_page(comp_bio, page, PAGE_SIZE, 0);
		}
		cur_disk_byte += PAGE_SIZE;
	}

	ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
	if (ret) {
		comp_bio->bi_status = ret;
		bio_endio(comp_bio);
	}

	return 0;

fail2:
	while (faili >= 0) {
		__free_page(cb->compressed_pages[faili]);
		faili--;
	}

	kfree(cb->compressed_pages);
fail1:
	kfree(cb);
out:
	free_extent_map(em);
	return ret;
}

/*
 * Heuristic uses systematic sampling to collect data from the input data
 * range, the logic can be tuned by the following constants:
 *
 * @SAMPLING_READ_SIZE - how many bytes will be copied for each sample
 * @SAMPLING_INTERVAL  - range from which the sampled data can be collected
 */
#define SAMPLING_READ_SIZE	(16)
#define SAMPLING_INTERVAL	(256)

/*
 * For statistical analysis of the input data we consider bytes that form a
 * Galois Field of 256 objects.  Each object has an attribute count, ie. how
 * many times the object appeared in the sample.
 */
#define BUCKET_SIZE		(256)

/*
 * The size of the sample is based on a statistical sampling rule of thumb.
 * The common way is to perform sampling tests as long as the number of
 * elements in each cell is at least 5.
 *
 * Instead of 5, we choose 32 to obtain more accurate results.
 * If the data contain the maximum number of symbols, which is 256, we obtain a
 * sample size bound by 8192.
 *
 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 * from up to 512 locations.
 */
#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED * \
				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)

struct bucket_item {
	u32 count;
};

struct heuristic_ws {
	/* Partial copy of input data */
	u8 *sample;
	u32 sample_size;
	/* Buckets store counters for each byte value */
	struct bucket_item *bucket;
	/* Sorting buffer */
	struct bucket_item *bucket_b;
	struct list_head list;
};

static void free_heuristic_ws(struct list_head *ws)
{
	struct heuristic_ws *workspace;

	workspace = list_entry(ws, struct heuristic_ws, list);

	kvfree(workspace->sample);
	kfree(workspace->bucket);
	kfree(workspace->bucket_b);
	kfree(workspace);
}

static struct list_head *alloc_heuristic_ws(void)
{
	struct heuristic_ws *ws;

	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
	if (!ws)
		return ERR_PTR(-ENOMEM);

	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
	if (!ws->sample)
		goto fail;

	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
	if (!ws->bucket)
		goto fail;

	ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
	if (!ws->bucket_b)
		goto fail;

	INIT_LIST_HEAD(&ws->list);
	return &ws->list;
fail:
	free_heuristic_ws(&ws->list);
	return ERR_PTR(-ENOMEM);
}

struct workspaces_list {
	struct list_head idle_ws;
	spinlock_t ws_lock;
	/* Number of free workspaces */
	int free_ws;
	/* Total number of allocated workspaces */
	atomic_t total_ws;
	/* Waiters for a free workspace */
	wait_queue_head_t ws_wait;
};

static struct workspaces_list btrfs_comp_ws[BTRFS_COMPRESS_TYPES];

static struct workspaces_list btrfs_heuristic_ws;

static const struct btrfs_compress_op * const btrfs_compress_op[] = {
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
	&btrfs_zstd_compress,
};

void __init btrfs_init_compress(void)
{
	struct list_head *workspace;
	int i;

	INIT_LIST_HEAD(&btrfs_heuristic_ws.idle_ws);
	spin_lock_init(&btrfs_heuristic_ws.ws_lock);
	atomic_set(&btrfs_heuristic_ws.total_ws, 0);
	init_waitqueue_head(&btrfs_heuristic_ws.ws_wait);

	workspace = alloc_heuristic_ws();
	if (IS_ERR(workspace)) {
		pr_warn(
	"BTRFS: cannot preallocate heuristic workspace, will try later\n");
	} else {
		atomic_set(&btrfs_heuristic_ws.total_ws, 1);
		btrfs_heuristic_ws.free_ws = 1;
		list_add(workspace, &btrfs_heuristic_ws.idle_ws);
	}

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
		spin_lock_init(&btrfs_comp_ws[i].ws_lock);
		atomic_set(&btrfs_comp_ws[i].total_ws, 0);
		init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);

		/*
		 * Preallocate one workspace for each compression type so
		 * we can guarantee forward progress in the worst case
		 */
		workspace = btrfs_compress_op[i]->alloc_workspace();
		if (IS_ERR(workspace)) {
			pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
		} else {
			atomic_set(&btrfs_comp_ws[i].total_ws, 1);
			btrfs_comp_ws[i].free_ws = 1;
			list_add(workspace, &btrfs_comp_ws[i].idle_ws);
		}
	}
}

/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, waits until there's one.
 * Preallocation makes forward progress guarantees and we do not return
 * errors.
 */
static struct list_head *__find_workspace(int type, bool heuristic)
{
	struct list_head *workspace;
	int cpus = num_online_cpus();
	int idx = type - 1;
	unsigned nofs_flag;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	if (heuristic) {
		idle_ws = &btrfs_heuristic_ws.idle_ws;
		ws_lock = &btrfs_heuristic_ws.ws_lock;
		total_ws = &btrfs_heuristic_ws.total_ws;
		ws_wait = &btrfs_heuristic_ws.ws_wait;
		free_ws = &btrfs_heuristic_ws.free_ws;
	} else {
		idle_ws = &btrfs_comp_ws[idx].idle_ws;
		ws_lock = &btrfs_comp_ws[idx].ws_lock;
		total_ws = &btrfs_comp_ws[idx].total_ws;
		ws_wait = &btrfs_comp_ws[idx].ws_wait;
		free_ws = &btrfs_comp_ws[idx].free_ws;
	}

again:
	spin_lock(ws_lock);
	if (!list_empty(idle_ws)) {
		workspace = idle_ws->next;
		list_del(workspace);
		(*free_ws)--;
		spin_unlock(ws_lock);
		return workspace;

	}
	if (atomic_read(total_ws) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(ws_lock);
		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(total_ws) > cpus && !*free_ws)
			schedule();
		finish_wait(ws_wait, &wait);
		goto again;
	}
	atomic_inc(total_ws);
	spin_unlock(ws_lock);

	/*
	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
	 * to turn it off here because we might get called from the restricted
	 * context of btrfs_compress_bio/btrfs_compress_pages
	 */
	nofs_flag = memalloc_nofs_save();
	if (heuristic)
		workspace = alloc_heuristic_ws();
	else
		workspace = btrfs_compress_op[idx]->alloc_workspace();
	memalloc_nofs_restore(nofs_flag);

	if (IS_ERR(workspace)) {
		atomic_dec(total_ws);
		wake_up(ws_wait);

		/*
		 * Do not return the error but go back to waiting.  There's a
		 * workspace preallocated for each type and the compression
		 * time is bounded so we get to a workspace eventually.  This
		 * makes our caller's life easier.
		 *
		 * To prevent silent and low-probability deadlocks (when the
		 * initial preallocation fails), check if there are any
		 * workspaces at all.
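		 * If none exist, warn (ratelimited below) so the low-memory
		 * situation is visible, then retry.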
		 */
		if (atomic_read(total_ws) == 0) {
			static DEFINE_RATELIMIT_STATE(_rs,
					/* once per minute */ 60 * HZ,
					/* no burst */ 1);

			if (__ratelimit(&_rs)) {
				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
			}
		}
		goto again;
	}
	return workspace;
}

static struct list_head *find_workspace(int type)
{
	return __find_workspace(type, false);
}

/*
 * put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
static void __free_workspace(int type, struct list_head *workspace,
			     bool heuristic)
{
	int idx = type - 1;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	if (heuristic) {
		idle_ws = &btrfs_heuristic_ws.idle_ws;
		ws_lock = &btrfs_heuristic_ws.ws_lock;
		total_ws = &btrfs_heuristic_ws.total_ws;
		ws_wait = &btrfs_heuristic_ws.ws_wait;
		free_ws = &btrfs_heuristic_ws.free_ws;
	} else {
		idle_ws = &btrfs_comp_ws[idx].idle_ws;
		ws_lock = &btrfs_comp_ws[idx].ws_lock;
		total_ws = &btrfs_comp_ws[idx].total_ws;
		ws_wait = &btrfs_comp_ws[idx].ws_wait;
		free_ws = &btrfs_comp_ws[idx].free_ws;
	}

	spin_lock(ws_lock);
	if (*free_ws <= num_online_cpus()) {
		list_add(workspace, idle_ws);
		(*free_ws)++;
		spin_unlock(ws_lock);
		goto wake;
	}
	spin_unlock(ws_lock);

	if (heuristic)
		free_heuristic_ws(workspace);
	else
		btrfs_compress_op[idx]->free_workspace(workspace);
	atomic_dec(total_ws);
wake:
	/*
	 * Make sure counter is updated before we wake up waiters.
	 */
	smp_mb();
	if (waitqueue_active(ws_wait))
		wake_up(ws_wait);
}

static void free_workspace(int type, struct list_head *ws)
{
	return __free_workspace(type, ws, false);
}

/*
 * cleanup function for module exit
 */
static void free_workspaces(void)
{
	struct list_head *workspace;
	int i;

	while (!list_empty(&btrfs_heuristic_ws.idle_ws)) {
		workspace = btrfs_heuristic_ws.idle_ws.next;
		list_del(workspace);
		free_heuristic_ws(workspace);
		atomic_dec(&btrfs_heuristic_ws.total_ws);
	}

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		while (!list_empty(&btrfs_comp_ws[i].idle_ws)) {
			workspace = btrfs_comp_ws[i].idle_ws.next;
			list_del(workspace);
			btrfs_compress_op[i]->free_workspace(workspace);
			atomic_dec(&btrfs_comp_ws[i].total_ws);
		}
	}
}

/*
 * Given an address space and start and length, compress the bytes into @pages
 * that are allocated on demand.
 *
 * @type_level is encoded algorithm and level, where level 0 means whatever
 * default the algorithm chooses and is opaque here;
 * - the compression algorithm is in bits 0-3
 * - the level is in bits 4-7
 *
 * @out_pages is an in/out parameter, holds maximum number of pages to allocate
 * and returns number of actually allocated pages
 *
 * @total_in is used to return the number of bytes actually read.  It
 * may be smaller than the input length if we had to exit early because we
 * ran out of room in the pages array or because we cross the
 * max_out threshold.
 *
 * @total_out is an in/out parameter, must be set to the input length and will
 * be also used to return the total number of compressed bytes
 *
 * @max_out tells us the max number of bytes that we're allowed to
 * stuff into pages
 */
int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
			 u64 start, struct page **pages,
			 unsigned long *out_pages,
			 unsigned long *total_in,
			 unsigned long *total_out)
{
	struct list_head *workspace;
	int ret;
	int type = type_level & 0xF;

	workspace = find_workspace(type);

	btrfs_compress_op[type - 1]->set_level(workspace, type_level);
	ret = btrfs_compress_op[type - 1]->compress_pages(workspace, mapping,
							  start, pages,
							  out_pages,
							  total_in, total_out);
	free_workspace(type, workspace);
	return ret;
}

/*
 * pages_in is an array of pages with compressed data.
 *
 * disk_start is the starting logical offset of this array in the file
 *
 * orig_bio contains the pages from the file that we want to decompress into
 *
 * srclen is the number of bytes in pages_in
 *
 * The basic idea is that we have a bio that was created by readpages.
 * The pages in the bio are for the uncompressed data, and they may not
 * be contiguous.  They all correspond to the range of bytes covered by
 * the compressed extent.
 */
static int btrfs_decompress_bio(struct compressed_bio *cb)
{
	struct list_head *workspace;
	int ret;
	int type = cb->compress_type;

	workspace = find_workspace(type);
	ret = btrfs_compress_op[type - 1]->decompress_bio(workspace, cb);
	free_workspace(type, workspace);

	return ret;
}

/*
 * a less complex decompression routine.  Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the compressed data we're interested in
 */
int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
		     unsigned long start_byte, size_t srclen, size_t destlen)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);

	ret = btrfs_compress_op[type - 1]->decompress(workspace, data_in,
						      dest_page, start_byte,
						      srclen, destlen);

	free_workspace(type, workspace);
	return ret;
}

void btrfs_exit_compress(void)
{
	free_workspaces();
}

/*
 * Copy uncompressed data from working buffer to pages.
 *
 * buf_start is the byte offset of our working buffer within the decompressed
 * data.
 *
 * total_out is the last byte of the buffer
 */
int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
			      unsigned long total_out, u64 disk_start,
			      struct bio *bio)
{
	unsigned long buf_offset;
	unsigned long current_buf_start;
	unsigned long start_byte;
	unsigned long prev_start_byte;
	unsigned long working_bytes = total_out - buf_start;
	unsigned long bytes;
	char *kaddr;
	struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);

	/*
	 * start byte is the first byte of the page we're currently
	 * copying into relative to the start of the compressed data.
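	 * (computed below as page_offset(bvec.bv_page) - disk_start)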
	 */
	start_byte = page_offset(bvec.bv_page) - disk_start;

	/* we haven't yet hit data corresponding to this page */
	if (total_out <= start_byte)
		return 1;

	/*
	 * the start of the data we care about is offset into
	 * the middle of our working buffer
	 */
	if (total_out > start_byte && buf_start < start_byte) {
		buf_offset = start_byte - buf_start;
		working_bytes -= buf_offset;
	} else {
		buf_offset = 0;
	}
	current_buf_start = buf_start;

	/* copy bytes from the working buffer into the pages */
	while (working_bytes > 0) {
		bytes = min_t(unsigned long, bvec.bv_len,
			      PAGE_SIZE - buf_offset);
		bytes = min(bytes, working_bytes);

		kaddr = kmap_atomic(bvec.bv_page);
		memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);
		kunmap_atomic(kaddr);
		flush_dcache_page(bvec.bv_page);

		buf_offset += bytes;
		working_bytes -= bytes;
		current_buf_start += bytes;

		/* check if we need to pick another page */
		bio_advance(bio, bytes);
		if (!bio->bi_iter.bi_size)
			return 0;
		bvec = bio_iter_iovec(bio, bio->bi_iter);
		prev_start_byte = start_byte;
		start_byte = page_offset(bvec.bv_page) - disk_start;

		/*
		 * We need to make sure we're only adjusting
		 * our offset into compression working buffer when
		 * we're switching pages.  Otherwise we can incorrectly
		 * keep copying when we were actually done.
		 */
		if (start_byte != prev_start_byte) {
			/*
			 * make sure our new page is covered by this
			 * working buffer
			 */
			if (total_out <= start_byte)
				return 1;

			/*
			 * the next page in the biovec might not be adjacent
			 * to the last page, but it might still be found
			 * inside this working buffer.  bump our offset pointer
			 */
			if (total_out > start_byte &&
			    current_buf_start < start_byte) {
				buf_offset = start_byte - buf_start;
				working_bytes = total_out - start_byte;
				current_buf_start = buf_start + buf_offset;
			}
		}
	}

	return 1;
}

/*
 * Shannon Entropy calculation
 *
 * Pure byte distribution analysis fails to determine compressibility of data.
 * Try calculating entropy to estimate the average minimum number of bits
 * needed to encode the sampled data.
 *
 * For convenience, return the percentage of needed bits, instead of amount of
 * bits directly.
 *
 * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
 *			    and can be compressible with high probability
 *
 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
 *
 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
 */
#define ENTROPY_LVL_ACEPTABLE		(65)
#define ENTROPY_LVL_HIGH		(80)

/*
 * For increased precision in shannon_entropy calculation,
 * let's do pow(n, M) to save more digits after the decimal point:
 *
 * - maximum int bit length is 64
 * - ilog2(MAX_SAMPLE_SIZE) -> 13
 * - 13 * 4 = 52 < 64 -> M = 4
 *
 * So use pow(n, 4).
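 *
 * Example: for n = 8192 = 2^13, ilog2_w(n) = ilog2(2^52) = 52, i.e. four
 * times the plain ilog2 value.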
 */
static inline u32 ilog2_w(u64 n)
{
	return ilog2(n * n * n * n);
}

static u32 shannon_entropy(struct heuristic_ws *ws)
{
	const u32 entropy_max = 8 * ilog2_w(2);
	u32 entropy_sum = 0;
	u32 p, p_base, sz_base;
	u32 i;

	sz_base = ilog2_w(ws->sample_size);
	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
		p = ws->bucket[i].count;
		p_base = ilog2_w(p);
		entropy_sum += p * (sz_base - p_base);
	}

	entropy_sum /= ws->sample_size;
	return entropy_sum * 100 / entropy_max;
}

#define RADIX_BASE		4U
#define COUNTERS_SIZE		(1U << RADIX_BASE)

static u8 get4bits(u64 num, int shift)
{
	u8 low4bits;

	num >>= shift;
	/* Reverse order */
	low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
	return low4bits;
}

/*
 * Use 4 bits as radix base
 * Use 16 u32 counters for calculating new position in buf array
 *
 * @array     - array that will be sorted
 * @array_buf - buffer array to store sorting results
 *              must be equal in size to @array
 * @num       - array size
 */
static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
		       int num)
{
	u64 max_num;
	u64 buf_num;
	u32 counters[COUNTERS_SIZE];
	u32 new_addr;
	u32 addr;
	int bitlen;
	int shift;
	int i;

	/*
	 * Try to avoid useless loop iterations for small numbers stored in big
	 * counters.  Example: 48 33 4 ... in 64bit array
	 */
	max_num = array[0].count;
	for (i = 1; i < num; i++) {
		buf_num = array[i].count;
		if (buf_num > max_num)
			max_num = buf_num;
	}

	buf_num = ilog2(max_num);
	bitlen = ALIGN(buf_num, RADIX_BASE * 2);

	shift = 0;
	while (shift < bitlen) {
		memset(counters, 0, sizeof(counters));

		for (i = 0; i < num; i++) {
			buf_num = array[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]++;
		}

		for (i = 1; i < COUNTERS_SIZE; i++)
			counters[i] += counters[i - 1];

		for (i = num - 1; i >= 0; i--) {
			buf_num = array[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]--;
			new_addr = counters[addr];
			array_buf[new_addr] = array[i];
		}

		shift += RADIX_BASE;

		/*
		 * Normal radix expects to move data from a temporary array, to
		 * the main one.  But that requires some CPU time.  Avoid that
		 * by doing another sort iteration to original array instead of
		 * memcpy()
		 */
		memset(counters, 0, sizeof(counters));

		for (i = 0; i < num; i++) {
			buf_num = array_buf[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]++;
		}

		for (i = 1; i < COUNTERS_SIZE; i++)
			counters[i] += counters[i - 1];

		for (i = num - 1; i >= 0; i--) {
			buf_num = array_buf[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]--;
			new_addr = counters[addr];
			array[new_addr] = array_buf[i];
		}

		shift += RADIX_BASE;
	}
}

/*
 * Size of the core byte set - how many bytes cover 90% of the sample
 *
 * There are several types of structured binary data that use nearly all byte
 * values.  The distribution can be uniform and counts in all buckets will be
 * nearly the same (eg. encrypted data).  Unlikely to be compressible.
 *
 * Other possibility is normal (Gaussian) distribution, where the data could
 * be potentially compressible, but we have to take a few more steps to decide
 * how much.
 *
 * @BYTE_CORE_SET_LOW  - main part of byte values repeated frequently,
 *                       compression algo can easily fix that
 * @BYTE_CORE_SET_HIGH - data have uniform distribution and with high
 *                       probability is not compressible
 */
#define BYTE_CORE_SET_LOW		(64)
#define BYTE_CORE_SET_HIGH		(200)

static int byte_core_set_size(struct heuristic_ws *ws)
{
	u32 i;
	u32 coreset_sum = 0;
	const u32 core_set_threshold = ws->sample_size * 90 / 100;
	struct bucket_item *bucket = ws->bucket;

	/* Sort in reverse order */
	radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);

	for (i = 0; i < BYTE_CORE_SET_LOW; i++)
		coreset_sum += bucket[i].count;

	if (coreset_sum > core_set_threshold)
		return i;

	for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
		coreset_sum += bucket[i].count;
		if (coreset_sum > core_set_threshold)
			break;
	}

	return i;
}

/*
 * Count byte values in buckets.
 * This heuristic can detect textual data (configs, xml, json, html, etc).
 * Because in most text-like data byte set is restricted to limited number of
 * possible characters, and that restriction in most cases makes data easy to
 * compress.
 *
 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
 *	less - compressible
 *	more - need additional analysis
 */
#define BYTE_SET_THRESHOLD		(64)

static u32 byte_set_size(const struct heuristic_ws *ws)
{
	u32 i;
	u32 byte_set_size = 0;

	for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
		if (ws->bucket[i].count > 0)
			byte_set_size++;
	}

	/*
	 * Continue collecting count of byte values in buckets.  If the byte
	 * set size is bigger than the threshold, it's pointless to continue,
	 * the detection technique would fail for this type of data.
	 */
	for (; i < BUCKET_SIZE; i++) {
		if (ws->bucket[i].count > 0) {
			byte_set_size++;
			if (byte_set_size > BYTE_SET_THRESHOLD)
				return byte_set_size;
		}
	}

	return byte_set_size;
}

static bool sample_repeated_patterns(struct heuristic_ws *ws)
{
	const u32 half_of_sample = ws->sample_size / 2;
	const u8 *data = ws->sample;

	return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
}

static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
				     struct heuristic_ws *ws)
{
	struct page *page;
	u64 index, index_end;
	u32 i, curr_sample_pos;
	u8 *in_data;

	/*
	 * Compression handles the input data by chunks of 128KiB
	 * (defined by BTRFS_MAX_UNCOMPRESSED)
	 *
	 * We do the same for the heuristic and loop over the whole range.
	 *
	 * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
	 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
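	 *
	 * That bounds the sample at 16 bytes from each 256-byte interval of a
	 * 128KiB chunk, i.e. at most 8KiB of copied data.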
	 */
	if (end - start > BTRFS_MAX_UNCOMPRESSED)
		end = start + BTRFS_MAX_UNCOMPRESSED;

	index = start >> PAGE_SHIFT;
	index_end = end >> PAGE_SHIFT;

	/* Don't miss unaligned end */
	if (!IS_ALIGNED(end, PAGE_SIZE))
		index_end++;

	curr_sample_pos = 0;
	while (index < index_end) {
		page = find_get_page(inode->i_mapping, index);
		in_data = kmap(page);
		/* Handle case where the start is not aligned to PAGE_SIZE */
		i = start % PAGE_SIZE;
		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
			/* Don't sample any garbage from the last page */
			if (start > end - SAMPLING_READ_SIZE)
				break;
			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
			       SAMPLING_READ_SIZE);
			i += SAMPLING_INTERVAL;
			start += SAMPLING_INTERVAL;
			curr_sample_pos += SAMPLING_READ_SIZE;
		}
		kunmap(page);
		put_page(page);

		index++;
	}

	ws->sample_size = curr_sample_pos;
}

/*
 * Compression heuristic.
 *
 * For now it's a naive and optimistic 'return true', we'll extend the logic to
 * quickly (compared to direct compression) detect data characteristics
 * (compressible/uncompressible) to avoid wasting CPU time on uncompressible
 * data.
 *
 * The following types of analysis can be performed:
 * - detect mostly zero data
 * - detect data with low "byte set" size (text, etc)
 * - detect data with low/high "core byte" set
 *
 * Return non-zero if the compression should be done, 0 otherwise.
 */
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
{
	struct list_head *ws_list = __find_workspace(0, true);
	struct heuristic_ws *ws;
	u32 i;
	u8 byte;
	int ret = 0;

	ws = list_entry(ws_list, struct heuristic_ws, list);

	heuristic_collect_sample(inode, start, end, ws);

	if (sample_repeated_patterns(ws)) {
		ret = 1;
		goto out;
	}

	memset(ws->bucket, 0, sizeof(*ws->bucket) * BUCKET_SIZE);

	for (i = 0; i < ws->sample_size; i++) {
		byte = ws->sample[i];
		ws->bucket[byte].count++;
	}

	i = byte_set_size(ws);
	if (i < BYTE_SET_THRESHOLD) {
		ret = 2;
		goto out;
	}

	i = byte_core_set_size(ws);
	if (i <= BYTE_CORE_SET_LOW) {
		ret = 3;
		goto out;
	}

	if (i >= BYTE_CORE_SET_HIGH) {
		ret = 0;
		goto out;
	}

	i = shannon_entropy(ws);
	if (i <= ENTROPY_LVL_ACEPTABLE) {
		ret = 4;
		goto out;
	}

	/*
	 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
	 * needed to give green light to compression.
	 *
	 * For now just assume that compression at that level is not worth the
	 * resources because:
	 *
	 * 1. it is possible to defrag the data later
	 *
	 * 2. the data would turn out to be hardly compressible, eg. 150 byte
	 *    values, every bucket has counter at level ~54.  The heuristic
	 *    would be confused.  This can happen when data have some internal
	 *    repeated patterns like "abbacbbc...".  This can be detected by
	 *    analyzing pairs of bytes, which is too costly.
	 */
	if (i < ENTROPY_LVL_HIGH) {
		ret = 5;
		goto out;
	} else {
		ret = 0;
		goto out;
	}

out:
	__free_workspace(0, ws_list, true);
	return ret;
}

unsigned int btrfs_compress_str2level(const char *str)
{
	if (strncmp(str, "zlib", 4) != 0)
		return 0;

	/* Accepted form: zlib:1 up to zlib:9 and nothing left after the number */
	if (str[4] == ':' && '1' <= str[5] && str[5] <= '9' && str[6] == 0)
		return str[5] - '0';

	return BTRFS_ZLIB_DEFAULT_LEVEL;
}