// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/log2.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"

static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };

const char* btrfs_compress_type2str(enum btrfs_compression_type type)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB:
	case BTRFS_COMPRESS_LZO:
	case BTRFS_COMPRESS_ZSTD:
	case BTRFS_COMPRESS_NONE:
		return btrfs_compress_types[type];
	}

	return NULL;
}

static int btrfs_decompress_bio(struct compressed_bio *cb);

static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
				      unsigned long disk_size)
{
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	return sizeof(struct compressed_bio) +
		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
}

static int check_compressed_csum(struct btrfs_inode *inode,
				 struct compressed_bio *cb,
				 u64 disk_start)
{
	int ret;
	struct page *page;
	unsigned long i;
	char *kaddr;
	u32 csum;
	u32 *cb_sum = &cb->sums;

	if (inode->flags & BTRFS_INODE_NODATASUM)
		return 0;

	for (i = 0; i < cb->nr_pages; i++) {
		page = cb->compressed_pages[i];
		csum = ~(u32)0;

		kaddr = kmap_atomic(page);
		csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
		btrfs_csum_final(csum, (u8 *)&csum);
		kunmap_atomic(kaddr);

		if (csum != *cb_sum) {
			btrfs_print_data_csum_error(inode, disk_start, csum,
						    *cb_sum, cb->mirror_num);
			ret = -EIO;
			goto fail;
		}
		cb_sum++;

	}
	ret = 0;
fail:
	return ret;
}

/* when we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally
 *
 * The compressed pages are freed here, and it must be run
 * in process context
 */
static void end_compressed_bio_read(struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;
	unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
	int ret = 0;

	if (bio->bi_status)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/*
	 * Record the correct mirror_num in cb->orig_bio so that
	 * read-repair can work properly.
	 */
	ASSERT(btrfs_io_bio(cb->orig_bio));
	btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
	cb->mirror_num = mirror;

	/*
	 * Some IO in this cb have failed, just skip checksum as there
	 * is no way it could be correct.
	 */
	if (cb->errors == 1)
		goto csum_failed;

	inode = cb->inode;
	ret = check_compressed_csum(BTRFS_I(inode), cb,
				    (u64)bio->bi_iter.bi_sector << 9);
	if (ret)
		goto csum_failed;

	/* ok, we're the last bio for this extent, lets start
	 * the decompression.
	 */
	ret = btrfs_decompress_bio(cb);

csum_failed:
	if (ret)
		cb->errors = 1;

	/* release the compressed pages */
	index = 0;
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* do io completion on the original bio */
	if (cb->errors) {
		bio_io_error(cb->orig_bio);
	} else {
		int i;
		struct bio_vec *bvec;

		/*
		 * we have verified the checksum already, set page
		 * checked so the end_io handlers know about it
		 */
		ASSERT(!bio_flagged(bio, BIO_CLONED));
		bio_for_each_segment_all(bvec, cb->orig_bio, i)
			SetPageChecked(bvec->bv_page);

		bio_endio(cb->orig_bio);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(struct inode *inode,
					      const struct compressed_bio *cb)
{
	unsigned long index = cb->start >> PAGE_SHIFT;
	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int ret;

	if (cb->errors)
		mapping_set_error(inode->i_mapping, -EIO);

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			if (cb->errors)
				SetPageError(pages[i]);
			end_page_writeback(pages[i]);
			put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* the inode may be gone now */
}

/*
 * do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio)
{
	struct extent_io_tree *tree;
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;

	if (bio->bi_status)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/* ok, we're the last bio for this extent, step one is to
	 * call back into the FS and do all the end_io operations
	 */
	inode = cb->inode;
	tree = &BTRFS_I(inode)->io_tree;
	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
					 cb->start,
					 cb->start + cb->len - 1,
					 NULL,
					 bio->bi_status ?
					 BLK_STS_OK : BLK_STS_NOTSUPP);
	cb->compressed_pages[0]->mapping = NULL;

	end_compressed_writeback(inode, cb);
	/* note, our inode could be gone now */

	/*
	 * release the compressed pages, these came from alloc_page and
	 * are not attached to the inode at all
	 */
	index = 0;
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
				 unsigned long len, u64 disk_start,
				 unsigned long compressed_len,
				 struct page **compressed_pages,
				 unsigned long nr_pages,
				 unsigned int write_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct bio *bio = NULL;
	struct compressed_bio *cb;
	unsigned long bytes_left;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int pg_index = 0;
	struct page *page;
	u64 first_byte = disk_start;
	struct block_device *bdev;
	blk_status_t ret;
	int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	WARN_ON(start & ((u64)PAGE_SIZE - 1));
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		return BLK_STS_RESOURCE;
	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->start = start;
	cb->len = len;
	cb->mirror_num = 0;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = compressed_len;
	cb->orig_bio = NULL;
	cb->nr_pages = nr_pages;

	bdev = fs_info->fs_devices->latest_bdev;

	bio = btrfs_bio_alloc(bdev, first_byte);
	bio->bi_opf = REQ_OP_WRITE | write_flags;
	bio->bi_private = cb;
	bio->bi_end_io = end_compressed_bio_write;
	refcount_set(&cb->pending_bios, 1);

	/* create and submit bios for the compressed pages */
	bytes_left = compressed_len;
	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
		int submit = 0;

		page = compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		if (bio->bi_iter.bi_size)
			submit = io_tree->ops->merge_bio_hook(page, 0,
							      PAGE_SIZE,
							      bio, 0);

		page->mapping = NULL;
		if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			refcount_inc(&cb->pending_bios);
			ret = btrfs_bio_wq_end_io(fs_info, bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			if (!skip_sum) {
				ret = btrfs_csum_one_bio(inode, bio, start, 1);
				BUG_ON(ret); /* -ENOMEM */
			}

			ret = btrfs_map_bio(fs_info, bio, 0, 1);
			if (ret) {
				bio->bi_status = ret;
				bio_endio(bio);
			}

			bio = btrfs_bio_alloc(bdev, first_byte);
			bio->bi_opf = REQ_OP_WRITE | write_flags;
			bio->bi_private = cb;
			bio->bi_end_io = end_compressed_bio_write;
			bio_add_page(bio, page, PAGE_SIZE, 0);
		}
		if (bytes_left < PAGE_SIZE) {
			btrfs_info(fs_info,
				   "bytes left %lu compress len %lu nr %lu",
				   bytes_left, cb->compressed_len, cb->nr_pages);
		}
		bytes_left -= PAGE_SIZE;
		first_byte += PAGE_SIZE;
		cond_resched();
	}

	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!skip_sum) {
		ret = btrfs_csum_one_bio(inode, bio, start, 1);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, bio, 0, 1);
	if (ret) {
		bio->bi_status = ret;
		bio_endio(bio);
	}

	return 0;
}

static u64 bio_end_offset(struct bio *bio)
{
	struct bio_vec *last = bio_last_bvec_all(bio);

	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
}

static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
	unsigned long end_index;
	unsigned long pg_index;
	u64 last_offset;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	unsigned long nr_pages = 0;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	u64 end;
	int misses = 0;

	last_offset = bio_end_offset(cb->orig_bio);
	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	while (last_offset < compressed_end) {
		pg_index = last_offset >> PAGE_SHIFT;

		if (pg_index > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->i_pages, pg_index);
		rcu_read_unlock();
		if (page && !radix_tree_exceptional_entry(page)) {
			misses++;
			if (misses > 4)
				break;
			goto next;
		}

		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
								 ~__GFP_FS));
		if (!page)
			break;

		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
			put_page(page);
			goto next;
		}

		end = last_offset + PAGE_SIZE - 1;
		/*
		 * at this point, we have a locked page in the page cache
		 * for these bytes in the file.  But, we have to make
		 * sure they map to this compressed extent on disk.
		 */
		set_page_extent_mapped(page);
		lock_extent(tree, last_offset, end);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, last_offset,
					   PAGE_SIZE);
		read_unlock(&em_tree->lock);

		if (!em || last_offset < em->start ||
		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
		free_extent_map(em);

		if (page->index == end_index) {
			char *userpage;
			size_t zero_offset = isize & (PAGE_SIZE - 1);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_SIZE - zero_offset;
				userpage = kmap_atomic(page);
				memset(userpage + zero_offset, 0, zeros);
				flush_dcache_page(page);
				kunmap_atomic(userpage);
			}
		}

		ret = bio_add_page(cb->orig_bio, page,
				   PAGE_SIZE, 0);

		if (ret == PAGE_SIZE) {
			nr_pages++;
			put_page(page);
		} else {
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
next:
		last_offset += PAGE_SIZE;
	}
	return 0;
}

/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_io_tree *tree;
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	unsigned long compressed_len;
	unsigned long nr_pages;
	unsigned long pg_index;
	struct page *page;
	struct block_device *bdev;
	struct bio *comp_bio;
	u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	blk_status_t ret = BLK_STS_RESOURCE;
	int faili = 0;
	u32 *sums;

	tree = &BTRFS_I(inode)->io_tree;
	em_tree = &BTRFS_I(inode)->extent_tree;

	/* we need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree,
				   page_offset(bio_first_page_all(bio)),
				   PAGE_SIZE);
	read_unlock(&em_tree->lock);
	if (!em)
		return BLK_STS_IOERR;

	compressed_len = em->block_len;
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		goto out;

	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->mirror_num = mirror_num;
	sums = &cb->sums;

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	free_extent_map(em);
	em = NULL;

	cb->len = bio->bi_iter.bi_size;
	cb->compressed_len = compressed_len;
	cb->compress_type = extent_compress_type(bio_flags);
	cb->orig_bio = bio;

	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
				       GFP_NOFS);
	if (!cb->compressed_pages)
		goto fail1;

	bdev = fs_info->fs_devices->latest_bdev;

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
							    __GFP_HIGHMEM);
		if (!cb->compressed_pages[pg_index]) {
			faili = pg_index - 1;
			ret = BLK_STS_RESOURCE;
			goto fail2;
		}
	}
	faili = nr_pages - 1;
	cb->nr_pages = nr_pages;

	add_ra_bio_pages(inode, em_start + em_len, cb);

	/* include any pages we added in add_ra_bio_pages */
	cb->len = bio->bi_iter.bi_size;

	comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
	bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
	comp_bio->bi_private = cb;
	comp_bio->bi_end_io = end_compressed_bio_read;
	refcount_set(&cb->pending_bios, 1);

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		int submit = 0;

		page = cb->compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		page->index = em_start >> PAGE_SHIFT;

		if (comp_bio->bi_iter.bi_size)
			submit = tree->ops->merge_bio_hook(page, 0,
							   PAGE_SIZE,
							   comp_bio, 0);

		page->mapping = NULL;
		if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			refcount_inc(&cb->pending_bios);

			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
				ret = btrfs_lookup_bio_sums(inode, comp_bio,
							    sums);
				BUG_ON(ret); /* -ENOMEM */
			}
			sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
					     fs_info->sectorsize);

			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
			if (ret) {
				comp_bio->bi_status = ret;
				bio_endio(comp_bio);
			}

			comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
			bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
			comp_bio->bi_private = cb;
			comp_bio->bi_end_io = end_compressed_bio_read;

			bio_add_page(comp_bio, page, PAGE_SIZE, 0);
		}
		cur_disk_byte += PAGE_SIZE;
	}

	ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
	if (ret) {
		comp_bio->bi_status = ret;
		bio_endio(comp_bio);
	}

	return 0;

fail2:
	while (faili >= 0) {
		__free_page(cb->compressed_pages[faili]);
		faili--;
	}

	kfree(cb->compressed_pages);
fail1:
	kfree(cb);
out:
	free_extent_map(em);
	return ret;
}

/*
 * Heuristic uses systematic sampling to collect data from the input data
 * range, the logic can be tuned by the following constants:
 *
 * @SAMPLING_READ_SIZE - how many bytes will be copied from for each sample
 * @SAMPLING_INTERVAL  - range from which the sampled data can be collected
 */
#define SAMPLING_READ_SIZE	(16)
#define SAMPLING_INTERVAL	(256)

/*
 * For statistical analysis of the input data we consider bytes that form a
 * Galois Field of 256 objects. Each object has an attribute count, ie. how
 * many times the object appeared in the sample.
 */
#define BUCKET_SIZE		(256)

/*
 * The size of the sample is based on a statistical sampling rule of thumb.
 * The common way is to perform sampling tests as long as the number of
 * elements in each cell is at least 5.
 *
 * Instead of 5, we choose 32 to obtain more accurate results.
 * If the data contain the maximum number of symbols, which is 256, we obtain a
 * sample size bound by 8192.
 *
 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 * from up to 512 locations.
 */
#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED *		\
				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)

struct bucket_item {
	u32 count;
};

struct heuristic_ws {
	/* Partial copy of input data */
	u8 *sample;
	u32 sample_size;
	/* Buckets store counters for each byte value */
	struct bucket_item *bucket;
	/* Sorting buffer */
	struct bucket_item *bucket_b;
	struct list_head list;
};

static void free_heuristic_ws(struct list_head *ws)
{
	struct heuristic_ws *workspace;

	workspace = list_entry(ws, struct heuristic_ws, list);

	kvfree(workspace->sample);
	kfree(workspace->bucket);
	kfree(workspace->bucket_b);
	kfree(workspace);
}

static struct list_head *alloc_heuristic_ws(void)
{
	struct heuristic_ws *ws;

	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
	if (!ws)
		return ERR_PTR(-ENOMEM);

	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
	if (!ws->sample)
		goto fail;

	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
	if (!ws->bucket)
		goto fail;

	ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
	if (!ws->bucket_b)
		goto fail;

	INIT_LIST_HEAD(&ws->list);
	return &ws->list;
fail:
	free_heuristic_ws(&ws->list);
	return ERR_PTR(-ENOMEM);
}

struct workspaces_list {
	struct list_head idle_ws;
	spinlock_t ws_lock;
	/* Number of free workspaces */
	int free_ws;
	/* Total number of allocated workspaces */
	atomic_t total_ws;
	/* Waiters for a free workspace */
	wait_queue_head_t ws_wait;
};

static struct workspaces_list btrfs_comp_ws[BTRFS_COMPRESS_TYPES];

static struct workspaces_list btrfs_heuristic_ws;

static const struct btrfs_compress_op * const btrfs_compress_op[] = {
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
	&btrfs_zstd_compress,
};

void __init btrfs_init_compress(void)
{
	struct list_head *workspace;
	int i;

	INIT_LIST_HEAD(&btrfs_heuristic_ws.idle_ws);
	spin_lock_init(&btrfs_heuristic_ws.ws_lock);
	atomic_set(&btrfs_heuristic_ws.total_ws, 0);
	init_waitqueue_head(&btrfs_heuristic_ws.ws_wait);

	workspace = alloc_heuristic_ws();
	if (IS_ERR(workspace)) {
		pr_warn(
	"BTRFS: cannot preallocate heuristic workspace, will try later\n");
	} else {
		atomic_set(&btrfs_heuristic_ws.total_ws, 1);
		btrfs_heuristic_ws.free_ws = 1;
		list_add(workspace, &btrfs_heuristic_ws.idle_ws);
	}

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
		spin_lock_init(&btrfs_comp_ws[i].ws_lock);
		atomic_set(&btrfs_comp_ws[i].total_ws, 0);
		init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);

		/*
		 * Preallocate one workspace for each compression type so
		 * we can guarantee forward progress in the worst case
		 */
		workspace = btrfs_compress_op[i]->alloc_workspace();
		if (IS_ERR(workspace)) {
			pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
		} else {
			atomic_set(&btrfs_comp_ws[i].total_ws, 1);
			btrfs_comp_ws[i].free_ws = 1;
			list_add(workspace, &btrfs_comp_ws[i].idle_ws);
		}
	}
}

/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, waits until there's one.
 * Preallocation makes a forward progress guarantee and we do not return
 * errors.
 */
static struct list_head *__find_workspace(int type, bool heuristic)
{
	struct list_head *workspace;
	int cpus = num_online_cpus();
	int idx = type - 1;
	unsigned nofs_flag;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	if (heuristic) {
		idle_ws = &btrfs_heuristic_ws.idle_ws;
		ws_lock = &btrfs_heuristic_ws.ws_lock;
		total_ws = &btrfs_heuristic_ws.total_ws;
		ws_wait = &btrfs_heuristic_ws.ws_wait;
		free_ws = &btrfs_heuristic_ws.free_ws;
	} else {
		idle_ws = &btrfs_comp_ws[idx].idle_ws;
		ws_lock = &btrfs_comp_ws[idx].ws_lock;
		total_ws = &btrfs_comp_ws[idx].total_ws;
		ws_wait = &btrfs_comp_ws[idx].ws_wait;
		free_ws = &btrfs_comp_ws[idx].free_ws;
	}

again:
	spin_lock(ws_lock);
	if (!list_empty(idle_ws)) {
		workspace = idle_ws->next;
		list_del(workspace);
		(*free_ws)--;
		spin_unlock(ws_lock);
		return workspace;

	}
	if (atomic_read(total_ws) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(ws_lock);
		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(total_ws) > cpus && !*free_ws)
			schedule();
		finish_wait(ws_wait, &wait);
		goto again;
	}
	atomic_inc(total_ws);
	spin_unlock(ws_lock);

	/*
	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
	 * to turn it off here because we might get called from the restricted
	 * context of btrfs_compress_bio/btrfs_compress_pages
	 */
	nofs_flag = memalloc_nofs_save();
	if (heuristic)
		workspace = alloc_heuristic_ws();
	else
		workspace = btrfs_compress_op[idx]->alloc_workspace();
	memalloc_nofs_restore(nofs_flag);

	if (IS_ERR(workspace)) {
		atomic_dec(total_ws);
		wake_up(ws_wait);

		/*
		 * Do not return the error but go back to waiting. There's a
		 * workspace preallocated for each type and the compression
		 * time is bounded so we get to a workspace eventually. This
		 * makes our caller's life easier.
		 *
		 * To prevent silent and low-probability deadlocks (when the
		 * initial preallocation fails), check if there are any
		 * workspaces at all.
		 */
		if (atomic_read(total_ws) == 0) {
			static DEFINE_RATELIMIT_STATE(_rs,
					/* once per minute */ 60 * HZ,
					/* no burst */ 1);

			if (__ratelimit(&_rs)) {
				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
			}
		}
		goto again;
	}
	return workspace;
}

static struct list_head *find_workspace(int type)
{
	return __find_workspace(type, false);
}

/*
 * put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
static void __free_workspace(int type, struct list_head *workspace,
			     bool heuristic)
{
	int idx = type - 1;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	if (heuristic) {
		idle_ws = &btrfs_heuristic_ws.idle_ws;
		ws_lock = &btrfs_heuristic_ws.ws_lock;
		total_ws = &btrfs_heuristic_ws.total_ws;
		ws_wait = &btrfs_heuristic_ws.ws_wait;
		free_ws = &btrfs_heuristic_ws.free_ws;
	} else {
		idle_ws = &btrfs_comp_ws[idx].idle_ws;
		ws_lock = &btrfs_comp_ws[idx].ws_lock;
		total_ws = &btrfs_comp_ws[idx].total_ws;
		ws_wait = &btrfs_comp_ws[idx].ws_wait;
		free_ws = &btrfs_comp_ws[idx].free_ws;
	}

	spin_lock(ws_lock);
	if (*free_ws <= num_online_cpus()) {
		list_add(workspace, idle_ws);
		(*free_ws)++;
		spin_unlock(ws_lock);
		goto wake;
	}
	spin_unlock(ws_lock);

	if (heuristic)
		free_heuristic_ws(workspace);
	else
		btrfs_compress_op[idx]->free_workspace(workspace);
	atomic_dec(total_ws);
wake:
	cond_wake_up(ws_wait);
}

static void free_workspace(int type, struct list_head *ws)
{
	return __free_workspace(type, ws, false);
}

/*
 * cleanup function for module exit
 */
static void free_workspaces(void)
{
	struct list_head *workspace;
	int i;

	while (!list_empty(&btrfs_heuristic_ws.idle_ws)) {
		workspace = btrfs_heuristic_ws.idle_ws.next;
		list_del(workspace);
		free_heuristic_ws(workspace);
		atomic_dec(&btrfs_heuristic_ws.total_ws);
	}

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		while (!list_empty(&btrfs_comp_ws[i].idle_ws)) {
			workspace = btrfs_comp_ws[i].idle_ws.next;
			list_del(workspace);
			btrfs_compress_op[i]->free_workspace(workspace);
			atomic_dec(&btrfs_comp_ws[i].total_ws);
		}
	}
}

/*
 * Given an address space and start and length, compress the bytes into @pages
 * that are allocated on demand.
 *
 * @type_level is encoded algorithm and level, where level 0 means whatever
 * default the algorithm chooses and is opaque here;
 * - the compression algorithm is stored in bits 0-3
 * - the level is stored in bits 4-7
 *
 * @out_pages is an in/out parameter, holds maximum number of pages to allocate
 * and returns number of actually allocated pages
 *
 * @total_in is used to return the number of bytes actually read.  It
 * may be smaller than the input length if we had to exit early because we
 * ran out of room in the pages array or because we cross the
 * max_out threshold.
 *
 * @total_out is an in/out parameter, must be set to the input length and will
 * be also used to return the total number of compressed bytes
 *
 * @max_out tells us the max number of bytes that we're allowed to
 * stuff into pages
 */
int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
			 u64 start, struct page **pages,
			 unsigned long *out_pages,
			 unsigned long *total_in,
			 unsigned long *total_out)
{
	struct list_head *workspace;
	int ret;
	int type = type_level & 0xF;

	workspace = find_workspace(type);

	btrfs_compress_op[type - 1]->set_level(workspace, type_level);
	ret = btrfs_compress_op[type - 1]->compress_pages(workspace, mapping,
							  start, pages,
							  out_pages,
							  total_in, total_out);
	free_workspace(type, workspace);
	return ret;
}

/*
 * pages_in is an array of pages with compressed data.
 *
 * disk_start is the starting logical offset of this array in the file
 *
 * orig_bio contains the pages from the file that we want to decompress into
 *
 * srclen is the number of bytes in pages_in
 *
 * The basic idea is that we have a bio that was created by readpages.
 * The pages in the bio are for the uncompressed data, and they may not
 * be contiguous.  They all correspond to the range of bytes covered by
 * the compressed extent.
 */
static int btrfs_decompress_bio(struct compressed_bio *cb)
{
	struct list_head *workspace;
	int ret;
	int type = cb->compress_type;

	workspace = find_workspace(type);
	ret = btrfs_compress_op[type - 1]->decompress_bio(workspace, cb);
	free_workspace(type, workspace);

	return ret;
}

/*
 * a less complex decompression routine.  Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the compressed data we're interested in
 */
int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
		     unsigned long start_byte, size_t srclen, size_t destlen)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);

	ret = btrfs_compress_op[type - 1]->decompress(workspace, data_in,
						      dest_page, start_byte,
						      srclen, destlen);

	free_workspace(type, workspace);
	return ret;
}

void __cold btrfs_exit_compress(void)
{
	free_workspaces();
}

/*
 * Copy uncompressed data from working buffer to pages.
 *
 * buf_start is the byte offset of the start of our working buffer within
 * the uncompressed data.
 *
 * total_out is the byte offset of the last byte of the working buffer within
 * the uncompressed data.
 */
int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
			      unsigned long total_out, u64 disk_start,
			      struct bio *bio)
{
	unsigned long buf_offset;
	unsigned long current_buf_start;
	unsigned long start_byte;
	unsigned long prev_start_byte;
	unsigned long working_bytes = total_out - buf_start;
	unsigned long bytes;
	char *kaddr;
	struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);

	/*
	 * start byte is the first byte of the page we're currently
	 * copying into relative to the start of the compressed data.
	 */
	start_byte = page_offset(bvec.bv_page) - disk_start;

	/* we haven't yet hit data corresponding to this page */
	if (total_out <= start_byte)
		return 1;

	/*
	 * the start of the data we care about is offset into
	 * the middle of our working buffer
	 */
	if (total_out > start_byte && buf_start < start_byte) {
		buf_offset = start_byte - buf_start;
		working_bytes -= buf_offset;
	} else {
		buf_offset = 0;
	}
	current_buf_start = buf_start;

	/* copy bytes from the working buffer into the pages */
	while (working_bytes > 0) {
		bytes = min_t(unsigned long, bvec.bv_len,
			      PAGE_SIZE - buf_offset);
		bytes = min(bytes, working_bytes);

		kaddr = kmap_atomic(bvec.bv_page);
		memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);
		kunmap_atomic(kaddr);
		flush_dcache_page(bvec.bv_page);

		buf_offset += bytes;
		working_bytes -= bytes;
		current_buf_start += bytes;

		/* check if we need to pick another page */
		bio_advance(bio, bytes);
		if (!bio->bi_iter.bi_size)
			return 0;
		bvec = bio_iter_iovec(bio, bio->bi_iter);
		prev_start_byte = start_byte;
		start_byte = page_offset(bvec.bv_page) - disk_start;

		/*
		 * We need to make sure we're only adjusting
		 * our offset into compression working buffer when
		 * we're switching pages.  Otherwise we can incorrectly
		 * keep copying when we were actually done.
		 */
		if (start_byte != prev_start_byte) {
			/*
			 * make sure our new page is covered by this
			 * working buffer
			 */
			if (total_out <= start_byte)
				return 1;

			/*
			 * the next page in the biovec might not be adjacent
			 * to the last page, but it might still be found
			 * inside this working buffer. bump our offset pointer
			 */
			if (total_out > start_byte &&
			    current_buf_start < start_byte) {
				buf_offset = start_byte - buf_start;
				working_bytes = total_out - start_byte;
				current_buf_start = buf_start + buf_offset;
			}
		}
	}

	return 1;
}

/*
 * Shannon Entropy calculation
 *
 * Pure byte distribution analysis fails to determine compressibility of data.
 * Try calculating entropy to estimate the average minimum number of bits
 * needed to encode the sampled data.
 *
 * For convenience, return the percentage of needed bits, instead of amount of
 * bits directly.
 *
 * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
 *			    and can be compressible with high probability
 *
 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
 *
 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
 */
#define ENTROPY_LVL_ACEPTABLE		(65)
#define ENTROPY_LVL_HIGH		(80)

/*
 * For increased precision in shannon_entropy calculation,
 * let's do pow(n, M) to save more digits after comma:
 *
 * - maximum int bit length is 64
 * - ilog2(MAX_SAMPLE_SIZE) -> 13
 * - 13 * 4 = 52 < 64 -> M = 4
 *
 * So use pow(n, 4).
 */
static inline u32 ilog2_w(u64 n)
{
	return ilog2(n * n * n * n);
}

static u32 shannon_entropy(struct heuristic_ws *ws)
{
	const u32 entropy_max = 8 * ilog2_w(2);
	u32 entropy_sum = 0;
	u32 p, p_base, sz_base;
	u32 i;

	sz_base = ilog2_w(ws->sample_size);
	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
		p = ws->bucket[i].count;
		p_base = ilog2_w(p);
		entropy_sum += p * (sz_base - p_base);
	}

	entropy_sum /= ws->sample_size;
	return entropy_sum * 100 / entropy_max;
}

#define RADIX_BASE		4U
#define COUNTERS_SIZE		(1U << RADIX_BASE)

static u8 get4bits(u64 num, int shift) {
	u8 low4bits;

	num >>= shift;
	/* Reverse order */
	low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
	return low4bits;
}

/*
 * Use 4 bits as radix base
 * Use 16 u32 counters for calculating new position in buf array
 *
 * @array     - array that will be sorted
 * @array_buf - buffer array to store sorting results
 *              must be equal in size to @array
 * @num       - array size
 */
static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
		       int num)
{
	u64 max_num;
	u64 buf_num;
	u32 counters[COUNTERS_SIZE];
	u32 new_addr;
	u32 addr;
	int bitlen;
	int shift;
	int i;

	/*
	 * Try to avoid useless loop iterations for small numbers stored in big
	 * counters.  Example: 48 33 4 ... in 64bit array
	 */
	max_num = array[0].count;
	for (i = 1; i < num; i++) {
		buf_num = array[i].count;
		if (buf_num > max_num)
			max_num = buf_num;
	}

	buf_num = ilog2(max_num);
	bitlen = ALIGN(buf_num, RADIX_BASE * 2);

	shift = 0;
	while (shift < bitlen) {
		memset(counters, 0, sizeof(counters));

		for (i = 0; i < num; i++) {
			buf_num = array[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]++;
		}

		for (i = 1; i < COUNTERS_SIZE; i++)
			counters[i] += counters[i - 1];

		for (i = num - 1; i >= 0; i--) {
			buf_num = array[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]--;
			new_addr = counters[addr];
			array_buf[new_addr] = array[i];
		}

		shift += RADIX_BASE;

		/*
		 * Normal radix expects to move data from a temporary array, to
		 * the main one.  But that requires some CPU time. Avoid that
		 * by doing another sort iteration to original array instead of
		 * memcpy()
		 */
		memset(counters, 0, sizeof(counters));

		for (i = 0; i < num; i++) {
			buf_num = array_buf[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]++;
		}

		for (i = 1; i < COUNTERS_SIZE; i++)
			counters[i] += counters[i - 1];

		for (i = num - 1; i >= 0; i--) {
			buf_num = array_buf[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]--;
			new_addr = counters[addr];
			array[new_addr] = array_buf[i];
		}

		shift += RADIX_BASE;
	}
}

/*
 * Size of the core byte set - how many bytes cover 90% of the sample
 *
 * There are several types of structured binary data that use nearly all byte
 * values. The distribution can be uniform and counts in all buckets will be
 * nearly the same (eg. encrypted data). Unlikely to be compressible.
 *
 * Other possibility is normal (Gaussian) distribution, where the data could
 * be potentially compressible, but we have to take a few more steps to decide
 * how much.
 *
 * @BYTE_CORE_SET_LOW  - main part of byte values repeated frequently,
 *                       compression algo can easily fix that
 * @BYTE_CORE_SET_HIGH - data have uniform distribution and with high
 *                       probability is not compressible
 */
#define BYTE_CORE_SET_LOW		(64)
#define BYTE_CORE_SET_HIGH		(200)

static int byte_core_set_size(struct heuristic_ws *ws)
{
	u32 i;
	u32 coreset_sum = 0;
	const u32 core_set_threshold = ws->sample_size * 90 / 100;
	struct bucket_item *bucket = ws->bucket;

	/* Sort in reverse order */
	radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);

	for (i = 0; i < BYTE_CORE_SET_LOW; i++)
		coreset_sum += bucket[i].count;

	if (coreset_sum > core_set_threshold)
		return i;

	for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
		coreset_sum += bucket[i].count;
		if (coreset_sum > core_set_threshold)
			break;
	}

	return i;
}

/*
 * Count byte values in buckets.
 * This heuristic can detect textual data (configs, xml, json, html, etc).
 * Because in most text-like data byte set is restricted to limited number of
 * possible characters, and that restriction in most cases makes data easy to
 * compress.
 *
 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
 *	less - compressible
 *	more - need additional analysis
 */
#define BYTE_SET_THRESHOLD		(64)

static u32 byte_set_size(const struct heuristic_ws *ws)
{
	u32 i;
	u32 byte_set_size = 0;

	for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
		if (ws->bucket[i].count > 0)
			byte_set_size++;
	}

	/*
	 * Continue collecting count of byte values in buckets.  If the byte
	 * set size is bigger than the threshold, it's pointless to continue,
	 * the detection technique would fail for this type of data.
	 */
	for (; i < BUCKET_SIZE; i++) {
		if (ws->bucket[i].count > 0) {
			byte_set_size++;
			if (byte_set_size > BYTE_SET_THRESHOLD)
				return byte_set_size;
		}
	}

	return byte_set_size;
}

static bool sample_repeated_patterns(struct heuristic_ws *ws)
{
	const u32 half_of_sample = ws->sample_size / 2;
	const u8 *data = ws->sample;

	return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
}

static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
				     struct heuristic_ws *ws)
{
	struct page *page;
	u64 index, index_end;
	u32 i, curr_sample_pos;
	u8 *in_data;

	/*
	 * Compression handles the input data by chunks of 128KiB
	 * (defined by BTRFS_MAX_UNCOMPRESSED)
	 *
	 * We do the same for the heuristic and loop over the whole range.
	 *
	 * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
	 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
	 */
	if (end - start > BTRFS_MAX_UNCOMPRESSED)
		end = start + BTRFS_MAX_UNCOMPRESSED;

	index = start >> PAGE_SHIFT;
	index_end = end >> PAGE_SHIFT;

	/* Don't miss unaligned end */
	if (!IS_ALIGNED(end, PAGE_SIZE))
		index_end++;

	curr_sample_pos = 0;
	while (index < index_end) {
		page = find_get_page(inode->i_mapping, index);
		in_data = kmap(page);
		/* Handle case where the start is not aligned to PAGE_SIZE */
		i = start % PAGE_SIZE;
		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
			/* Don't sample any garbage from the last page */
			if (start > end - SAMPLING_READ_SIZE)
				break;
			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
			       SAMPLING_READ_SIZE);
			i += SAMPLING_INTERVAL;
			start += SAMPLING_INTERVAL;
			curr_sample_pos += SAMPLING_READ_SIZE;
		}
		kunmap(page);
		put_page(page);

		index++;
	}

	ws->sample_size = curr_sample_pos;
}

/*
 * Compression heuristic.
 *
 * For now it's a naive and optimistic 'return true', we'll extend the logic to
 * quickly (compared to direct compression) detect data characteristics
 * (compressible/uncompressible) to avoid wasting CPU time on uncompressible
 * data.
 *
 * The following types of analysis can be performed:
 * - detect mostly zero data
 * - detect data with low "byte set" size (text, etc)
 * - detect data with low/high "core byte" set
 *
 * Return non-zero if the compression should be done, 0 otherwise.
 */
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
{
	struct list_head *ws_list = __find_workspace(0, true);
	struct heuristic_ws *ws;
	u32 i;
	u8 byte;
	int ret = 0;

	ws = list_entry(ws_list, struct heuristic_ws, list);

	heuristic_collect_sample(inode, start, end, ws);

	if (sample_repeated_patterns(ws)) {
		ret = 1;
		goto out;
	}

	memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);

	for (i = 0; i < ws->sample_size; i++) {
		byte = ws->sample[i];
		ws->bucket[byte].count++;
	}

	i = byte_set_size(ws);
	if (i < BYTE_SET_THRESHOLD) {
		ret = 2;
		goto out;
	}

	i = byte_core_set_size(ws);
	if (i <= BYTE_CORE_SET_LOW) {
		ret = 3;
		goto out;
	}

	if (i >= BYTE_CORE_SET_HIGH) {
		ret = 0;
		goto out;
	}

	i = shannon_entropy(ws);
	if (i <= ENTROPY_LVL_ACEPTABLE) {
		ret = 4;
		goto out;
	}

	/*
	 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
	 * needed to give green light to compression.
	 *
	 * For now just assume that compression at that level is not worth the
	 * resources because:
	 *
	 * 1. it is possible to defrag the data later
	 *
	 * 2. the data would turn out to be hardly compressible, eg. 150 byte
	 *    values, every bucket has counter at level ~54. The heuristic would
	 *    be confused. This can happen when data have some internal repeated
	 *    patterns like "abbacbbc...". This can be detected by analyzing
	 *    pairs of bytes, which is too costly.
	 */
	if (i < ENTROPY_LVL_HIGH) {
		ret = 5;
		goto out;
	} else {
		ret = 0;
		goto out;
	}

out:
	__free_workspace(0, ws_list, true);
	return ret;
}

unsigned int btrfs_compress_str2level(const char *str)
{
	if (strncmp(str, "zlib", 4) != 0)
		return 0;

	/* Accepted form: zlib:1 up to zlib:9 and nothing left after the number */
	if (str[4] == ':' && '1' <= str[5] && str[5] <= '9' && str[6] == 0)
		return str[5] - '0';

	return BTRFS_ZLIB_DEFAULT_LEVEL;
}