xref: /openbmc/linux/fs/btrfs/compression.c (revision c0ecca6604b80e438b032578634c6e133c7028f6)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2008 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/kernel.h>
7 #include <linux/bio.h>
8 #include <linux/file.h>
9 #include <linux/fs.h>
10 #include <linux/pagemap.h>
11 #include <linux/highmem.h>
12 #include <linux/time.h>
13 #include <linux/init.h>
14 #include <linux/string.h>
15 #include <linux/backing-dev.h>
16 #include <linux/writeback.h>
17 #include <linux/slab.h>
18 #include <linux/sched/mm.h>
19 #include <linux/log2.h>
20 #include <crypto/hash.h>
21 #include "misc.h"
22 #include "ctree.h"
23 #include "disk-io.h"
24 #include "transaction.h"
25 #include "btrfs_inode.h"
26 #include "volumes.h"
27 #include "ordered-data.h"
28 #include "compression.h"
29 #include "extent_io.h"
30 #include "extent_map.h"
31 #include "zoned.h"
32 
33 static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
34 
35 const char* btrfs_compress_type2str(enum btrfs_compression_type type)
36 {
37 	switch (type) {
38 	case BTRFS_COMPRESS_ZLIB:
39 	case BTRFS_COMPRESS_LZO:
40 	case BTRFS_COMPRESS_ZSTD:
41 	case BTRFS_COMPRESS_NONE:
42 		return btrfs_compress_types[type];
43 	default:
44 		break;
45 	}
46 
47 	return NULL;
48 }
49 
50 bool btrfs_compress_is_valid_type(const char *str, size_t len)
51 {
52 	int i;
53 
54 	for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
55 		size_t comp_len = strlen(btrfs_compress_types[i]);
56 
57 		if (len < comp_len)
58 			continue;
59 
60 		if (!strncmp(btrfs_compress_types[i], str, comp_len))
61 			return true;
62 	}
63 	return false;
64 }
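/*
 * Note that the loop above only checks that @str begins with one of the known
 * algorithm names (index 0, the empty string for "none", is skipped), so a
 * value such as "zlib:3" is accepted here and the ":level" suffix is parsed
 * separately by btrfs_compress_str2level() below.
 */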
65 
66 static int compression_compress_pages(int type, struct list_head *ws,
67                struct address_space *mapping, u64 start, struct page **pages,
68                unsigned long *out_pages, unsigned long *total_in,
69                unsigned long *total_out)
70 {
71 	switch (type) {
72 	case BTRFS_COMPRESS_ZLIB:
73 		return zlib_compress_pages(ws, mapping, start, pages,
74 				out_pages, total_in, total_out);
75 	case BTRFS_COMPRESS_LZO:
76 		return lzo_compress_pages(ws, mapping, start, pages,
77 				out_pages, total_in, total_out);
78 	case BTRFS_COMPRESS_ZSTD:
79 		return zstd_compress_pages(ws, mapping, start, pages,
80 				out_pages, total_in, total_out);
81 	case BTRFS_COMPRESS_NONE:
82 	default:
83 		/*
84 		 * This can happen when compression races with a remount that sets
85 		 * it to 'no compress', while the caller doesn't call
86 		 * inode_need_compress() to check whether we really need to
87 		 * compress.
88 		 *
89 		 * Not a big deal, we just need to inform the caller that we
90 		 * haven't allocated any pages yet.
91 		 */
92 		*out_pages = 0;
93 		return -E2BIG;
94 	}
95 }
96 
97 static int compression_decompress_bio(int type, struct list_head *ws,
98 		struct compressed_bio *cb)
99 {
100 	switch (type) {
101 	case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
102 	case BTRFS_COMPRESS_LZO:  return lzo_decompress_bio(ws, cb);
103 	case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
104 	case BTRFS_COMPRESS_NONE:
105 	default:
106 		/*
107 		 * This can't happen, the type is validated several times
108 		 * before we get here.
109 		 */
110 		BUG();
111 	}
112 }
113 
114 static int compression_decompress(int type, struct list_head *ws,
115                unsigned char *data_in, struct page *dest_page,
116                unsigned long start_byte, size_t srclen, size_t destlen)
117 {
118 	switch (type) {
119 	case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
120 						start_byte, srclen, destlen);
121 	case BTRFS_COMPRESS_LZO:  return lzo_decompress(ws, data_in, dest_page,
122 						start_byte, srclen, destlen);
123 	case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
124 						start_byte, srclen, destlen);
125 	case BTRFS_COMPRESS_NONE:
126 	default:
127 		/*
128 		 * This can't happen, the type is validated several times
129 		 * before we get here.
130 		 */
131 		BUG();
132 	}
133 }
134 
135 static int btrfs_decompress_bio(struct compressed_bio *cb);
136 
137 static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
138 				      unsigned long disk_size)
139 {
140 	return sizeof(struct compressed_bio) +
141 		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * fs_info->csum_size;
142 }
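/*
 * A worked example of the sizing above, assuming a 4KiB sectorsize and 4-byte
 * crc32c checksums: a 16KiB compressed extent needs
 * sizeof(struct compressed_bio) + DIV_ROUND_UP(16K, 4K) * 4, i.e. 16 extra
 * bytes, enough room for one checksum per compressed sector in cb->sums.
 */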
143 
144 static int check_compressed_csum(struct btrfs_inode *inode, struct bio *bio,
145 				 u64 disk_start)
146 {
147 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
148 	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
149 	const u32 csum_size = fs_info->csum_size;
150 	const u32 sectorsize = fs_info->sectorsize;
151 	struct page *page;
152 	unsigned long i;
153 	char *kaddr;
154 	u8 csum[BTRFS_CSUM_SIZE];
155 	struct compressed_bio *cb = bio->bi_private;
156 	u8 *cb_sum = cb->sums;
157 
158 	if (!fs_info->csum_root || (inode->flags & BTRFS_INODE_NODATASUM))
159 		return 0;
160 
161 	shash->tfm = fs_info->csum_shash;
162 
163 	for (i = 0; i < cb->nr_pages; i++) {
164 		u32 pg_offset;
165 		u32 bytes_left = PAGE_SIZE;
166 		page = cb->compressed_pages[i];
167 
168 		/* Determine the remaining bytes inside the page first */
169 		if (i == cb->nr_pages - 1)
170 			bytes_left = cb->compressed_len - i * PAGE_SIZE;
171 
172 		/* Hash through the page sector by sector */
173 		for (pg_offset = 0; pg_offset < bytes_left;
174 		     pg_offset += sectorsize) {
175 			kaddr = kmap_atomic(page);
176 			crypto_shash_digest(shash, kaddr + pg_offset,
177 					    sectorsize, csum);
178 			kunmap_atomic(kaddr);
179 
180 			if (memcmp(&csum, cb_sum, csum_size) != 0) {
181 				btrfs_print_data_csum_error(inode, disk_start,
182 						csum, cb_sum, cb->mirror_num);
183 				if (btrfs_io_bio(bio)->device)
184 					btrfs_dev_stat_inc_and_print(
185 						btrfs_io_bio(bio)->device,
186 						BTRFS_DEV_STAT_CORRUPTION_ERRS);
187 				return -EIO;
188 			}
189 			cb_sum += csum_size;
190 			disk_start += sectorsize;
191 		}
192 	}
193 	return 0;
194 }
195 
196 /* when we finish reading compressed pages from the disk, we
197  * decompress them and then run the bio end_io routines on the
198  * decompressed pages (in the inode address space).
199  *
200  * This allows the checksumming and other IO error handling routines
201  * to work normally
202  *
203  * The compressed pages are freed here, and it must be run
204  * in process context
205  */
206 static void end_compressed_bio_read(struct bio *bio)
207 {
208 	struct compressed_bio *cb = bio->bi_private;
209 	struct inode *inode;
210 	struct page *page;
211 	unsigned long index;
212 	unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
213 	int ret = 0;
214 
215 	if (bio->bi_status)
216 		cb->errors = 1;
217 
218 	/* if there are more bios still pending for this compressed
219 	 * extent, just exit
220 	 */
221 	if (!refcount_dec_and_test(&cb->pending_bios))
222 		goto out;
223 
224 	/*
225 	 * Record the correct mirror_num in cb->orig_bio so that
226 	 * read-repair can work properly.
227 	 */
228 	btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
229 	cb->mirror_num = mirror;
230 
231 	/*
232 	 * Some IO in this cb has failed, just skip the checksum as there
233 	 * is no way it could be correct.
234 	 */
235 	if (cb->errors == 1)
236 		goto csum_failed;
237 
238 	inode = cb->inode;
239 	ret = check_compressed_csum(BTRFS_I(inode), bio,
240 				    bio->bi_iter.bi_sector << 9);
241 	if (ret)
242 		goto csum_failed;
243 
244 	/* ok, we're the last bio for this extent, let's start
245 	 * the decompression.
246 	 */
247 	ret = btrfs_decompress_bio(cb);
248 
249 csum_failed:
250 	if (ret)
251 		cb->errors = 1;
252 
253 	/* release the compressed pages */
255 	for (index = 0; index < cb->nr_pages; index++) {
256 		page = cb->compressed_pages[index];
257 		page->mapping = NULL;
258 		put_page(page);
259 	}
260 
261 	/* do io completion on the original bio */
262 	if (cb->errors) {
263 		bio_io_error(cb->orig_bio);
264 	} else {
265 		struct bio_vec *bvec;
266 		struct bvec_iter_all iter_all;
267 
268 		/*
269 		 * we have verified the checksum already, set page
270 		 * checked so the end_io handlers know about it
271 		 */
272 		ASSERT(!bio_flagged(bio, BIO_CLONED));
273 		bio_for_each_segment_all(bvec, cb->orig_bio, iter_all)
274 			SetPageChecked(bvec->bv_page);
275 
276 		bio_endio(cb->orig_bio);
277 	}
278 
279 	/* finally free the cb struct */
280 	kfree(cb->compressed_pages);
281 	kfree(cb);
282 out:
283 	bio_put(bio);
284 }
285 
286 /*
287  * Clear the writeback bits on all of the file
288  * pages for a compressed write
289  */
290 static noinline void end_compressed_writeback(struct inode *inode,
291 					      const struct compressed_bio *cb)
292 {
293 	unsigned long index = cb->start >> PAGE_SHIFT;
294 	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
295 	struct page *pages[16];
296 	unsigned long nr_pages = end_index - index + 1;
297 	int i;
298 	int ret;
299 
300 	if (cb->errors)
301 		mapping_set_error(inode->i_mapping, -EIO);
302 
303 	while (nr_pages > 0) {
304 		ret = find_get_pages_contig(inode->i_mapping, index,
305 				     min_t(unsigned long,
306 				     nr_pages, ARRAY_SIZE(pages)), pages);
307 		if (ret == 0) {
308 			nr_pages -= 1;
309 			index += 1;
310 			continue;
311 		}
312 		for (i = 0; i < ret; i++) {
313 			if (cb->errors)
314 				SetPageError(pages[i]);
315 			end_page_writeback(pages[i]);
316 			put_page(pages[i]);
317 		}
318 		nr_pages -= ret;
319 		index += ret;
320 	}
321 	/* the inode may be gone now */
322 }
323 
324 /*
325  * do the cleanup once all the compressed pages hit the disk.
326  * This will clear writeback on the file pages and free the compressed
327  * pages.
328  *
329  * This also calls the writeback end hooks for the file pages so that
330  * metadata and checksums can be updated in the file.
331  */
332 static void end_compressed_bio_write(struct bio *bio)
333 {
334 	struct compressed_bio *cb = bio->bi_private;
335 	struct inode *inode;
336 	struct page *page;
337 	unsigned long index;
338 
339 	if (bio->bi_status)
340 		cb->errors = 1;
341 
342 	/* if there are more bios still pending for this compressed
343 	 * extent, just exit
344 	 */
345 	if (!refcount_dec_and_test(&cb->pending_bios))
346 		goto out;
347 
348 	/* ok, we're the last bio for this extent, step one is to
349 	 * call back into the FS and do all the end_io operations
350 	 */
351 	inode = cb->inode;
352 	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
353 	btrfs_record_physical_zoned(inode, cb->start, bio);
354 	btrfs_writepage_endio_finish_ordered(cb->compressed_pages[0],
355 			cb->start, cb->start + cb->len - 1,
356 			bio->bi_status == BLK_STS_OK);
357 	cb->compressed_pages[0]->mapping = NULL;
358 
359 	end_compressed_writeback(inode, cb);
360 	/* note, our inode could be gone now */
361 
362 	/*
363 	 * release the compressed pages, these came from alloc_page and
364 	 * are not attached to the inode at all
365 	 */
367 	for (index = 0; index < cb->nr_pages; index++) {
368 		page = cb->compressed_pages[index];
369 		page->mapping = NULL;
370 		put_page(page);
371 	}
372 
373 	/* finally free the cb struct */
374 	kfree(cb->compressed_pages);
375 	kfree(cb);
376 out:
377 	bio_put(bio);
378 }
379 
380 /*
381  * worker function to build and submit bios for previously compressed pages.
382  * The corresponding pages in the inode should be marked for writeback
383  * and the compressed pages should have a reference on them for dropping
384  * when the IO is complete.
385  *
386  * This also checksums the file bytes and gets things ready for
387  * the end io hooks.
388  */
389 blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
390 				 unsigned long len, u64 disk_start,
391 				 unsigned long compressed_len,
392 				 struct page **compressed_pages,
393 				 unsigned long nr_pages,
394 				 unsigned int write_flags,
395 				 struct cgroup_subsys_state *blkcg_css)
396 {
397 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
398 	struct bio *bio = NULL;
399 	struct compressed_bio *cb;
400 	unsigned long bytes_left;
401 	int pg_index = 0;
402 	struct page *page;
403 	u64 first_byte = disk_start;
404 	blk_status_t ret;
405 	int skip_sum = inode->flags & BTRFS_INODE_NODATASUM;
406 	const bool use_append = btrfs_use_zone_append(inode, disk_start);
407 	const unsigned int bio_op = use_append ? REQ_OP_ZONE_APPEND : REQ_OP_WRITE;
408 
409 	WARN_ON(!PAGE_ALIGNED(start));
410 	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
411 	if (!cb)
412 		return BLK_STS_RESOURCE;
413 	refcount_set(&cb->pending_bios, 0);
414 	cb->errors = 0;
415 	cb->inode = &inode->vfs_inode;
416 	cb->start = start;
417 	cb->len = len;
418 	cb->mirror_num = 0;
419 	cb->compressed_pages = compressed_pages;
420 	cb->compressed_len = compressed_len;
421 	cb->orig_bio = NULL;
422 	cb->nr_pages = nr_pages;
423 
424 	bio = btrfs_bio_alloc(first_byte);
425 	bio->bi_opf = bio_op | write_flags;
426 	bio->bi_private = cb;
427 	bio->bi_end_io = end_compressed_bio_write;
428 
429 	if (use_append) {
430 		struct extent_map *em;
431 		struct map_lookup *map;
432 		struct block_device *bdev;
433 
434 		em = btrfs_get_chunk_map(fs_info, disk_start, PAGE_SIZE);
435 		if (IS_ERR(em)) {
436 			kfree(cb);
437 			bio_put(bio);
438 			return BLK_STS_NOTSUPP;
439 		}
440 
441 		map = em->map_lookup;
442 		/* We only support single profile for now */
443 		ASSERT(map->num_stripes == 1);
444 		bdev = map->stripes[0].dev->bdev;
445 
446 		bio_set_dev(bio, bdev);
447 		free_extent_map(em);
448 	}
449 
450 	if (blkcg_css) {
451 		bio->bi_opf |= REQ_CGROUP_PUNT;
452 		kthread_associate_blkcg(blkcg_css);
453 	}
454 	refcount_set(&cb->pending_bios, 1);
455 
456 	/* create and submit bios for the compressed pages */
457 	bytes_left = compressed_len;
458 	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
459 		int submit = 0;
460 		int len = 0;
461 
462 		page = compressed_pages[pg_index];
463 		page->mapping = inode->vfs_inode.i_mapping;
464 		if (bio->bi_iter.bi_size)
465 			submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio,
466 							  0);
467 
468 		/*
469 		 * Page can only be added to bio if the current bio fits in
470 		 * stripe.
471 		 */
472 		if (!submit) {
473 			if (pg_index == 0 && use_append)
474 				len = bio_add_zone_append_page(bio, page,
475 							       PAGE_SIZE, 0);
476 			else
477 				len = bio_add_page(bio, page, PAGE_SIZE, 0);
478 		}
479 
480 		page->mapping = NULL;
481 		if (submit || len < PAGE_SIZE) {
482 			/*
483 			 * inc the count before we submit the bio so
484 			 * we know the end IO handler won't happen before
485 			 * we inc the count.  Otherwise, the cb might get
486 			 * freed before we're done setting it up
487 			 */
488 			refcount_inc(&cb->pending_bios);
489 			ret = btrfs_bio_wq_end_io(fs_info, bio,
490 						  BTRFS_WQ_ENDIO_DATA);
491 			BUG_ON(ret); /* -ENOMEM */
492 
493 			if (!skip_sum) {
494 				ret = btrfs_csum_one_bio(inode, bio, start, 1);
495 				BUG_ON(ret); /* -ENOMEM */
496 			}
497 
498 			ret = btrfs_map_bio(fs_info, bio, 0);
499 			if (ret) {
500 				bio->bi_status = ret;
501 				bio_endio(bio);
502 			}
503 
504 			bio = btrfs_bio_alloc(first_byte);
505 			bio->bi_opf = bio_op | write_flags;
506 			bio->bi_private = cb;
507 			bio->bi_end_io = end_compressed_bio_write;
508 			if (blkcg_css)
509 				bio->bi_opf |= REQ_CGROUP_PUNT;
510 			/*
511 			 * Use bio_add_page() to ensure the bio has at least one
512 			 * page.
513 			 */
514 			bio_add_page(bio, page, PAGE_SIZE, 0);
515 		}
516 		if (bytes_left < PAGE_SIZE) {
517 			btrfs_info(fs_info,
518 					"bytes left %lu compress len %lu nr %lu",
519 			       bytes_left, cb->compressed_len, cb->nr_pages);
520 		}
521 		bytes_left -= PAGE_SIZE;
522 		first_byte += PAGE_SIZE;
523 		cond_resched();
524 	}
525 
526 	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
527 	BUG_ON(ret); /* -ENOMEM */
528 
529 	if (!skip_sum) {
530 		ret = btrfs_csum_one_bio(inode, bio, start, 1);
531 		BUG_ON(ret); /* -ENOMEM */
532 	}
533 
534 	ret = btrfs_map_bio(fs_info, bio, 0);
535 	if (ret) {
536 		bio->bi_status = ret;
537 		bio_endio(bio);
538 	}
539 
540 	if (blkcg_css)
541 		kthread_associate_blkcg(NULL);
542 
543 	return 0;
544 }
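/*
 * A note on the lifetime of @cb above: cb->pending_bios starts at 1, which
 * accounts for the final bio submitted after the loop.  Each intermediate bio
 * takes an extra reference (refcount_inc) before it is mapped, and
 * end_compressed_bio_write() drops one reference per completed bio, so the cb
 * and the compressed pages are freed only after the last bio completes.
 */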
545 
546 static u64 bio_end_offset(struct bio *bio)
547 {
548 	struct bio_vec *last = bio_last_bvec_all(bio);
549 
550 	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
551 }
552 
553 static noinline int add_ra_bio_pages(struct inode *inode,
554 				     u64 compressed_end,
555 				     struct compressed_bio *cb)
556 {
557 	unsigned long end_index;
558 	unsigned long pg_index;
559 	u64 last_offset;
560 	u64 isize = i_size_read(inode);
561 	int ret;
562 	struct page *page;
563 	unsigned long nr_pages = 0;
564 	struct extent_map *em;
565 	struct address_space *mapping = inode->i_mapping;
566 	struct extent_map_tree *em_tree;
567 	struct extent_io_tree *tree;
568 	u64 end;
569 	int misses = 0;
570 
571 	last_offset = bio_end_offset(cb->orig_bio);
572 	em_tree = &BTRFS_I(inode)->extent_tree;
573 	tree = &BTRFS_I(inode)->io_tree;
574 
575 	if (isize == 0)
576 		return 0;
577 
578 	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
579 
580 	while (last_offset < compressed_end) {
581 		pg_index = last_offset >> PAGE_SHIFT;
582 
583 		if (pg_index > end_index)
584 			break;
585 
586 		page = xa_load(&mapping->i_pages, pg_index);
587 		if (page && !xa_is_value(page)) {
588 			misses++;
589 			if (misses > 4)
590 				break;
591 			goto next;
592 		}
593 
594 		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
595 								 ~__GFP_FS));
596 		if (!page)
597 			break;
598 
599 		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
600 			put_page(page);
601 			goto next;
602 		}
603 
604 		/*
605 		 * at this point, we have a locked page in the page cache
606 		 * for these bytes in the file.  But, we have to make
607 		 * sure they map to this compressed extent on disk.
608 		 */
609 		ret = set_page_extent_mapped(page);
610 		if (ret < 0) {
611 			unlock_page(page);
612 			put_page(page);
613 			break;
614 		}
615 
616 		end = last_offset + PAGE_SIZE - 1;
617 		lock_extent(tree, last_offset, end);
618 		read_lock(&em_tree->lock);
619 		em = lookup_extent_mapping(em_tree, last_offset,
620 					   PAGE_SIZE);
621 		read_unlock(&em_tree->lock);
622 
623 		if (!em || last_offset < em->start ||
624 		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
625 		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
626 			free_extent_map(em);
627 			unlock_extent(tree, last_offset, end);
628 			unlock_page(page);
629 			put_page(page);
630 			break;
631 		}
632 		free_extent_map(em);
633 
634 		if (page->index == end_index) {
635 			size_t zero_offset = offset_in_page(isize);
636 
637 			if (zero_offset) {
638 				int zeros;
639 				zeros = PAGE_SIZE - zero_offset;
640 				memzero_page(page, zero_offset, zeros);
641 				flush_dcache_page(page);
642 			}
643 		}
644 
645 		ret = bio_add_page(cb->orig_bio, page,
646 				   PAGE_SIZE, 0);
647 
648 		if (ret == PAGE_SIZE) {
649 			nr_pages++;
650 			put_page(page);
651 		} else {
652 			unlock_extent(tree, last_offset, end);
653 			unlock_page(page);
654 			put_page(page);
655 			break;
656 		}
657 next:
658 		last_offset += PAGE_SIZE;
659 	}
660 	return 0;
661 }
662 
663 /*
664  * for a compressed read, the bio we get passed has all the inode pages
665  * in it.  We don't actually do IO on those pages but allocate new ones
666  * to hold the compressed pages on disk.
667  *
668  * bio->bi_iter.bi_sector points to the compressed extent on disk
669  * bio->bi_io_vec points to all of the inode pages
670  *
671  * After the compressed pages are read, we copy the bytes into the
672  * bio we were passed and then call the bio end_io calls
673  */
674 blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
675 				 int mirror_num, unsigned long bio_flags)
676 {
677 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
678 	struct extent_map_tree *em_tree;
679 	struct compressed_bio *cb;
680 	unsigned long compressed_len;
681 	unsigned long nr_pages;
682 	unsigned long pg_index;
683 	struct page *page;
684 	struct bio *comp_bio;
685 	u64 cur_disk_byte = bio->bi_iter.bi_sector << 9;
686 	u64 em_len;
687 	u64 em_start;
688 	struct extent_map *em;
689 	blk_status_t ret = BLK_STS_RESOURCE;
690 	int faili = 0;
691 	u8 *sums;
692 
693 	em_tree = &BTRFS_I(inode)->extent_tree;
694 
695 	/* we need the actual starting offset of this extent in the file */
696 	read_lock(&em_tree->lock);
697 	em = lookup_extent_mapping(em_tree,
698 				   page_offset(bio_first_page_all(bio)),
699 				   fs_info->sectorsize);
700 	read_unlock(&em_tree->lock);
701 	if (!em)
702 		return BLK_STS_IOERR;
703 
704 	compressed_len = em->block_len;
705 	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
706 	if (!cb)
707 		goto out;
708 
709 	refcount_set(&cb->pending_bios, 0);
710 	cb->errors = 0;
711 	cb->inode = inode;
712 	cb->mirror_num = mirror_num;
713 	sums = cb->sums;
714 
715 	cb->start = em->orig_start;
716 	em_len = em->len;
717 	em_start = em->start;
718 
719 	free_extent_map(em);
720 	em = NULL;
721 
722 	cb->len = bio->bi_iter.bi_size;
723 	cb->compressed_len = compressed_len;
724 	cb->compress_type = extent_compress_type(bio_flags);
725 	cb->orig_bio = bio;
726 
727 	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
728 	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
729 				       GFP_NOFS);
730 	if (!cb->compressed_pages)
731 		goto fail1;
732 
733 	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
734 		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
735 							      __GFP_HIGHMEM);
736 		if (!cb->compressed_pages[pg_index]) {
737 			faili = pg_index - 1;
738 			ret = BLK_STS_RESOURCE;
739 			goto fail2;
740 		}
741 	}
742 	faili = nr_pages - 1;
743 	cb->nr_pages = nr_pages;
744 
745 	add_ra_bio_pages(inode, em_start + em_len, cb);
746 
747 	/* include any pages we added in add_ra_bio_pages */
748 	cb->len = bio->bi_iter.bi_size;
749 
750 	comp_bio = btrfs_bio_alloc(cur_disk_byte);
751 	comp_bio->bi_opf = REQ_OP_READ;
752 	comp_bio->bi_private = cb;
753 	comp_bio->bi_end_io = end_compressed_bio_read;
754 	refcount_set(&cb->pending_bios, 1);
755 
756 	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
757 		u32 pg_len = PAGE_SIZE;
758 		int submit = 0;
759 
760 		/*
761 		 * To handle subpage case, we need to make sure the bio only
762 		 * covers the range we need.
763 		 *
764 		 * If we're at the last page, truncate the length to only cover
765 		 * the remaining part.
766 		 */
767 		if (pg_index == nr_pages - 1)
768 			pg_len = min_t(u32, PAGE_SIZE,
769 					compressed_len - pg_index * PAGE_SIZE);
770 
771 		page = cb->compressed_pages[pg_index];
772 		page->mapping = inode->i_mapping;
773 		page->index = em_start >> PAGE_SHIFT;
774 
775 		if (comp_bio->bi_iter.bi_size)
776 			submit = btrfs_bio_fits_in_stripe(page, pg_len,
777 							  comp_bio, 0);
778 
779 		page->mapping = NULL;
780 		if (submit || bio_add_page(comp_bio, page, pg_len, 0) < pg_len) {
781 			unsigned int nr_sectors;
782 
783 			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
784 						  BTRFS_WQ_ENDIO_DATA);
785 			BUG_ON(ret); /* -ENOMEM */
786 
787 			/*
788 			 * inc the count before we submit the bio so
789 			 * we know the end IO handler won't happen before
790 			 * we inc the count.  Otherwise, the cb might get
791 			 * freed before we're done setting it up
792 			 */
793 			refcount_inc(&cb->pending_bios);
794 
795 			ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
796 			BUG_ON(ret); /* -ENOMEM */
797 
798 			nr_sectors = DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
799 						  fs_info->sectorsize);
800 			sums += fs_info->csum_size * nr_sectors;
801 
802 			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
803 			if (ret) {
804 				comp_bio->bi_status = ret;
805 				bio_endio(comp_bio);
806 			}
807 
808 			comp_bio = btrfs_bio_alloc(cur_disk_byte);
809 			comp_bio->bi_opf = REQ_OP_READ;
810 			comp_bio->bi_private = cb;
811 			comp_bio->bi_end_io = end_compressed_bio_read;
812 
813 			bio_add_page(comp_bio, page, pg_len, 0);
814 		}
815 		cur_disk_byte += pg_len;
816 	}
817 
818 	ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
819 	BUG_ON(ret); /* -ENOMEM */
820 
821 	ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
822 	BUG_ON(ret); /* -ENOMEM */
823 
824 	ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
825 	if (ret) {
826 		comp_bio->bi_status = ret;
827 		bio_endio(comp_bio);
828 	}
829 
830 	return 0;
831 
832 fail2:
833 	while (faili >= 0) {
834 		__free_page(cb->compressed_pages[faili]);
835 		faili--;
836 	}
837 
838 	kfree(cb->compressed_pages);
839 fail1:
840 	kfree(cb);
841 out:
842 	free_extent_map(em);
843 	return ret;
844 }
845 
846 /*
847  * The heuristic uses systematic sampling to collect data from the input data
848  * range; the logic can be tuned by the following constants:
849  *
850  * @SAMPLING_READ_SIZE - how many consecutive bytes are copied for each sample
851  * @SAMPLING_INTERVAL  - distance between the starting offsets of two samples
852  */
853 #define SAMPLING_READ_SIZE	(16)
854 #define SAMPLING_INTERVAL	(256)
855 
856 /*
857  * For statistical analysis of the input data we consider bytes that form a
858  * Galois Field of 256 objects. Each object has an attribute count, ie. how
859  * many times the object appeared in the sample.
860  */
861 #define BUCKET_SIZE		(256)
862 
863 /*
864  * The size of the sample is based on a statistical sampling rule of thumb.
865  * The common way is to perform sampling tests as long as the number of
866  * elements in each cell is at least 5.
867  *
868  * Instead of 5, we choose 32 to obtain more accurate results.
869  * If the data contain the maximum number of symbols, which is 256, we obtain a
870  * sample size bound by 8192.
871  *
872  * For a sample of at most 8KB of data per data range: 16 consecutive bytes
873  * from up to 512 locations.
874  */
875 #define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED *		\
876 				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
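/*
 * With BTRFS_MAX_UNCOMPRESSED being 128KiB (see the comment in
 * heuristic_collect_sample()), this works out to 131072 * 16 / 256 = 8192
 * bytes, i.e. up to 512 samples of 16 bytes each.
 */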
877 
878 struct bucket_item {
879 	u32 count;
880 };
881 
882 struct heuristic_ws {
883 	/* Partial copy of input data */
884 	u8 *sample;
885 	u32 sample_size;
886 	/* Buckets store counters for each byte value */
887 	struct bucket_item *bucket;
888 	/* Sorting buffer */
889 	struct bucket_item *bucket_b;
890 	struct list_head list;
891 };
892 
893 static struct workspace_manager heuristic_wsm;
894 
895 static void free_heuristic_ws(struct list_head *ws)
896 {
897 	struct heuristic_ws *workspace;
898 
899 	workspace = list_entry(ws, struct heuristic_ws, list);
900 
901 	kvfree(workspace->sample);
902 	kfree(workspace->bucket);
903 	kfree(workspace->bucket_b);
904 	kfree(workspace);
905 }
906 
907 static struct list_head *alloc_heuristic_ws(unsigned int level)
908 {
909 	struct heuristic_ws *ws;
910 
911 	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
912 	if (!ws)
913 		return ERR_PTR(-ENOMEM);
914 
915 	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
916 	if (!ws->sample)
917 		goto fail;
918 
919 	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
920 	if (!ws->bucket)
921 		goto fail;
922 
923 	ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
924 	if (!ws->bucket_b)
925 		goto fail;
926 
927 	INIT_LIST_HEAD(&ws->list);
928 	return &ws->list;
929 fail:
930 	free_heuristic_ws(&ws->list);
931 	return ERR_PTR(-ENOMEM);
932 }
933 
934 const struct btrfs_compress_op btrfs_heuristic_compress = {
935 	.workspace_manager = &heuristic_wsm,
936 };
937 
938 static const struct btrfs_compress_op * const btrfs_compress_op[] = {
939 	/* The heuristic is represented as compression type 0 */
940 	&btrfs_heuristic_compress,
941 	&btrfs_zlib_compress,
942 	&btrfs_lzo_compress,
943 	&btrfs_zstd_compress,
944 };
945 
946 static struct list_head *alloc_workspace(int type, unsigned int level)
947 {
948 	switch (type) {
949 	case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
950 	case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
951 	case BTRFS_COMPRESS_LZO:  return lzo_alloc_workspace(level);
952 	case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
953 	default:
954 		/*
955 		 * This can't happen, the type is validated several times
956 		 * before we get here.
957 		 */
958 		BUG();
959 	}
960 }
961 
962 static void free_workspace(int type, struct list_head *ws)
963 {
964 	switch (type) {
965 	case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
966 	case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
967 	case BTRFS_COMPRESS_LZO:  return lzo_free_workspace(ws);
968 	case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
969 	default:
970 		/*
971 		 * This can't happen, the type is validated several times
972 		 * before we get here.
973 		 */
974 		BUG();
975 	}
976 }
977 
978 static void btrfs_init_workspace_manager(int type)
979 {
980 	struct workspace_manager *wsm;
981 	struct list_head *workspace;
982 
983 	wsm = btrfs_compress_op[type]->workspace_manager;
984 	INIT_LIST_HEAD(&wsm->idle_ws);
985 	spin_lock_init(&wsm->ws_lock);
986 	atomic_set(&wsm->total_ws, 0);
987 	init_waitqueue_head(&wsm->ws_wait);
988 
989 	/*
990 	 * Preallocate one workspace for each compression type so we can
991 	 * guarantee forward progress in the worst case
992 	 */
993 	workspace = alloc_workspace(type, 0);
994 	if (IS_ERR(workspace)) {
995 		pr_warn(
996 	"BTRFS: cannot preallocate compression workspace, will try later\n");
997 	} else {
998 		atomic_set(&wsm->total_ws, 1);
999 		wsm->free_ws = 1;
1000 		list_add(workspace, &wsm->idle_ws);
1001 	}
1002 }
1003 
1004 static void btrfs_cleanup_workspace_manager(int type)
1005 {
1006 	struct workspace_manager *wsman;
1007 	struct list_head *ws;
1008 
1009 	wsman = btrfs_compress_op[type]->workspace_manager;
1010 	while (!list_empty(&wsman->idle_ws)) {
1011 		ws = wsman->idle_ws.next;
1012 		list_del(ws);
1013 		free_workspace(type, ws);
1014 		atomic_dec(&wsman->total_ws);
1015 	}
1016 }
1017 
1018 /*
1019  * This finds an available workspace or allocates a new one.
1020  * If it's not possible to allocate a new one, wait until one becomes
1021  * available. Preallocation makes a forward progress guarantee and we do not
1022  * return errors.
1023  */
1024 struct list_head *btrfs_get_workspace(int type, unsigned int level)
1025 {
1026 	struct workspace_manager *wsm;
1027 	struct list_head *workspace;
1028 	int cpus = num_online_cpus();
1029 	unsigned nofs_flag;
1030 	struct list_head *idle_ws;
1031 	spinlock_t *ws_lock;
1032 	atomic_t *total_ws;
1033 	wait_queue_head_t *ws_wait;
1034 	int *free_ws;
1035 
1036 	wsm = btrfs_compress_op[type]->workspace_manager;
1037 	idle_ws	 = &wsm->idle_ws;
1038 	ws_lock	 = &wsm->ws_lock;
1039 	total_ws = &wsm->total_ws;
1040 	ws_wait	 = &wsm->ws_wait;
1041 	free_ws	 = &wsm->free_ws;
1042 
1043 again:
1044 	spin_lock(ws_lock);
1045 	if (!list_empty(idle_ws)) {
1046 		workspace = idle_ws->next;
1047 		list_del(workspace);
1048 		(*free_ws)--;
1049 		spin_unlock(ws_lock);
1050 		return workspace;
1051 
1052 	}
1053 	if (atomic_read(total_ws) > cpus) {
1054 		DEFINE_WAIT(wait);
1055 
1056 		spin_unlock(ws_lock);
1057 		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
1058 		if (atomic_read(total_ws) > cpus && !*free_ws)
1059 			schedule();
1060 		finish_wait(ws_wait, &wait);
1061 		goto again;
1062 	}
1063 	atomic_inc(total_ws);
1064 	spin_unlock(ws_lock);
1065 
1066 	/*
1067 	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
1068 	 * to turn it off here because we might get called from the restricted
1069 	 * context of btrfs_compress_bio/btrfs_compress_pages
1070 	 */
1071 	nofs_flag = memalloc_nofs_save();
1072 	workspace = alloc_workspace(type, level);
1073 	memalloc_nofs_restore(nofs_flag);
1074 
1075 	if (IS_ERR(workspace)) {
1076 		atomic_dec(total_ws);
1077 		wake_up(ws_wait);
1078 
1079 		/*
1080 		 * Do not return the error but go back to waiting. There's a
1081 		 * workspace preallocated for each type and the compression
1082 		 * time is bounded so we get to a workspace eventually. This
1083 		 * makes our caller's life easier.
1084 		 *
1085 		 * To prevent silent and low-probability deadlocks (when the
1086 		 * initial preallocation fails), check if there are any
1087 		 * workspaces at all.
1088 		 */
1089 		if (atomic_read(total_ws) == 0) {
1090 			static DEFINE_RATELIMIT_STATE(_rs,
1091 					/* once per minute */ 60 * HZ,
1092 					/* no burst */ 1);
1093 
1094 			if (__ratelimit(&_rs)) {
1095 				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
1096 			}
1097 		}
1098 		goto again;
1099 	}
1100 	return workspace;
1101 }
1102 
1103 static struct list_head *get_workspace(int type, int level)
1104 {
1105 	switch (type) {
1106 	case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level);
1107 	case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level);
1108 	case BTRFS_COMPRESS_LZO:  return btrfs_get_workspace(type, level);
1109 	case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level);
1110 	default:
1111 		/*
1112 		 * This can't happen, the type is validated several times
1113 		 * before we get here.
1114 		 */
1115 		BUG();
1116 	}
1117 }
1118 
1119 /*
1120  * put a workspace struct back on the list or free it if we have enough
1121  * idle ones sitting around
1122  */
1123 void btrfs_put_workspace(int type, struct list_head *ws)
1124 {
1125 	struct workspace_manager *wsm;
1126 	struct list_head *idle_ws;
1127 	spinlock_t *ws_lock;
1128 	atomic_t *total_ws;
1129 	wait_queue_head_t *ws_wait;
1130 	int *free_ws;
1131 
1132 	wsm = btrfs_compress_op[type]->workspace_manager;
1133 	idle_ws	 = &wsm->idle_ws;
1134 	ws_lock	 = &wsm->ws_lock;
1135 	total_ws = &wsm->total_ws;
1136 	ws_wait	 = &wsm->ws_wait;
1137 	free_ws	 = &wsm->free_ws;
1138 
1139 	spin_lock(ws_lock);
1140 	if (*free_ws <= num_online_cpus()) {
1141 		list_add(ws, idle_ws);
1142 		(*free_ws)++;
1143 		spin_unlock(ws_lock);
1144 		goto wake;
1145 	}
1146 	spin_unlock(ws_lock);
1147 
1148 	free_workspace(type, ws);
1149 	atomic_dec(total_ws);
1150 wake:
1151 	cond_wake_up(ws_wait);
1152 }
1153 
1154 static void put_workspace(int type, struct list_head *ws)
1155 {
1156 	switch (type) {
1157 	case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
1158 	case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
1159 	case BTRFS_COMPRESS_LZO:  return btrfs_put_workspace(type, ws);
1160 	case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
1161 	default:
1162 		/*
1163 		 * This can't happen, the type is validated several times
1164 		 * before we get here.
1165 		 */
1166 		BUG();
1167 	}
1168 }
1169 
1170 /*
1171  * Adjust @level according to the limits of the compression algorithm or
1172  * fall back to the default
1173  */
1174 static unsigned int btrfs_compress_set_level(int type, unsigned level)
1175 {
1176 	const struct btrfs_compress_op *ops = btrfs_compress_op[type];
1177 
1178 	if (level == 0)
1179 		level = ops->default_level;
1180 	else
1181 		level = min(level, ops->max_level);
1182 
1183 	return level;
1184 }
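/*
 * For example, a requested level of 0 becomes ops->default_level for the
 * chosen algorithm, while an over-large request is clamped to ops->max_level
 * (the concrete numbers live in the per-algorithm btrfs_compress_op tables,
 * e.g. zlib is capped at level 9 in current kernels).
 */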
1185 
1186 /*
1187  * Given an address space and start and length, compress the bytes into @pages
1188  * that are allocated on demand.
1189  *
1190  * @type_level is encoded algorithm and level, where level 0 means whatever
1191  * default the algorithm chooses and is opaque here;
1192  * - the compression algorithm is stored in bits 0-3
1193  * - the level is stored in bits 4-7
1194  *
1195  * @out_pages is an in/out parameter, holds maximum number of pages to allocate
1196  * and returns number of actually allocated pages
1197  *
1198  * @total_in is used to return the number of bytes actually read.  It
1199  * may be smaller than the input length if we had to exit early because we
1200  * ran out of room in the pages array or because we cross the
1201  * max_out threshold.
1202  *
1203  * @total_out is an in/out parameter, must be set to the input length and will
1204  * be also used to return the total number of compressed bytes
1208  */
1209 int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
1210 			 u64 start, struct page **pages,
1211 			 unsigned long *out_pages,
1212 			 unsigned long *total_in,
1213 			 unsigned long *total_out)
1214 {
1215 	int type = btrfs_compress_type(type_level);
1216 	int level = btrfs_compress_level(type_level);
1217 	struct list_head *workspace;
1218 	int ret;
1219 
1220 	level = btrfs_compress_set_level(type, level);
1221 	workspace = get_workspace(type, level);
1222 	ret = compression_compress_pages(type, workspace, mapping, start, pages,
1223 					 out_pages, total_in, total_out);
1224 	put_workspace(type, workspace);
1225 	return ret;
1226 }
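/*
 * Example of the @type_level encoding described above: zstd at level 3 is
 * passed in as (3 << 4) | BTRFS_COMPRESS_ZSTD, and btrfs_compress_type() /
 * btrfs_compress_level() then recover the two halves from bits 0-3 and 4-7.
 */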
1227 
1228 /*
1229  * cb->compressed_pages is an array of pages with compressed data.
1230  *
1231  * cb->start is the starting logical offset of this extent in the file
1232  *
1233  * cb->orig_bio contains the pages from the file that we want to decompress into
1234  *
1235  * cb->compressed_len is the number of bytes of compressed data
1236  *
1237  * The basic idea is that we have a bio that was created by readpages.
1238  * The pages in the bio are for the uncompressed data, and they may not
1239  * be contiguous.  They all correspond to the range of bytes covered by
1240  * the compressed extent.
1241  */
1242 static int btrfs_decompress_bio(struct compressed_bio *cb)
1243 {
1244 	struct list_head *workspace;
1245 	int ret;
1246 	int type = cb->compress_type;
1247 
1248 	workspace = get_workspace(type, 0);
1249 	ret = compression_decompress_bio(type, workspace, cb);
1250 	put_workspace(type, workspace);
1251 
1252 	return ret;
1253 }
1254 
1255 /*
1256  * a less complex decompression routine.  Our compressed data fits in a
1257  * single page, and we want to read a single page out of it.
1258  * start_byte tells us the offset into the compressed data we're interested in
1259  */
1260 int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
1261 		     unsigned long start_byte, size_t srclen, size_t destlen)
1262 {
1263 	struct list_head *workspace;
1264 	int ret;
1265 
1266 	workspace = get_workspace(type, 0);
1267 	ret = compression_decompress(type, workspace, data_in, dest_page,
1268 				     start_byte, srclen, destlen);
1269 	put_workspace(type, workspace);
1270 
1271 	return ret;
1272 }
1273 
1274 void __init btrfs_init_compress(void)
1275 {
1276 	btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
1277 	btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
1278 	btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
1279 	zstd_init_workspace_manager();
1280 }
1281 
1282 void __cold btrfs_exit_compress(void)
1283 {
1284 	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
1285 	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
1286 	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
1287 	zstd_cleanup_workspace_manager();
1288 }
1289 
1290 /*
1291  * Copy uncompressed data from working buffer to pages.
1292  *
1293  * buf_start is the byte offset of the start of the working buffer within
1294  * the uncompressed data, and total_out is the number of uncompressed bytes
1295  * produced so far, i.e. the offset just past the end of the working buffer
1296  */
1297 int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
1298 			      unsigned long total_out, u64 disk_start,
1299 			      struct bio *bio)
1300 {
1301 	unsigned long buf_offset;
1302 	unsigned long current_buf_start;
1303 	unsigned long start_byte;
1304 	unsigned long prev_start_byte;
1305 	unsigned long working_bytes = total_out - buf_start;
1306 	unsigned long bytes;
1307 	struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);
1308 
1309 	/*
1310 	 * start_byte is the offset of the first byte of the page we're currently
1311 	 * copying into, relative to the start of the uncompressed data.
1312 	 */
1313 	start_byte = page_offset(bvec.bv_page) - disk_start;
1314 
1315 	/* we haven't yet hit data corresponding to this page */
1316 	if (total_out <= start_byte)
1317 		return 1;
1318 
1319 	/*
1320 	 * the start of the data we care about is offset into
1321 	 * the middle of our working buffer
1322 	 */
1323 	if (total_out > start_byte && buf_start < start_byte) {
1324 		buf_offset = start_byte - buf_start;
1325 		working_bytes -= buf_offset;
1326 	} else {
1327 		buf_offset = 0;
1328 	}
1329 	current_buf_start = buf_start;
1330 
1331 	/* copy bytes from the working buffer into the pages */
1332 	while (working_bytes > 0) {
1333 		bytes = min_t(unsigned long, bvec.bv_len,
1334 				PAGE_SIZE - (buf_offset % PAGE_SIZE));
1335 		bytes = min(bytes, working_bytes);
1336 
1337 		memcpy_to_page(bvec.bv_page, bvec.bv_offset, buf + buf_offset,
1338 			       bytes);
1339 		flush_dcache_page(bvec.bv_page);
1340 
1341 		buf_offset += bytes;
1342 		working_bytes -= bytes;
1343 		current_buf_start += bytes;
1344 
1345 		/* check if we need to pick another page */
1346 		bio_advance(bio, bytes);
1347 		if (!bio->bi_iter.bi_size)
1348 			return 0;
1349 		bvec = bio_iter_iovec(bio, bio->bi_iter);
1350 		prev_start_byte = start_byte;
1351 		start_byte = page_offset(bvec.bv_page) - disk_start;
1352 
1353 		/*
1354 		 * We need to make sure we're only adjusting
1355 		 * our offset into compression working buffer when
1356 		 * we're switching pages.  Otherwise we can incorrectly
1357 		 * keep copying when we were actually done.
1358 		 */
1359 		if (start_byte != prev_start_byte) {
1360 			/*
1361 			 * make sure our new page is covered by this
1362 			 * working buffer
1363 			 */
1364 			if (total_out <= start_byte)
1365 				return 1;
1366 
1367 			/*
1368 			 * the next page in the biovec might not be adjacent
1369 			 * to the last page, but it might still be found
1370 			 * inside this working buffer. bump our offset pointer
1371 			 */
1372 			if (total_out > start_byte &&
1373 			    current_buf_start < start_byte) {
1374 				buf_offset = start_byte - buf_start;
1375 				working_bytes = total_out - start_byte;
1376 				current_buf_start = buf_start + buf_offset;
1377 			}
1378 		}
1379 	}
1380 
1381 	return 1;
1382 }
1383 
1384 /*
1385  * Shannon Entropy calculation
1386  *
1387  * Pure byte distribution analysis fails to determine compressibility of data.
1388  * Try calculating entropy to estimate the average minimum number of bits
1389  * needed to encode the sampled data.
1390  *
1391  * For convenience, return the percentage of needed bits, instead of amount of
1392  * bits directly.
1393  *
1394  * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
1395  *			    and can be compressible with high probability
1396  *
1397  * @ENTROPY_LVL_HIGH - data are not compressible with high probability
1398  *
1399  * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
1400  */
1401 #define ENTROPY_LVL_ACEPTABLE		(65)
1402 #define ENTROPY_LVL_HIGH		(80)
1403 
1404 /*
1405  * For increased precision in the shannon_entropy calculation,
1406  * let's do pow(n, M) to preserve more digits after the decimal point:
1407  *
1408  * - maximum int bit length is 64
1409  * - ilog2(MAX_SAMPLE_SIZE)	-> 13
1410  * - 13 * 4 = 52 < 64		-> M = 4
1411  *
1412  * So use pow(n, 4).
1413  */
1414 static inline u32 ilog2_w(u64 n)
1415 {
1416 	return ilog2(n * n * n * n);
1417 }
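/*
 * Example of the extra precision: plain ilog2(3) = 1, while ilog2_w(3) =
 * ilog2(81) = 6, i.e. log2(3) ~= 1.58 is kept as 6 quarter-bits (6 / 4 = 1.5)
 * instead of being rounded all the way down to 1.
 */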
1418 
1419 static u32 shannon_entropy(struct heuristic_ws *ws)
1420 {
1421 	const u32 entropy_max = 8 * ilog2_w(2);
1422 	u32 entropy_sum = 0;
1423 	u32 p, p_base, sz_base;
1424 	u32 i;
1425 
1426 	sz_base = ilog2_w(ws->sample_size);
1427 	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
1428 		p = ws->bucket[i].count;
1429 		p_base = ilog2_w(p);
1430 		entropy_sum += p * (sz_base - p_base);
1431 	}
1432 
1433 	entropy_sum /= ws->sample_size;
1434 	return entropy_sum * 100 / entropy_max;
1435 }
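/*
 * The loop above is a fixed-point version of H = -sum(p_i * log2(p_i)):
 * count * (sz_base - p_base) approximates count * 4 * log2(sample_size/count)
 * in quarter-bit units, dividing by sample_size turns counts into
 * probabilities, and entropy_max (8 * ilog2_w(2) = 32 quarter-bits) is the
 * 8 bits of a fully random byte, so the return value is the percentage of
 * those 8 bits that the sampled data actually needs.
 */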
1436 
1437 #define RADIX_BASE		4U
1438 #define COUNTERS_SIZE		(1U << RADIX_BASE)
1439 
1440 static u8 get4bits(u64 num, int shift) {
1441 	u8 low4bits;
1442 
1443 	num >>= shift;
1444 	/* Reverse order */
1445 	low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
1446 	return low4bits;
1447 }
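/*
 * Example: with COUNTERS_SIZE = 16, get4bits(10, 0) returns 15 - 10 = 5.
 * Because every digit is mapped to its complement, the radix sort below
 * orders the buckets by count in descending order, which is what
 * byte_core_set_size() relies on ("Sort in reverse order").
 */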
1448 
1449 /*
1450  * Use 4 bits as radix base
1451  * Use 16 u32 counters for calculating new position in buf array
1452  *
1453  * @array     - array that will be sorted
1454  * @array_buf - buffer array to store sorting results
1455  *              must be equal in size to @array
1456  * @num       - array size
1457  */
1458 static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
1459 		       int num)
1460 {
1461 	u64 max_num;
1462 	u64 buf_num;
1463 	u32 counters[COUNTERS_SIZE];
1464 	u32 new_addr;
1465 	u32 addr;
1466 	int bitlen;
1467 	int shift;
1468 	int i;
1469 
1470 	/*
1471 	 * Try to avoid useless loop iterations for small numbers stored in big
1472 	 * counters.  Example: 48 33 4 ... in 64bit array
1473 	 */
1474 	max_num = array[0].count;
1475 	for (i = 1; i < num; i++) {
1476 		buf_num = array[i].count;
1477 		if (buf_num > max_num)
1478 			max_num = buf_num;
1479 	}
1480 
1481 	buf_num = ilog2(max_num);
1482 	bitlen = ALIGN(buf_num, RADIX_BASE * 2);
1483 
1484 	shift = 0;
1485 	while (shift < bitlen) {
1486 		memset(counters, 0, sizeof(counters));
1487 
1488 		for (i = 0; i < num; i++) {
1489 			buf_num = array[i].count;
1490 			addr = get4bits(buf_num, shift);
1491 			counters[addr]++;
1492 		}
1493 
1494 		for (i = 1; i < COUNTERS_SIZE; i++)
1495 			counters[i] += counters[i - 1];
1496 
1497 		for (i = num - 1; i >= 0; i--) {
1498 			buf_num = array[i].count;
1499 			addr = get4bits(buf_num, shift);
1500 			counters[addr]--;
1501 			new_addr = counters[addr];
1502 			array_buf[new_addr] = array[i];
1503 		}
1504 
1505 		shift += RADIX_BASE;
1506 
1507 		/*
1508 		 * Normal radix expects to move data from a temporary array, to
1509 		 * the main one.  But that requires some CPU time. Avoid that
1510 		 * by doing another sort iteration to original array instead of
1511 		 * memcpy()
1512 		 */
1513 		memset(counters, 0, sizeof(counters));
1514 
1515 		for (i = 0; i < num; i++) {
1516 			buf_num = array_buf[i].count;
1517 			addr = get4bits(buf_num, shift);
1518 			counters[addr]++;
1519 		}
1520 
1521 		for (i = 1; i < COUNTERS_SIZE; i++)
1522 			counters[i] += counters[i - 1];
1523 
1524 		for (i = num - 1; i >= 0; i--) {
1525 			buf_num = array_buf[i].count;
1526 			addr = get4bits(buf_num, shift);
1527 			counters[addr]--;
1528 			new_addr = counters[addr];
1529 			array[new_addr] = array_buf[i];
1530 		}
1531 
1532 		shift += RADIX_BASE;
1533 	}
1534 }
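/*
 * Each iteration of the while loop above does two counting-sort passes of
 * RADIX_BASE bits: first from @array into @array_buf and then back again, so
 * the partially sorted data always ends up in @array and no extra memcpy() is
 * needed, as noted in the comment before the second pass.
 */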
1535 
1536 /*
1537  * Size of the core byte set - how many bytes cover 90% of the sample
1538  *
1539  * There are several types of structured binary data that use nearly all byte
1540  * values. The distribution can be uniform and counts in all buckets will be
1541  * nearly the same (eg. encrypted data). Unlikely to be compressible.
1542  *
1543  * Other possibility is normal (Gaussian) distribution, where the data could
1544  * be potentially compressible, but we have to take a few more steps to decide
1545  * how much.
1546  *
1547  * @BYTE_CORE_SET_LOW  - the main part of the byte values repeats frequently,
1548  *                       a compression algorithm can easily handle that
1549  * @BYTE_CORE_SET_HIGH - the data has a uniform distribution and with high
1550  *                       probability is not compressible
1551  */
1552 #define BYTE_CORE_SET_LOW		(64)
1553 #define BYTE_CORE_SET_HIGH		(200)
1554 
1555 static int byte_core_set_size(struct heuristic_ws *ws)
1556 {
1557 	u32 i;
1558 	u32 coreset_sum = 0;
1559 	const u32 core_set_threshold = ws->sample_size * 90 / 100;
1560 	struct bucket_item *bucket = ws->bucket;
1561 
1562 	/* Sort in reverse order */
1563 	radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);
1564 
1565 	for (i = 0; i < BYTE_CORE_SET_LOW; i++)
1566 		coreset_sum += bucket[i].count;
1567 
1568 	if (coreset_sum > core_set_threshold)
1569 		return i;
1570 
1571 	for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
1572 		coreset_sum += bucket[i].count;
1573 		if (coreset_sum > core_set_threshold)
1574 			break;
1575 	}
1576 
1577 	return i;
1578 }
1579 
1580 /*
1581  * Count byte values in buckets.
1582  * This heuristic can detect textual data (configs, xml, json, html, etc).
1583  * In most text-like data the byte set is restricted to a limited number of
1584  * possible characters, and that restriction in most cases makes the data
1585  * easy to compress.
1586  *
1587  * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
1588  *	less - compressible
1589  *	more - need additional analysis
1590  */
1591 #define BYTE_SET_THRESHOLD		(64)
1592 
1593 static u32 byte_set_size(const struct heuristic_ws *ws)
1594 {
1595 	u32 i;
1596 	u32 byte_set_size = 0;
1597 
1598 	for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
1599 		if (ws->bucket[i].count > 0)
1600 			byte_set_size++;
1601 	}
1602 
1603 	/*
1604 	 * Continue collecting count of byte values in buckets.  If the byte
1605 	 * set size is bigger than the threshold, it's pointless to continue,
1606 	 * the detection technique would fail for this type of data.
1607 	 */
1608 	for (; i < BUCKET_SIZE; i++) {
1609 		if (ws->bucket[i].count > 0) {
1610 			byte_set_size++;
1611 			if (byte_set_size > BYTE_SET_THRESHOLD)
1612 				return byte_set_size;
1613 		}
1614 	}
1615 
1616 	return byte_set_size;
1617 }
1618 
1619 static bool sample_repeated_patterns(struct heuristic_ws *ws)
1620 {
1621 	const u32 half_of_sample = ws->sample_size / 2;
1622 	const u8 *data = ws->sample;
1623 
1624 	return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
1625 }
1626 
1627 static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
1628 				     struct heuristic_ws *ws)
1629 {
1630 	struct page *page;
1631 	u64 index, index_end;
1632 	u32 i, curr_sample_pos;
1633 	u8 *in_data;
1634 
1635 	/*
1636 	 * Compression handles the input data by chunks of 128KiB
1637 	 * (defined by BTRFS_MAX_UNCOMPRESSED)
1638 	 *
1639 	 * We do the same for the heuristic and loop over the whole range.
1640 	 *
1641 	 * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
1642 	 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
1643 	 */
1644 	if (end - start > BTRFS_MAX_UNCOMPRESSED)
1645 		end = start + BTRFS_MAX_UNCOMPRESSED;
1646 
1647 	index = start >> PAGE_SHIFT;
1648 	index_end = end >> PAGE_SHIFT;
1649 
1650 	/* Don't miss unaligned end */
1651 	if (!IS_ALIGNED(end, PAGE_SIZE))
1652 		index_end++;
1653 
1654 	curr_sample_pos = 0;
1655 	while (index < index_end) {
1656 		page = find_get_page(inode->i_mapping, index);
1657 		in_data = kmap_local_page(page);
1658 		/* Handle case where the start is not aligned to PAGE_SIZE */
1659 		i = start % PAGE_SIZE;
1660 		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
1661 			/* Don't sample any garbage from the last page */
1662 			if (start > end - SAMPLING_READ_SIZE)
1663 				break;
1664 			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
1665 					SAMPLING_READ_SIZE);
1666 			i += SAMPLING_INTERVAL;
1667 			start += SAMPLING_INTERVAL;
1668 			curr_sample_pos += SAMPLING_READ_SIZE;
1669 		}
1670 		kunmap_local(in_data);
1671 		put_page(page);
1672 
1673 		index++;
1674 	}
1675 
1676 	ws->sample_size = curr_sample_pos;
1677 }
1678 
1679 /*
1680  * Compression heuristic.
1681  *
1682  * For now it's a naive and optimistic 'return true', we'll extend the logic to
1683  * quickly (compared to direct compression) detect data characteristics
1684  * (compressible/uncompressible) to avoid wasting CPU time on uncompressible
1685  * data.
1686  *
1687  * The following types of analysis can be performed:
1688  * - detect mostly zero data
1689  * - detect data with low "byte set" size (text, etc)
1690  * - detect data with low/high "core byte" set
1691  *
1692  * Return non-zero if the compression should be done, 0 otherwise.
1693  */
1694 int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
1695 {
1696 	struct list_head *ws_list = get_workspace(0, 0);
1697 	struct heuristic_ws *ws;
1698 	u32 i;
1699 	u8 byte;
1700 	int ret = 0;
1701 
1702 	ws = list_entry(ws_list, struct heuristic_ws, list);
1703 
1704 	heuristic_collect_sample(inode, start, end, ws);
1705 
1706 	if (sample_repeated_patterns(ws)) {
1707 		ret = 1;
1708 		goto out;
1709 	}
1710 
1711 	memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);
1712 
1713 	for (i = 0; i < ws->sample_size; i++) {
1714 		byte = ws->sample[i];
1715 		ws->bucket[byte].count++;
1716 	}
1717 
1718 	i = byte_set_size(ws);
1719 	if (i < BYTE_SET_THRESHOLD) {
1720 		ret = 2;
1721 		goto out;
1722 	}
1723 
1724 	i = byte_core_set_size(ws);
1725 	if (i <= BYTE_CORE_SET_LOW) {
1726 		ret = 3;
1727 		goto out;
1728 	}
1729 
1730 	if (i >= BYTE_CORE_SET_HIGH) {
1731 		ret = 0;
1732 		goto out;
1733 	}
1734 
1735 	i = shannon_entropy(ws);
1736 	if (i <= ENTROPY_LVL_ACEPTABLE) {
1737 		ret = 4;
1738 		goto out;
1739 	}
1740 
1741 	/*
1742 	 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
1743 	 * needed to give green light to compression.
1744 	 *
1745 	 * For now just assume that compression at that level is not worth the
1746 	 * resources because:
1747 	 *
1748 	 * 1. it is possible to defrag the data later
1749 	 *
1750 	 * 2. the data would turn out to be hardly compressible, eg. 150 byte
1751 	 * values, every bucket has counter at level ~54. The heuristic would
1752 	 * be confused. This can happen when data have some internal repeated
1753 	 * patterns like "abbacbbc...". This can be detected by analyzing
1754 	 * pairs of bytes, which is too costly.
1755 	 */
1756 	if (i < ENTROPY_LVL_HIGH) {
1757 		ret = 5;
1758 		goto out;
1759 	} else {
1760 		ret = 0;
1761 		goto out;
1762 	}
1763 
1764 out:
1765 	put_workspace(0, ws_list);
1766 	return ret;
1767 }
1768 
1769 /*
1770  * Convert the compression suffix (eg. after "zlib" starting with ":") to
1771  * level, an unrecognized string will set the default level
1772  */
1773 unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
1774 {
1775 	unsigned int level = 0;
1776 	int ret;
1777 
1778 	if (!type)
1779 		return 0;
1780 
1781 	if (str[0] == ':') {
1782 		ret = kstrtouint(str + 1, 10, &level);
1783 		if (ret)
1784 			level = 0;
1785 	}
1786 
1787 	level = btrfs_compress_set_level(type, level);
1788 
1789 	return level;
1790 }
1791
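/*
 * For instance, given the suffix ":9" (as in the mount option "compress=zlib:9")
 * the function above returns 9, possibly clamped by btrfs_compress_set_level(),
 * while an empty string or a malformed suffix such as ":foo" falls back to the
 * algorithm's default level.
 */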