/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"

static int btrfs_decompress_bio(struct compressed_bio *cb);

static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
				      unsigned long disk_size)
{
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	return sizeof(struct compressed_bio) +
		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
}
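
/*
 * Worked example (illustrative, assuming the common 4K sectorsize and the
 * 4-byte crc32c csum_size): a 128K compressed extent needs
 * sizeof(struct compressed_bio) + (128K / 4K) * 4 = sizeof(cb) + 128 bytes,
 * the tail being the inline array of per-sector checksums that starts at
 * cb->sums.
 */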

static int check_compressed_csum(struct btrfs_inode *inode,
				 struct compressed_bio *cb,
				 u64 disk_start)
{
	int ret;
	struct page *page;
	unsigned long i;
	char *kaddr;
	u32 csum;
	u32 *cb_sum = &cb->sums;

	if (inode->flags & BTRFS_INODE_NODATASUM)
		return 0;

	for (i = 0; i < cb->nr_pages; i++) {
		page = cb->compressed_pages[i];
		csum = ~(u32)0;

		kaddr = kmap_atomic(page);
		csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
		btrfs_csum_final(csum, (u8 *)&csum);
		kunmap_atomic(kaddr);

		if (csum != *cb_sum) {
			btrfs_print_data_csum_error(inode, disk_start, csum,
					*cb_sum, cb->mirror_num);
			ret = -EIO;
			goto fail;
		}
		cb_sum++;
	}
	ret = 0;
fail:
	return ret;
}
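
/*
 * Note on the checksum layout (an observation, not new behavior): the loop
 * above verifies one u32 per compressed page, advancing cb_sum a PAGE_SIZE
 * chunk at a time, while compressed_bio_size() sizes the trailing array per
 * sector.  The two conventions agree on the usual configuration where
 * sectorsize == PAGE_SIZE, e.g. 4K sectors with 4K pages.
 */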

/* when we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally.
 *
 * The compressed pages are freed here, and it must be run
 * in process context.
 */
static void end_compressed_bio_read(struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;
	unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
	int ret = 0;

	if (bio->bi_status)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/*
	 * Record the correct mirror_num in cb->orig_bio so that
	 * read-repair can work properly.
	 */
	ASSERT(btrfs_io_bio(cb->orig_bio));
	btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
	cb->mirror_num = mirror;

	/*
	 * Some IO in this cb has failed, just skip the checksum as there
	 * is no way it could be correct.
	 */
	if (cb->errors)
		goto csum_failed;

	inode = cb->inode;
	ret = check_compressed_csum(BTRFS_I(inode), cb,
				    (u64)bio->bi_iter.bi_sector << 9);
	if (ret)
		goto csum_failed;

	/* ok, we're the last bio for this extent, let's start
	 * the decompression.
	 */
	ret = btrfs_decompress_bio(cb);

csum_failed:
	if (ret)
		cb->errors = 1;

	/* release the compressed pages */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* do io completion on the original bio */
	if (cb->errors) {
		bio_io_error(cb->orig_bio);
	} else {
		int i;
		struct bio_vec *bvec;

		/*
		 * we have verified the checksum already, set page
		 * checked so the end_io handlers know about it
		 */
		ASSERT(!bio_flagged(bio, BIO_CLONED));
		bio_for_each_segment_all(bvec, cb->orig_bio, i)
			SetPageChecked(bvec->bv_page);

		bio_endio(cb->orig_bio);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(struct inode *inode,
					      const struct compressed_bio *cb)
{
	unsigned long index = cb->start >> PAGE_SHIFT;
	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int ret;

	if (cb->errors)
		mapping_set_error(inode->i_mapping, -EIO);

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			if (cb->errors)
				SetPageError(pages[i]);
			end_page_writeback(pages[i]);
			put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* the inode may be gone now */
}
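
/*
 * Worked example for the loop above (illustrative): a 96K write spans 24
 * pages with 4K pages, so the first find_get_pages_contig() call fills the
 * 16-entry batch and the second returns the remaining 8.  If a lookup
 * returns 0 (a gap in the contiguous range), we skip forward a single page
 * and retry instead of giving up on the rest of the range.
 */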

/*
 * do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio)
{
	struct extent_io_tree *tree;
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;

	if (bio->bi_status)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/* ok, we're the last bio for this extent, step one is to
	 * call back into the FS and do all the end_io operations
	 */
	inode = cb->inode;
	tree = &BTRFS_I(inode)->io_tree;
	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
					 cb->start,
					 cb->start + cb->len - 1,
					 NULL,
					 bio->bi_status ? 0 : 1);
	cb->compressed_pages[0]->mapping = NULL;

	end_compressed_writeback(inode, cb);
	/* note, our inode could be gone now */

	/*
	 * release the compressed pages, these came from alloc_page and
	 * are not attached to the inode at all
	 */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
				 unsigned long len, u64 disk_start,
				 unsigned long compressed_len,
				 struct page **compressed_pages,
				 unsigned long nr_pages)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct bio *bio = NULL;
	struct compressed_bio *cb;
	unsigned long bytes_left;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int pg_index = 0;
	struct page *page;
	u64 first_byte = disk_start;
	struct block_device *bdev;
	blk_status_t ret;
	int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	WARN_ON(start & ((u64)PAGE_SIZE - 1));
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		return BLK_STS_RESOURCE;
	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->start = start;
	cb->len = len;
	cb->mirror_num = 0;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = compressed_len;
	cb->orig_bio = NULL;
	cb->nr_pages = nr_pages;

	bdev = fs_info->fs_devices->latest_bdev;

	bio = btrfs_bio_alloc(bdev, first_byte);
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	bio->bi_private = cb;
	bio->bi_end_io = end_compressed_bio_write;
	refcount_set(&cb->pending_bios, 1);

	/* create and submit bios for the compressed pages */
	bytes_left = compressed_len;
	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
		int submit = 0;

		page = compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		if (bio->bi_iter.bi_size)
			submit = io_tree->ops->merge_bio_hook(page, 0,
							   PAGE_SIZE,
							   bio, 0);

		page->mapping = NULL;
		if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			bio_get(bio);

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			refcount_inc(&cb->pending_bios);
			ret = btrfs_bio_wq_end_io(fs_info, bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			if (!skip_sum) {
				ret = btrfs_csum_one_bio(inode, bio, start, 1);
				BUG_ON(ret); /* -ENOMEM */
			}

			ret = btrfs_map_bio(fs_info, bio, 0, 1);
			if (ret) {
				bio->bi_status = ret;
				bio_endio(bio);
			}

			bio_put(bio);

			bio = btrfs_bio_alloc(bdev, first_byte);
			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
			bio->bi_private = cb;
			bio->bi_end_io = end_compressed_bio_write;
			bio_add_page(bio, page, PAGE_SIZE, 0);
		}
		if (bytes_left < PAGE_SIZE) {
			btrfs_info(fs_info,
				   "bytes left %lu compress len %lu nr %lu",
				   bytes_left, cb->compressed_len, cb->nr_pages);
		}
		bytes_left -= PAGE_SIZE;
		first_byte += PAGE_SIZE;
		cond_resched();
	}
	bio_get(bio);

	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!skip_sum) {
		ret = btrfs_csum_one_bio(inode, bio, start, 1);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, bio, 0, 1);
	if (ret) {
		bio->bi_status = ret;
		bio_endio(bio);
	}

	bio_put(bio);
	return 0;
}
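
/*
 * Caller-side sketch (illustrative only; this helper is hypothetical and not
 * part of this file -- the real submitter lives in the delalloc write path).
 * The contract: the file range [start, start + len) must already be marked
 * for writeback, disk_start must point at an allocated extent, and each
 * entry of compressed_pages must hold a reference that the end_io handler
 * is allowed to drop.
 */
static blk_status_t __maybe_unused example_submit_compressed_extent(
		struct inode *inode, u64 start, unsigned long len,
		u64 disk_start, unsigned long compressed_len,
		struct page **compressed_pages)
{
	unsigned long nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);

	/* ownership of the pages passes to the end_io handler on success */
	return btrfs_submit_compressed_write(inode, start, len, disk_start,
					     compressed_len, compressed_pages,
					     nr_pages);
}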

static u64 bio_end_offset(struct bio *bio)
{
	struct bio_vec *last = &bio->bi_io_vec[bio->bi_vcnt - 1];

	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
}
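
/*
 * For example: if the last bvec maps the page at file offset 1M with
 * bv_offset 0 and bv_len 4096, bio_end_offset() returns 1M + 4096, the
 * first file byte *not* covered by the bio.
 */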

static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
	unsigned long end_index;
	unsigned long pg_index;
	u64 last_offset;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	unsigned long nr_pages = 0;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	u64 end;
	int misses = 0;

	last_offset = bio_end_offset(cb->orig_bio);
	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	while (last_offset < compressed_end) {
		pg_index = last_offset >> PAGE_SHIFT;

		if (pg_index > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, pg_index);
		rcu_read_unlock();
		if (page && !radix_tree_exceptional_entry(page)) {
			misses++;
			if (misses > 4)
				break;
			goto next;
		}

		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
								 ~__GFP_FS));
		if (!page)
			break;

		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
			put_page(page);
			goto next;
		}

		end = last_offset + PAGE_SIZE - 1;
		/*
		 * at this point, we have a locked page in the page cache
		 * for these bytes in the file.  But, we have to make
		 * sure they map to this compressed extent on disk.
		 */
		set_page_extent_mapped(page);
		lock_extent(tree, last_offset, end);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, last_offset,
					   PAGE_SIZE);
		read_unlock(&em_tree->lock);

		if (!em || last_offset < em->start ||
		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
		free_extent_map(em);

		if (page->index == end_index) {
			char *userpage;
			size_t zero_offset = isize & (PAGE_SIZE - 1);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_SIZE - zero_offset;
				userpage = kmap_atomic(page);
				memset(userpage + zero_offset, 0, zeros);
				flush_dcache_page(page);
				kunmap_atomic(userpage);
			}
		}

		ret = bio_add_page(cb->orig_bio, page,
				   PAGE_SIZE, 0);

		if (ret == PAGE_SIZE) {
			nr_pages++;
			put_page(page);
		} else {
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
next:
		last_offset += PAGE_SIZE;
	}
	return 0;
}

/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_io_tree *tree;
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	unsigned long compressed_len;
	unsigned long nr_pages;
	unsigned long pg_index;
	struct page *page;
	struct block_device *bdev;
	struct bio *comp_bio;
	u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	blk_status_t ret = BLK_STS_RESOURCE;
	int faili = 0;
	u32 *sums;

	tree = &BTRFS_I(inode)->io_tree;
	em_tree = &BTRFS_I(inode)->extent_tree;

	/* we need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree,
				   page_offset(bio->bi_io_vec->bv_page),
				   PAGE_SIZE);
	read_unlock(&em_tree->lock);
	if (!em)
		return BLK_STS_IOERR;

	compressed_len = em->block_len;
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		goto out;

	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->mirror_num = mirror_num;
	sums = &cb->sums;

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	free_extent_map(em);
	em = NULL;

	cb->len = bio->bi_iter.bi_size;
	cb->compressed_len = compressed_len;
	cb->compress_type = extent_compress_type(bio_flags);
	cb->orig_bio = bio;

	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
				       GFP_NOFS);
	if (!cb->compressed_pages)
		goto fail1;

	bdev = fs_info->fs_devices->latest_bdev;

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
							      __GFP_HIGHMEM);
		if (!cb->compressed_pages[pg_index]) {
			faili = pg_index - 1;
			ret = BLK_STS_RESOURCE;
			goto fail2;
		}
	}
	faili = nr_pages - 1;
	cb->nr_pages = nr_pages;

	add_ra_bio_pages(inode, em_start + em_len, cb);

	/* include any pages we added in add_ra_bio_pages */
	cb->len = bio->bi_iter.bi_size;

	comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
	bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
	comp_bio->bi_private = cb;
	comp_bio->bi_end_io = end_compressed_bio_read;
	refcount_set(&cb->pending_bios, 1);

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		int submit = 0;

		page = cb->compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		page->index = em_start >> PAGE_SHIFT;

		if (comp_bio->bi_iter.bi_size)
			submit = tree->ops->merge_bio_hook(page, 0,
							PAGE_SIZE,
							comp_bio, 0);

		page->mapping = NULL;
		if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			bio_get(comp_bio);

			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			refcount_inc(&cb->pending_bios);

			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
				ret = btrfs_lookup_bio_sums(inode, comp_bio,
							    sums);
				BUG_ON(ret); /* -ENOMEM */
			}
			sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
					     fs_info->sectorsize);

			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
			if (ret) {
				comp_bio->bi_status = ret;
				bio_endio(comp_bio);
			}

			bio_put(comp_bio);

			comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
			bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
			comp_bio->bi_private = cb;
			comp_bio->bi_end_io = end_compressed_bio_read;

			bio_add_page(comp_bio, page, PAGE_SIZE, 0);
		}
		cur_disk_byte += PAGE_SIZE;
	}
	bio_get(comp_bio);

	ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
	if (ret) {
		comp_bio->bi_status = ret;
		bio_endio(comp_bio);
	}

	bio_put(comp_bio);
	return 0;

fail2:
	while (faili >= 0) {
		__free_page(cb->compressed_pages[faili]);
		faili--;
	}

	kfree(cb->compressed_pages);
fail1:
	kfree(cb);
out:
	free_extent_map(em);
	return ret;
}

static struct {
	struct list_head idle_ws;
	spinlock_t ws_lock;
	/* Number of free workspaces */
	int free_ws;
	/* Total number of allocated workspaces */
	atomic_t total_ws;
	/* Waiters for a free workspace */
	wait_queue_head_t ws_wait;
} btrfs_comp_ws[BTRFS_COMPRESS_TYPES];

static const struct btrfs_compress_op * const btrfs_compress_op[] = {
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
	&btrfs_zstd_compress,
};
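
/*
 * Note (an observation about the mapping, not new behavior): the workspace
 * helpers below index both arrays with "type - 1", so a compression type of
 * BTRFS_COMPRESS_ZLIB (1) selects btrfs_compress_op[0].  The entries above
 * must therefore stay in the numeric order of the type constants.
 */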

void __init btrfs_init_compress(void)
{
	int i;

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		struct list_head *workspace;

		INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
		spin_lock_init(&btrfs_comp_ws[i].ws_lock);
		atomic_set(&btrfs_comp_ws[i].total_ws, 0);
		init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);

		/*
		 * Preallocate one workspace for each compression type so
		 * we can guarantee forward progress in the worst case
		 */
		workspace = btrfs_compress_op[i]->alloc_workspace();
		if (IS_ERR(workspace)) {
			pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
		} else {
			atomic_set(&btrfs_comp_ws[i].total_ws, 1);
			btrfs_comp_ws[i].free_ws = 1;
			list_add(workspace, &btrfs_comp_ws[i].idle_ws);
		}
	}
}

/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, waits until there's one.
 * Preallocation makes forward progress guarantees and we do not return
 * errors.
 */
static struct list_head *find_workspace(int type)
{
	struct list_head *workspace;
	int cpus = num_online_cpus();
	int idx = type - 1;
	unsigned nofs_flag;

	struct list_head *idle_ws	= &btrfs_comp_ws[idx].idle_ws;
	spinlock_t *ws_lock		= &btrfs_comp_ws[idx].ws_lock;
	atomic_t *total_ws		= &btrfs_comp_ws[idx].total_ws;
	wait_queue_head_t *ws_wait	= &btrfs_comp_ws[idx].ws_wait;
	int *free_ws			= &btrfs_comp_ws[idx].free_ws;
again:
	spin_lock(ws_lock);
	if (!list_empty(idle_ws)) {
		workspace = idle_ws->next;
		list_del(workspace);
		(*free_ws)--;
		spin_unlock(ws_lock);
		return workspace;
	}
	if (atomic_read(total_ws) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(ws_lock);
		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(total_ws) > cpus && !*free_ws)
			schedule();
		finish_wait(ws_wait, &wait);
		goto again;
	}
	atomic_inc(total_ws);
	spin_unlock(ws_lock);

	/*
	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
	 * to turn it off here because we might get called from the restricted
	 * context of btrfs_compress_bio/btrfs_compress_pages
	 */
	nofs_flag = memalloc_nofs_save();
	workspace = btrfs_compress_op[idx]->alloc_workspace();
	memalloc_nofs_restore(nofs_flag);

	if (IS_ERR(workspace)) {
		atomic_dec(total_ws);
		wake_up(ws_wait);

		/*
		 * Do not return the error but go back to waiting. There's a
		 * workspace preallocated for each type and the compression
		 * time is bounded so we get to a workspace eventually. This
		 * makes our caller's life easier.
		 *
		 * To prevent silent and low-probability deadlocks (when the
		 * initial preallocation fails), check if there are any
		 * workspaces at all.
		 */
		if (atomic_read(total_ws) == 0) {
			static DEFINE_RATELIMIT_STATE(_rs,
					/* once per minute */ 60 * HZ,
					/* no burst */ 1);

			if (__ratelimit(&_rs)) {
				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
			}
		}
		goto again;
	}
	return workspace;
}

/*
 * put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
static void free_workspace(int type, struct list_head *workspace)
{
	int idx = type - 1;
	struct list_head *idle_ws	= &btrfs_comp_ws[idx].idle_ws;
	spinlock_t *ws_lock		= &btrfs_comp_ws[idx].ws_lock;
	atomic_t *total_ws		= &btrfs_comp_ws[idx].total_ws;
	wait_queue_head_t *ws_wait	= &btrfs_comp_ws[idx].ws_wait;
	int *free_ws			= &btrfs_comp_ws[idx].free_ws;

	spin_lock(ws_lock);
	if (*free_ws <= num_online_cpus()) {
		list_add(workspace, idle_ws);
		(*free_ws)++;
		spin_unlock(ws_lock);
		goto wake;
	}
	spin_unlock(ws_lock);

	btrfs_compress_op[idx]->free_workspace(workspace);
	atomic_dec(total_ws);
wake:
	/*
	 * Make sure counter is updated before we wake up waiters.
	 */
	smp_mb();
	if (waitqueue_active(ws_wait))
		wake_up(ws_wait);
}

/*
 * cleanup function for module exit
 */
static void free_workspaces(void)
{
	struct list_head *workspace;
	int i;

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		while (!list_empty(&btrfs_comp_ws[i].idle_ws)) {
			workspace = btrfs_comp_ws[i].idle_ws.next;
			list_del(workspace);
			btrfs_compress_op[i]->free_workspace(workspace);
			atomic_dec(&btrfs_comp_ws[i].total_ws);
		}
	}
}

/*
 * Given an address space and start and length, compress the bytes into @pages
 * that are allocated on demand.
 *
 * @out_pages is an in/out parameter: it holds the maximum number of pages to
 * allocate on input and returns the number of pages actually allocated
 *
 * @total_in is used to return the number of bytes actually read.  It
 * may be smaller than the input length if we had to exit early because we
 * ran out of room in the pages array or because we crossed the output
 * limit implied by @out_pages
 *
 * @total_out is an in/out parameter, must be set to the input length and will
 * also be used to return the total number of compressed bytes
 *
 * (There is no separate max_out argument; the number of bytes we may stuff
 * into the pages is bounded by the @out_pages page count.)
 */
int btrfs_compress_pages(int type, struct address_space *mapping,
			 u64 start, struct page **pages,
			 unsigned long *out_pages,
			 unsigned long *total_in,
			 unsigned long *total_out)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);

	ret = btrfs_compress_op[type - 1]->compress_pages(workspace, mapping,
						      start, pages,
						      out_pages,
						      total_in, total_out);
	free_workspace(type, workspace);
	return ret;
}
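
/*
 * Caller-side sketch (illustrative; the helper and its names are
 * hypothetical, not part of this file).  It shows the in/out convention:
 * *out_pages caps the pages we may fill, *total_out carries the input
 * length in and the compressed size out.
 */
static int __maybe_unused example_compress_range(struct address_space *mapping,
						 u64 start, unsigned long len,
						 struct page **pages,
						 unsigned long max_pages)
{
	unsigned long nr_pages = max_pages;	/* in: limit, out: pages used */
	unsigned long total_in = 0;		/* out: bytes consumed */
	unsigned long total_out = len;		/* in: length, out: bytes produced */
	int ret;

	ret = btrfs_compress_pages(BTRFS_COMPRESS_ZLIB, mapping, start, pages,
				   &nr_pages, &total_in, &total_out);
	if (ret == 0 && total_out < total_in)
		return 0;		/* the range compressed well */
	return ret ? ret : -E2BIG;	/* treat poor compression as failure */
}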

/*
 * cb->compressed_pages is an array of pages with compressed data.
 *
 * cb->start is the starting logical offset of this array in the file
 *
 * cb->orig_bio contains the pages from the file that we want to decompress
 * into
 *
 * cb->compressed_len is the number of bytes in cb->compressed_pages
 *
 * The basic idea is that we have a bio that was created by readpages.
 * The pages in the bio are for the uncompressed data, and they may not
 * be contiguous.  They all correspond to the range of bytes covered by
 * the compressed extent.
 */
static int btrfs_decompress_bio(struct compressed_bio *cb)
{
	struct list_head *workspace;
	int ret;
	int type = cb->compress_type;

	workspace = find_workspace(type);
	ret = btrfs_compress_op[type - 1]->decompress_bio(workspace, cb);
	free_workspace(type, workspace);

	return ret;
}

/*
 * a less complex decompression routine.  Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the uncompressed data we're
 * interested in
 */
int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
		     unsigned long start_byte, size_t srclen, size_t destlen)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);

	ret = btrfs_compress_op[type - 1]->decompress(workspace, data_in,
						  dest_page, start_byte,
						  srclen, destlen);

	free_workspace(type, workspace);
	return ret;
}
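
/*
 * Usage sketch (illustrative): this is the path used for data that fits in
 * a single compressed page, e.g. inline extents.  To recover one page of
 * uncompressed data starting at byte offset pg_offset, a caller could do
 * roughly:
 *
 *	kaddr = kmap(compressed_page);
 *	ret = btrfs_decompress(BTRFS_COMPRESS_ZLIB, kaddr, dest_page,
 *			       pg_offset, compressed_len, PAGE_SIZE);
 *	kunmap(compressed_page);
 *
 * where compressed_page, compressed_len and pg_offset are the caller's
 * values, not names defined in this file.
 */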

void btrfs_exit_compress(void)
{
	free_workspaces();
}

/*
 * Copy uncompressed data from working buffer to pages.
 *
 * buf_start is the byte offset, in the uncompressed data, of the start of
 * our working buffer.
 *
 * total_out is the number of uncompressed bytes produced so far, i.e. one
 * past the last byte of the working buffer.
 */
int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
			      unsigned long total_out, u64 disk_start,
			      struct bio *bio)
{
	unsigned long buf_offset;
	unsigned long current_buf_start;
	unsigned long start_byte;
	unsigned long prev_start_byte;
	unsigned long working_bytes = total_out - buf_start;
	unsigned long bytes;
	char *kaddr;
	struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);

	/*
	 * start_byte is the first byte of the page we're currently
	 * copying into, relative to the start of the uncompressed data.
	 */
	start_byte = page_offset(bvec.bv_page) - disk_start;

	/* we haven't yet hit data corresponding to this page */
	if (total_out <= start_byte)
		return 1;

	/*
	 * the start of the data we care about is offset into
	 * the middle of our working buffer
	 */
	if (total_out > start_byte && buf_start < start_byte) {
		buf_offset = start_byte - buf_start;
		working_bytes -= buf_offset;
	} else {
		buf_offset = 0;
	}
	current_buf_start = buf_start;

	/* copy bytes from the working buffer into the pages */
	while (working_bytes > 0) {
		bytes = min_t(unsigned long, bvec.bv_len,
				PAGE_SIZE - buf_offset);
		bytes = min(bytes, working_bytes);

		kaddr = kmap_atomic(bvec.bv_page);
		memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);
		kunmap_atomic(kaddr);
		flush_dcache_page(bvec.bv_page);

		buf_offset += bytes;
		working_bytes -= bytes;
		current_buf_start += bytes;

		/* check if we need to pick another page */
		bio_advance(bio, bytes);
		if (!bio->bi_iter.bi_size)
			return 0;
		bvec = bio_iter_iovec(bio, bio->bi_iter);
		prev_start_byte = start_byte;
		start_byte = page_offset(bvec.bv_page) - disk_start;

		/*
		 * We need to make sure we're only adjusting
		 * our offset into the compression working buffer when
		 * we're switching pages.  Otherwise we can incorrectly
		 * keep copying when we were actually done.
		 */
		if (start_byte != prev_start_byte) {
			/*
			 * make sure our new page is covered by this
			 * working buffer
			 */
			if (total_out <= start_byte)
				return 1;

			/*
			 * the next page in the biovec might not be adjacent
			 * to the last page, but it might still be found
			 * inside this working buffer. bump our offset pointer
			 */
			if (total_out > start_byte &&
			    current_buf_start < start_byte) {
				buf_offset = start_byte - buf_start;
				working_bytes = total_out - start_byte;
				current_buf_start = buf_start + buf_offset;
			}
		}
	}

	return 1;
}
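
/*
 * Worked example (illustrative numbers): suppose the working buffer covers
 * uncompressed bytes [buf_start = 8192, total_out = 16384) and the current
 * bvec page sits at start_byte = 12288.  Then buf_offset starts at
 * 12288 - 8192 = 4096 and working_bytes shrinks from 8192 to 4096, so we
 * copy only the part of the buffer that this page actually covers.
 */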

/*
 * Compression heuristic.
 *
 * For now it's a naive and optimistic 'return true', we'll extend the logic
 * to quickly (compared to direct compression) detect data characteristics
 * (compressible/incompressible) to avoid wasting CPU time on incompressible
 * data.
 *
 * The following types of analysis can be performed:
 * - detect mostly zero data
 * - detect data with low "byte set" size (text, etc)
 * - detect data with low/high "core byte" set
 *
 * Return non-zero if the compression should be done, 0 otherwise.
 */
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
{
	u64 index = start >> PAGE_SHIFT;
	u64 end_index = end >> PAGE_SHIFT;
	struct page *page;
	int ret = 1;

	while (index <= end_index) {
		/*
		 * Pages in the range are expected to be present and held by
		 * the caller (the delalloc range is dirty); for now we only
		 * touch them.
		 */
		page = find_get_page(inode->i_mapping, index);
		kmap(page);
		kunmap(page);
		put_page(page);
		index++;
	}

	return ret;
}
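
/*
 * Illustrative sketch of one analysis mentioned in the comment above
 * (detecting a small "byte set"): sample a buffer and count how many
 * distinct byte values appear; text-like data uses few of the 256 possible
 * values.  This is an example of the planned direction, not the heuristic
 * btrfs ships; the helper is hypothetical.
 */
static int __maybe_unused example_byte_set_size(const u8 *sample, size_t len)
{
	bool seen[256] = { false };
	size_t i;
	int distinct = 0;

	for (i = 0; i < len; i++) {
		if (!seen[sample[i]]) {
			seen[sample[i]] = true;
			distinct++;
		}
	}
	/* a byte set well under 256 suggests compressible data */
	return distinct;
}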