xref: /openbmc/linux/fs/btrfs/compression.c (revision ae3473231e77a3f1909d48cd144cebe5e1d049b3)
1 /*
2  * Copyright (C) 2008 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 
19 #include <linux/kernel.h>
20 #include <linux/bio.h>
21 #include <linux/buffer_head.h>
22 #include <linux/file.h>
23 #include <linux/fs.h>
24 #include <linux/pagemap.h>
25 #include <linux/highmem.h>
26 #include <linux/time.h>
27 #include <linux/init.h>
28 #include <linux/string.h>
29 #include <linux/backing-dev.h>
30 #include <linux/mpage.h>
31 #include <linux/swap.h>
32 #include <linux/writeback.h>
33 #include <linux/bit_spinlock.h>
34 #include <linux/slab.h>
35 #include "ctree.h"
36 #include "disk-io.h"
37 #include "transaction.h"
38 #include "btrfs_inode.h"
39 #include "volumes.h"
40 #include "ordered-data.h"
41 #include "compression.h"
42 #include "extent_io.h"
43 #include "extent_map.h"
44 
45 struct compressed_bio {
46 	/* number of bios pending for this compressed extent */
47 	atomic_t pending_bios;
48 
49 	/* the pages with the compressed data on them */
50 	struct page **compressed_pages;
51 
52 	/* inode that owns this data */
53 	struct inode *inode;
54 
55 	/* starting offset in the inode for our pages */
56 	u64 start;
57 
58 	/* number of bytes in the inode we're working on */
59 	unsigned long len;
60 
61 	/* number of bytes on disk */
62 	unsigned long compressed_len;
63 
64 	/* the compression algorithm for this bio */
65 	int compress_type;
66 
67 	/* number of compressed pages in the array */
68 	unsigned long nr_pages;
69 
70 	/* IO errors */
71 	int errors;
72 	int mirror_num;
73 
74 	/* for reads, this is the bio we are copying the data into */
75 	struct bio *orig_bio;
76 
77 	/*
78 	 * the start of a variable length array of checksums only
79 	 * used by reads
80 	 */
81 	u32 sums;
82 };
83 
84 static int btrfs_decompress_bio(int type, struct page **pages_in,
85 				   u64 disk_start, struct bio *orig_bio,
86 				   size_t srclen);
87 
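/*
 * The compressed_bio is over-allocated so that an array of checksums, one
 * per on-disk sector, trails the struct starting at cb->sums.  Illustrative
 * sizing only: with a 4KiB sectorsize, 4-byte crc32c checksums and a 16KiB
 * compressed extent, this works out to sizeof(struct compressed_bio) + 16.
 */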
88 static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
89 				      unsigned long disk_size)
90 {
91 	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
92 
93 	return sizeof(struct compressed_bio) +
94 		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
95 }
96 
97 static struct bio *compressed_bio_alloc(struct block_device *bdev,
98 					u64 first_byte, gfp_t gfp_flags)
99 {
100 	return btrfs_bio_alloc(bdev, first_byte >> 9, BIO_MAX_PAGES, gfp_flags);
101 }
102 
103 static int check_compressed_csum(struct inode *inode,
104 				 struct compressed_bio *cb,
105 				 u64 disk_start)
106 {
107 	int ret;
108 	struct page *page;
109 	unsigned long i;
110 	char *kaddr;
111 	u32 csum;
112 	u32 *cb_sum = &cb->sums;
113 
114 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
115 		return 0;
116 
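	/*
	 * Checksum each compressed page as a whole and compare it against the
	 * csum stashed in the cb's trailing sums array at submit time.  One
	 * csum per page assumes sectorsize == PAGE_SIZE for compressed data.
	 */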
117 	for (i = 0; i < cb->nr_pages; i++) {
118 		page = cb->compressed_pages[i];
119 		csum = ~(u32)0;
120 
121 		kaddr = kmap_atomic(page);
122 		csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
123 		btrfs_csum_final(csum, (u8 *)&csum);
124 		kunmap_atomic(kaddr);
125 
126 		if (csum != *cb_sum) {
127 			btrfs_info(BTRFS_I(inode)->root->fs_info,
128 			   "csum failed ino %llu extent %llu csum %u wanted %u mirror %d",
129 			   btrfs_ino(inode), disk_start, csum, *cb_sum,
130 			   cb->mirror_num);
131 			ret = -EIO;
132 			goto fail;
133 		}
134 		cb_sum++;
135 
136 	}
137 	ret = 0;
138 fail:
139 	return ret;
140 }
141 
142 /* when we finish reading compressed pages from the disk, we
143  * decompress them and then run the bio end_io routines on the
144  * decompressed pages (in the inode address space).
145  *
146  * This allows the checksumming and other IO error handling routines
147  * to work normally.
148  *
149  * The compressed pages are freed here, and this function must be run
150  * in process context.
151  */
152 static void end_compressed_bio_read(struct bio *bio)
153 {
154 	struct compressed_bio *cb = bio->bi_private;
155 	struct inode *inode;
156 	struct page *page;
157 	unsigned long index;
158 	int ret;
159 
160 	if (bio->bi_error)
161 		cb->errors = 1;
162 
163 	/* if there are more bios still pending for this compressed
164 	 * extent, just exit
165 	 */
166 	if (!atomic_dec_and_test(&cb->pending_bios))
167 		goto out;
168 
169 	inode = cb->inode;
170 	ret = check_compressed_csum(inode, cb,
171 				    (u64)bio->bi_iter.bi_sector << 9);
172 	if (ret)
173 		goto csum_failed;
174 
175 	/* ok, we're the last bio for this extent, let's start
176 	 * the decompression.
177 	 */
178 	ret = btrfs_decompress_bio(cb->compress_type,
179 				      cb->compressed_pages,
180 				      cb->start,
181 				      cb->orig_bio,
182 				      cb->compressed_len);
183 csum_failed:
184 	if (ret)
185 		cb->errors = 1;
186 
187 	/* release the compressed pages */
188 	index = 0;
189 	for (index = 0; index < cb->nr_pages; index++) {
190 		page = cb->compressed_pages[index];
191 		page->mapping = NULL;
192 		put_page(page);
193 	}
194 
195 	/* do io completion on the original bio */
196 	if (cb->errors) {
197 		bio_io_error(cb->orig_bio);
198 	} else {
199 		int i;
200 		struct bio_vec *bvec;
201 
202 		/*
203 		 * we have verified the checksum already, set page
204 		 * checked so the end_io handlers know about it
205 		 */
206 		bio_for_each_segment_all(bvec, cb->orig_bio, i)
207 			SetPageChecked(bvec->bv_page);
208 
209 		bio_endio(cb->orig_bio);
210 	}
211 
212 	/* finally free the cb struct */
213 	kfree(cb->compressed_pages);
214 	kfree(cb);
215 out:
216 	bio_put(bio);
217 }
218 
219 /*
220  * Clear the writeback bits on all of the file
221  * pages for a compressed write
222  */
223 static noinline void end_compressed_writeback(struct inode *inode,
224 					      const struct compressed_bio *cb)
225 {
226 	unsigned long index = cb->start >> PAGE_SHIFT;
227 	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
228 	struct page *pages[16];
229 	unsigned long nr_pages = end_index - index + 1;
230 	int i;
231 	int ret;
232 
233 	if (cb->errors)
234 		mapping_set_error(inode->i_mapping, -EIO);
235 
236 	while (nr_pages > 0) {
237 		ret = find_get_pages_contig(inode->i_mapping, index,
238 				     min_t(unsigned long,
239 				     nr_pages, ARRAY_SIZE(pages)), pages);
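		/*
		 * Nothing found at 'index': the page was already released
		 * elsewhere, so step past the hole one page at a time.
		 */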
240 		if (ret == 0) {
241 			nr_pages -= 1;
242 			index += 1;
243 			continue;
244 		}
245 		for (i = 0; i < ret; i++) {
246 			if (cb->errors)
247 				SetPageError(pages[i]);
248 			end_page_writeback(pages[i]);
249 			put_page(pages[i]);
250 		}
251 		nr_pages -= ret;
252 		index += ret;
253 	}
254 	/* the inode may be gone now */
255 }
256 
257 /*
258  * do the cleanup once all the compressed pages hit the disk.
259  * This will clear writeback on the file pages and free the compressed
260  * pages.
261  *
262  * This also calls the writeback end hooks for the file pages so that
263  * metadata and checksums can be updated in the file.
264  */
265 static void end_compressed_bio_write(struct bio *bio)
266 {
267 	struct extent_io_tree *tree;
268 	struct compressed_bio *cb = bio->bi_private;
269 	struct inode *inode;
270 	struct page *page;
271 	unsigned long index;
272 
273 	if (bio->bi_error)
274 		cb->errors = 1;
275 
276 	/* if there are more bios still pending for this compressed
277 	 * extent, just exit
278 	 */
279 	if (!atomic_dec_and_test(&cb->pending_bios))
280 		goto out;
281 
282 	/* ok, we're the last bio for this extent, step one is to
283 	 * call back into the FS and do all the end_io operations
284 	 */
285 	inode = cb->inode;
286 	tree = &BTRFS_I(inode)->io_tree;
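	/*
	 * The end_io hook is mainly handed a page so it can reach the owning
	 * inode, so borrow the first compressed page and point it at the
	 * inode's mapping for the duration of the call.
	 */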
287 	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
288 	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
289 					 cb->start,
290 					 cb->start + cb->len - 1,
291 					 NULL,
292 					 bio->bi_error ? 0 : 1);
293 	cb->compressed_pages[0]->mapping = NULL;
294 
295 	end_compressed_writeback(inode, cb);
296 	/* note, our inode could be gone now */
297 
298 	/*
299 	 * release the compressed pages, these came from alloc_page and
300 	 * are not attached to the inode at all
301 	 */
302 	index = 0;
303 	for (index = 0; index < cb->nr_pages; index++) {
304 		page = cb->compressed_pages[index];
305 		page->mapping = NULL;
306 		put_page(page);
307 	}
308 
309 	/* finally free the cb struct */
310 	kfree(cb->compressed_pages);
311 	kfree(cb);
312 out:
313 	bio_put(bio);
314 }
315 
316 /*
317  * worker function to build and submit bios for previously compressed pages.
318  * The corresponding pages in the inode should be marked for writeback
319  * and the compressed pages should have a reference on them for dropping
320  * when the IO is complete.
321  *
322  * This also checksums the file bytes and gets things ready for
323  * the end io hooks.
324  */
325 int btrfs_submit_compressed_write(struct inode *inode, u64 start,
326 				 unsigned long len, u64 disk_start,
327 				 unsigned long compressed_len,
328 				 struct page **compressed_pages,
329 				 unsigned long nr_pages)
330 {
331 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
332 	struct bio *bio = NULL;
333 	struct compressed_bio *cb;
334 	unsigned long bytes_left;
335 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
336 	int pg_index = 0;
337 	struct page *page;
338 	u64 first_byte = disk_start;
339 	struct block_device *bdev;
340 	int ret;
341 	int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
342 
343 	WARN_ON(start & ((u64)PAGE_SIZE - 1));
344 	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
345 	if (!cb)
346 		return -ENOMEM;
347 	atomic_set(&cb->pending_bios, 0);
348 	cb->errors = 0;
349 	cb->inode = inode;
350 	cb->start = start;
351 	cb->len = len;
352 	cb->mirror_num = 0;
353 	cb->compressed_pages = compressed_pages;
354 	cb->compressed_len = compressed_len;
355 	cb->orig_bio = NULL;
356 	cb->nr_pages = nr_pages;
357 
358 	bdev = fs_info->fs_devices->latest_bdev;
359 
360 	bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
361 	if (!bio) {
362 		kfree(cb);
363 		return -ENOMEM;
364 	}
365 	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
366 	bio->bi_private = cb;
367 	bio->bi_end_io = end_compressed_bio_write;
368 	atomic_inc(&cb->pending_bios);
369 
370 	/* create and submit bios for the compressed pages */
371 	bytes_left = compressed_len;
372 	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
373 		page = compressed_pages[pg_index];
374 		page->mapping = inode->i_mapping;
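		/*
		 * page->mapping is set briefly so merge_bio_hook can reach the
		 * fs_info for its stripe boundary check; if the page can't be
		 * merged into the current bio, or the bio is already full,
		 * submit it and start a new bio below.
		 */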
375 		if (bio->bi_iter.bi_size)
376 			ret = io_tree->ops->merge_bio_hook(page, 0,
377 							   PAGE_SIZE,
378 							   bio, 0);
379 		else
380 			ret = 0;
381 
382 		page->mapping = NULL;
383 		if (ret || bio_add_page(bio, page, PAGE_SIZE, 0) <
384 		    PAGE_SIZE) {
385 			bio_get(bio);
386 
387 			/*
388 			 * inc the count before we submit the bio so
389 			 * we know the end IO handler won't happen before
390 			 * we inc the count.  Otherwise, the cb might get
391 			 * freed before we're done setting it up
392 			 */
393 			atomic_inc(&cb->pending_bios);
394 			ret = btrfs_bio_wq_end_io(fs_info, bio,
395 						  BTRFS_WQ_ENDIO_DATA);
396 			BUG_ON(ret); /* -ENOMEM */
397 
398 			if (!skip_sum) {
399 				ret = btrfs_csum_one_bio(inode, bio, start, 1);
400 				BUG_ON(ret); /* -ENOMEM */
401 			}
402 
403 			ret = btrfs_map_bio(fs_info, bio, 0, 1);
404 			if (ret) {
405 				bio->bi_error = ret;
406 				bio_endio(bio);
407 			}
408 
409 			bio_put(bio);
410 
411 			bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
412 			BUG_ON(!bio);
413 			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
414 			bio->bi_private = cb;
415 			bio->bi_end_io = end_compressed_bio_write;
416 			bio_add_page(bio, page, PAGE_SIZE, 0);
417 		}
418 		if (bytes_left < PAGE_SIZE) {
419 			btrfs_info(fs_info,
420 					"bytes left %lu compress len %lu nr %lu",
421 			       bytes_left, cb->compressed_len, cb->nr_pages);
422 		}
423 		bytes_left -= PAGE_SIZE;
424 		first_byte += PAGE_SIZE;
425 		cond_resched();
426 	}
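	/*
	 * Submit the final, possibly partially filled, bio using the same
	 * steps as the in-loop submissions above.
	 */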
427 	bio_get(bio);
428 
429 	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
430 	BUG_ON(ret); /* -ENOMEM */
431 
432 	if (!skip_sum) {
433 		ret = btrfs_csum_one_bio(inode, bio, start, 1);
434 		BUG_ON(ret); /* -ENOMEM */
435 	}
436 
437 	ret = btrfs_map_bio(fs_info, bio, 0, 1);
438 	if (ret) {
439 		bio->bi_error = ret;
440 		bio_endio(bio);
441 	}
442 
443 	bio_put(bio);
444 	return 0;
445 }
446 
447 static u64 bio_end_offset(struct bio *bio)
448 {
449 	struct bio_vec *last = &bio->bi_io_vec[bio->bi_vcnt - 1];
450 
451 	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
452 }
453 
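/*
 * Readahead for compressed extents: the compressed extent may cover file
 * offsets beyond the pages already in the original read bio.  Try to
 * populate the page cache for that tail and add those pages to orig_bio so
 * that a single decompression pass fills them too.  We back off if the
 * pages are already cached, no longer map to this extent, or can't be
 * added to the bio.
 */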
454 static noinline int add_ra_bio_pages(struct inode *inode,
455 				     u64 compressed_end,
456 				     struct compressed_bio *cb)
457 {
458 	unsigned long end_index;
459 	unsigned long pg_index;
460 	u64 last_offset;
461 	u64 isize = i_size_read(inode);
462 	int ret;
463 	struct page *page;
464 	unsigned long nr_pages = 0;
465 	struct extent_map *em;
466 	struct address_space *mapping = inode->i_mapping;
467 	struct extent_map_tree *em_tree;
468 	struct extent_io_tree *tree;
469 	u64 end;
470 	int misses = 0;
471 
472 	last_offset = bio_end_offset(cb->orig_bio);
473 	em_tree = &BTRFS_I(inode)->extent_tree;
474 	tree = &BTRFS_I(inode)->io_tree;
475 
476 	if (isize == 0)
477 		return 0;
478 
479 	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
480 
481 	while (last_offset < compressed_end) {
482 		pg_index = last_offset >> PAGE_SHIFT;
483 
484 		if (pg_index > end_index)
485 			break;
486 
487 		rcu_read_lock();
488 		page = radix_tree_lookup(&mapping->page_tree, pg_index);
489 		rcu_read_unlock();
490 		if (page && !radix_tree_exceptional_entry(page)) {
491 			misses++;
492 			if (misses > 4)
493 				break;
494 			goto next;
495 		}
496 
497 		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
498 								 ~__GFP_FS));
499 		if (!page)
500 			break;
501 
502 		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
503 			put_page(page);
504 			goto next;
505 		}
506 
507 		end = last_offset + PAGE_SIZE - 1;
508 		/*
509 		 * at this point, we have a locked page in the page cache
510 		 * for these bytes in the file.  But, we have to make
511 		 * sure they map to this compressed extent on disk.
512 		 */
513 		set_page_extent_mapped(page);
514 		lock_extent(tree, last_offset, end);
515 		read_lock(&em_tree->lock);
516 		em = lookup_extent_mapping(em_tree, last_offset,
517 					   PAGE_SIZE);
518 		read_unlock(&em_tree->lock);
519 
520 		if (!em || last_offset < em->start ||
521 		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
522 		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
523 			free_extent_map(em);
524 			unlock_extent(tree, last_offset, end);
525 			unlock_page(page);
526 			put_page(page);
527 			break;
528 		}
529 		free_extent_map(em);
530 
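		/*
		 * For the last page of the file, zero the part past i_size so
		 * readahead never exposes stale data beyond EOF.
		 */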
531 		if (page->index == end_index) {
532 			char *userpage;
533 			size_t zero_offset = isize & (PAGE_SIZE - 1);
534 
535 			if (zero_offset) {
536 				int zeros;
537 				zeros = PAGE_SIZE - zero_offset;
538 				userpage = kmap_atomic(page);
539 				memset(userpage + zero_offset, 0, zeros);
540 				flush_dcache_page(page);
541 				kunmap_atomic(userpage);
542 			}
543 		}
544 
545 		ret = bio_add_page(cb->orig_bio, page,
546 				   PAGE_SIZE, 0);
547 
548 		if (ret == PAGE_SIZE) {
549 			nr_pages++;
550 			put_page(page);
551 		} else {
552 			unlock_extent(tree, last_offset, end);
553 			unlock_page(page);
554 			put_page(page);
555 			break;
556 		}
557 next:
558 		last_offset += PAGE_SIZE;
559 	}
560 	return 0;
561 }
562 
563 /*
564  * for a compressed read, the bio we get passed has all the inode pages
565  * in it.  We don't actually do IO on those pages but allocate new ones
566  * to hold the compressed data read from disk.
567  *
568  * bio->bi_iter.bi_sector points to the compressed extent on disk
569  * bio->bi_io_vec points to all of the inode pages
570  *
571  * After the compressed pages are read, we copy the bytes into the
572  * bio we were passed and then call the bio end_io calls
573  */
574 int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
575 				 int mirror_num, unsigned long bio_flags)
576 {
577 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
578 	struct extent_io_tree *tree;
579 	struct extent_map_tree *em_tree;
580 	struct compressed_bio *cb;
581 	unsigned long compressed_len;
582 	unsigned long nr_pages;
583 	unsigned long pg_index;
584 	struct page *page;
585 	struct block_device *bdev;
586 	struct bio *comp_bio;
587 	u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
588 	u64 em_len;
589 	u64 em_start;
590 	struct extent_map *em;
591 	int ret = -ENOMEM;
592 	int faili = 0;
593 	u32 *sums;
594 
595 	tree = &BTRFS_I(inode)->io_tree;
596 	em_tree = &BTRFS_I(inode)->extent_tree;
597 
598 	/* we need the actual starting offset of this extent in the file */
599 	read_lock(&em_tree->lock);
600 	em = lookup_extent_mapping(em_tree,
601 				   page_offset(bio->bi_io_vec->bv_page),
602 				   PAGE_SIZE);
603 	read_unlock(&em_tree->lock);
604 	if (!em)
605 		return -EIO;
606 
607 	compressed_len = em->block_len;
608 	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
609 	if (!cb)
610 		goto out;
611 
612 	atomic_set(&cb->pending_bios, 0);
613 	cb->errors = 0;
614 	cb->inode = inode;
615 	cb->mirror_num = mirror_num;
616 	sums = &cb->sums;
617 
618 	cb->start = em->orig_start;
619 	em_len = em->len;
620 	em_start = em->start;
621 
622 	free_extent_map(em);
623 	em = NULL;
624 
625 	cb->len = bio->bi_iter.bi_size;
626 	cb->compressed_len = compressed_len;
627 	cb->compress_type = extent_compress_type(bio_flags);
628 	cb->orig_bio = bio;
629 
630 	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
631 	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
632 				       GFP_NOFS);
633 	if (!cb->compressed_pages)
634 		goto fail1;
635 
636 	bdev = fs_info->fs_devices->latest_bdev;
637 
638 	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
639 		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
640 							      __GFP_HIGHMEM);
641 		if (!cb->compressed_pages[pg_index]) {
642 			faili = pg_index - 1;
643 			ret = -ENOMEM;
644 			goto fail2;
645 		}
646 	}
647 	faili = nr_pages - 1;
648 	cb->nr_pages = nr_pages;
649 
650 	add_ra_bio_pages(inode, em_start + em_len, cb);
651 
652 	/* include any pages we added in add_ra_bio_pages */
653 	cb->len = bio->bi_iter.bi_size;
654 
655 	comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
656 	if (!comp_bio)
657 		goto fail2;
658 	bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
659 	comp_bio->bi_private = cb;
660 	comp_bio->bi_end_io = end_compressed_bio_read;
661 	atomic_inc(&cb->pending_bios);
662 
663 	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
664 		page = cb->compressed_pages[pg_index];
665 		page->mapping = inode->i_mapping;
666 		page->index = em_start >> PAGE_SHIFT;
667 
668 		if (comp_bio->bi_iter.bi_size)
669 			ret = tree->ops->merge_bio_hook(page, 0,
670 							PAGE_SIZE,
671 							comp_bio, 0);
672 		else
673 			ret = 0;
674 
675 		page->mapping = NULL;
676 		if (ret || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
677 		    PAGE_SIZE) {
678 			bio_get(comp_bio);
679 
680 			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
681 						  BTRFS_WQ_ENDIO_DATA);
682 			BUG_ON(ret); /* -ENOMEM */
683 
684 			/*
685 			 * inc the count before we submit the bio so
686 			 * we know the end IO handler won't happen before
687 			 * we inc the count.  Otherwise, the cb might get
688 			 * freed before we're done setting it up
689 			 */
690 			atomic_inc(&cb->pending_bios);
691 
692 			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
693 				ret = btrfs_lookup_bio_sums(inode, comp_bio,
694 							    sums);
695 				BUG_ON(ret); /* -ENOMEM */
696 			}
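			/*
			 * Advance our cursor in the cb's trailing csum array
			 * past the entries just filled for this bio, one per
			 * sector, so the next lookup lands in the right slot.
			 */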
697 			sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
698 					     fs_info->sectorsize);
699 
700 			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
701 			if (ret) {
702 				comp_bio->bi_error = ret;
703 				bio_endio(comp_bio);
704 			}
705 
706 			bio_put(comp_bio);
707 
708 			comp_bio = compressed_bio_alloc(bdev, cur_disk_byte,
709 							GFP_NOFS);
710 			BUG_ON(!comp_bio);
711 			bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
712 			comp_bio->bi_private = cb;
713 			comp_bio->bi_end_io = end_compressed_bio_read;
714 
715 			bio_add_page(comp_bio, page, PAGE_SIZE, 0);
716 		}
717 		cur_disk_byte += PAGE_SIZE;
718 	}
719 	bio_get(comp_bio);
720 
721 	ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
722 	BUG_ON(ret); /* -ENOMEM */
723 
724 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
725 		ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
726 		BUG_ON(ret); /* -ENOMEM */
727 	}
728 
729 	ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
730 	if (ret) {
731 		comp_bio->bi_error = ret;
732 		bio_endio(comp_bio);
733 	}
734 
735 	bio_put(comp_bio);
736 	return 0;
737 
738 fail2:
739 	while (faili >= 0) {
740 		__free_page(cb->compressed_pages[faili]);
741 		faili--;
742 	}
743 
744 	kfree(cb->compressed_pages);
745 fail1:
746 	kfree(cb);
747 out:
748 	free_extent_map(em);
749 	return ret;
750 }
751 
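/*
 * Per compression type cache of idle workspaces.  One workspace per type is
 * preallocated at init time so compression can always make forward progress,
 * and the total is loosely capped near num_online_cpus() by find_workspace().
 */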
752 static struct {
753 	struct list_head idle_ws;
754 	spinlock_t ws_lock;
755 	/* Number of free workspaces */
756 	int free_ws;
757 	/* Total number of allocated workspaces */
758 	atomic_t total_ws;
759 	/* Waiters for a free workspace */
760 	wait_queue_head_t ws_wait;
761 } btrfs_comp_ws[BTRFS_COMPRESS_TYPES];
762 
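/* Indexed by compression type - 1: BTRFS_COMPRESS_ZLIB uses slot 0, LZO slot 1 */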
763 static const struct btrfs_compress_op * const btrfs_compress_op[] = {
764 	&btrfs_zlib_compress,
765 	&btrfs_lzo_compress,
766 };
767 
768 void __init btrfs_init_compress(void)
769 {
770 	int i;
771 
772 	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
773 		struct list_head *workspace;
774 
775 		INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
776 		spin_lock_init(&btrfs_comp_ws[i].ws_lock);
777 		atomic_set(&btrfs_comp_ws[i].total_ws, 0);
778 		init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);
779 
780 		/*
781 		 * Preallocate one workspace for each compression type so
782 		 * we can guarantee forward progress in the worst case
783 		 */
784 		workspace = btrfs_compress_op[i]->alloc_workspace();
785 		if (IS_ERR(workspace)) {
786 			pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
787 		} else {
788 			atomic_set(&btrfs_comp_ws[i].total_ws, 1);
789 			btrfs_comp_ws[i].free_ws = 1;
790 			list_add(workspace, &btrfs_comp_ws[i].idle_ws);
791 		}
792 	}
793 }
794 
795 /*
796  * This finds an available workspace or allocates a new one.
797  * If it's not possible to allocate a new one, it waits until there is one.
798  * Preallocation provides a forward progress guarantee and we do not return
799  * errors.
800  */
801 static struct list_head *find_workspace(int type)
802 {
803 	struct list_head *workspace;
804 	int cpus = num_online_cpus();
805 	int idx = type - 1;
806 
807 	struct list_head *idle_ws	= &btrfs_comp_ws[idx].idle_ws;
808 	spinlock_t *ws_lock		= &btrfs_comp_ws[idx].ws_lock;
809 	atomic_t *total_ws		= &btrfs_comp_ws[idx].total_ws;
810 	wait_queue_head_t *ws_wait	= &btrfs_comp_ws[idx].ws_wait;
811 	int *free_ws			= &btrfs_comp_ws[idx].free_ws;
812 again:
813 	spin_lock(ws_lock);
814 	if (!list_empty(idle_ws)) {
815 		workspace = idle_ws->next;
816 		list_del(workspace);
817 		(*free_ws)--;
818 		spin_unlock(ws_lock);
819 		return workspace;
820 
821 	}
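	/*
	 * No idle workspace.  Only allocate a new one if we are below roughly
	 * one workspace per online CPU; otherwise sleep until a workspace is
	 * handed back by free_workspace().
	 */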
822 	if (atomic_read(total_ws) > cpus) {
823 		DEFINE_WAIT(wait);
824 
825 		spin_unlock(ws_lock);
826 		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
827 		if (atomic_read(total_ws) > cpus && !*free_ws)
828 			schedule();
829 		finish_wait(ws_wait, &wait);
830 		goto again;
831 	}
832 	atomic_inc(total_ws);
833 	spin_unlock(ws_lock);
834 
835 	workspace = btrfs_compress_op[idx]->alloc_workspace();
836 	if (IS_ERR(workspace)) {
837 		atomic_dec(total_ws);
838 		wake_up(ws_wait);
839 
840 		/*
841 		 * Do not return the error but go back to waiting. There's a
842 		 * workspace preallocated for each type and the compression
843 		 * time is bounded so we get to a workspace eventually. This
844 		 * makes our caller's life easier.
845 		 *
846 		 * To prevent silent and low-probability deadlocks (when the
847 		 * initial preallocation fails), check if there are any
848 		 * workspaces at all.
849 		 */
850 		if (atomic_read(total_ws) == 0) {
851 			static DEFINE_RATELIMIT_STATE(_rs,
852 					/* once per minute */ 60 * HZ,
853 					/* no burst */ 1);
854 
855 			if (__ratelimit(&_rs)) {
856 				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
857 			}
858 		}
859 		goto again;
860 	}
861 	return workspace;
862 }
863 
864 /*
865  * put a workspace struct back on the list or free it if we have enough
866  * idle ones sitting around
867  */
868 static void free_workspace(int type, struct list_head *workspace)
869 {
870 	int idx = type - 1;
871 	struct list_head *idle_ws	= &btrfs_comp_ws[idx].idle_ws;
872 	spinlock_t *ws_lock		= &btrfs_comp_ws[idx].ws_lock;
873 	atomic_t *total_ws		= &btrfs_comp_ws[idx].total_ws;
874 	wait_queue_head_t *ws_wait	= &btrfs_comp_ws[idx].ws_wait;
875 	int *free_ws			= &btrfs_comp_ws[idx].free_ws;
876 
877 	spin_lock(ws_lock);
878 	if (*free_ws < num_online_cpus()) {
879 		list_add(workspace, idle_ws);
880 		(*free_ws)++;
881 		spin_unlock(ws_lock);
882 		goto wake;
883 	}
884 	spin_unlock(ws_lock);
885 
886 	btrfs_compress_op[idx]->free_workspace(workspace);
887 	atomic_dec(total_ws);
888 wake:
889 	/*
890 	 * Make sure counter is updated before we wake up waiters.
891 	 */
892 	smp_mb();
893 	if (waitqueue_active(ws_wait))
894 		wake_up(ws_wait);
895 }
896 
897 /*
898  * cleanup function for module exit
899  */
900 static void free_workspaces(void)
901 {
902 	struct list_head *workspace;
903 	int i;
904 
905 	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
906 		while (!list_empty(&btrfs_comp_ws[i].idle_ws)) {
907 			workspace = btrfs_comp_ws[i].idle_ws.next;
908 			list_del(workspace);
909 			btrfs_compress_op[i]->free_workspace(workspace);
910 			atomic_dec(&btrfs_comp_ws[i].total_ws);
911 		}
912 	}
913 }
914 
915 /*
916  * given an address space and start/len, compress the bytes.
917  *
918  * pages are allocated to hold the compressed result and stored
919  * in 'pages'
920  *
921  * out_pages is used to return the number of pages allocated.  There
922  * may be pages allocated even if we return an error
923  *
924  * total_in is used to return the number of bytes actually read.  It
925  * may be smaller than len if we had to exit early because we
926  * ran out of room in the pages array or because we crossed the
927  * max_out threshold.
928  *
929  * total_out is used to return the total number of compressed bytes
930  *
931  * max_out tells us the max number of bytes that we're allowed to
932  * stuff into pages
933  */
934 int btrfs_compress_pages(int type, struct address_space *mapping,
935 			 u64 start, unsigned long len,
936 			 struct page **pages,
937 			 unsigned long nr_dest_pages,
938 			 unsigned long *out_pages,
939 			 unsigned long *total_in,
940 			 unsigned long *total_out,
941 			 unsigned long max_out)
942 {
943 	struct list_head *workspace;
944 	int ret;
945 
946 	workspace = find_workspace(type);
947 
948 	ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping,
949 						      start, len, pages,
950 						      nr_dest_pages, out_pages,
951 						      total_in, total_out,
952 						      max_out);
953 	free_workspace(type, workspace);
954 	return ret;
955 }
956 
957 /*
958  * pages_in is an array of pages with compressed data.
959  *
960  * disk_start is the starting logical offset of this array in the file
961  *
962  * orig_bio contains the pages from the file that we want to decompress into
963  *
964  * srclen is the number of bytes in pages_in
965  *
966  * The basic idea is that we have a bio that was created by readpages.
967  * The pages in the bio are for the uncompressed data, and they may not
968  * be contiguous.  They all correspond to the range of bytes covered by
969  * the compressed extent.
970  */
971 static int btrfs_decompress_bio(int type, struct page **pages_in,
972 				   u64 disk_start, struct bio *orig_bio,
973 				   size_t srclen)
974 {
975 	struct list_head *workspace;
976 	int ret;
977 
978 	workspace = find_workspace(type);
979 
980 	ret = btrfs_compress_op[type-1]->decompress_bio(workspace, pages_in,
981 							 disk_start, orig_bio,
982 							 srclen);
983 	free_workspace(type, workspace);
984 	return ret;
985 }
986 
987 /*
988  * a less complex decompression routine.  Our compressed data fits in a
989  * single page, and we want to read a single page out of it.
990  * start_byte tells us the offset into the uncompressed data we're interested in
991  */
992 int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
993 		     unsigned long start_byte, size_t srclen, size_t destlen)
994 {
995 	struct list_head *workspace;
996 	int ret;
997 
998 	workspace = find_workspace(type);
999 
1000 	ret = btrfs_compress_op[type-1]->decompress(workspace, data_in,
1001 						  dest_page, start_byte,
1002 						  srclen, destlen);
1003 
1004 	free_workspace(type, workspace);
1005 	return ret;
1006 }
1007 
1008 void btrfs_exit_compress(void)
1009 {
1010 	free_workspaces();
1011 }
1012 
1013 /*
1014  * Copy uncompressed data from working buffer to pages.
1015  *
1016  * buf_start is the byte offset of the start of our working buffer within
1017  * the uncompressed data.
1018  * total_out is the byte offset just past the last byte of the working buffer
1019  */
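/*
 * Illustrative example (made-up offsets): if buf_start is 8192 and total_out
 * is 12288, the working buffer holds uncompressed bytes [8192, 12288).  A
 * bvec page whose start_byte works out to 10240 is then filled from
 * buf_offset 2048 with up to 2048 bytes.
 */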
1020 int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
1021 			      unsigned long total_out, u64 disk_start,
1022 			      struct bio *bio)
1023 {
1024 	unsigned long buf_offset;
1025 	unsigned long current_buf_start;
1026 	unsigned long start_byte;
1027 	unsigned long working_bytes = total_out - buf_start;
1028 	unsigned long bytes;
1029 	char *kaddr;
1030 	struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);
1031 
1032 	/*
1033 	 * start_byte is the offset of the page we're currently copying into,
1034 	 * relative to the start of this extent's uncompressed data.
1035 	 */
1036 	start_byte = page_offset(bvec.bv_page) - disk_start;
1037 
1038 	/* we haven't yet hit data corresponding to this page */
1039 	if (total_out <= start_byte)
1040 		return 1;
1041 
1042 	/*
1043 	 * the start of the data we care about is offset into
1044 	 * the middle of our working buffer
1045 	 */
1046 	if (total_out > start_byte && buf_start < start_byte) {
1047 		buf_offset = start_byte - buf_start;
1048 		working_bytes -= buf_offset;
1049 	} else {
1050 		buf_offset = 0;
1051 	}
1052 	current_buf_start = buf_start;
1053 
1054 	/* copy bytes from the working buffer into the pages */
1055 	while (working_bytes > 0) {
1056 		bytes = min_t(unsigned long, bvec.bv_len,
1057 				PAGE_SIZE - buf_offset);
1058 		bytes = min(bytes, working_bytes);
1059 
1060 		kaddr = kmap_atomic(bvec.bv_page);
1061 		memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);
1062 		kunmap_atomic(kaddr);
1063 		flush_dcache_page(bvec.bv_page);
1064 
1065 		buf_offset += bytes;
1066 		working_bytes -= bytes;
1067 		current_buf_start += bytes;
1068 
1069 		/* check if we need to pick another page */
1070 		bio_advance(bio, bytes);
1071 		if (!bio->bi_iter.bi_size)
1072 			return 0;
1073 		bvec = bio_iter_iovec(bio, bio->bi_iter);
1074 
1075 		start_byte = page_offset(bvec.bv_page) - disk_start;
1076 
1077 		/*
1078 		 * make sure our new page is covered by this
1079 		 * working buffer
1080 		 */
1081 		if (total_out <= start_byte)
1082 			return 1;
1083 
1084 		/*
1085 		 * the next page in the biovec might not be adjacent
1086 		 * to the last page, but it might still be found
1087 		 * inside this working buffer. bump our offset pointer
1088 		 */
1089 		if (total_out > start_byte &&
1090 		    current_buf_start < start_byte) {
1091 			buf_offset = start_byte - buf_start;
1092 			working_bytes = total_out - start_byte;
1093 			current_buf_start = buf_start + buf_offset;
1094 		}
1095 	}
1096 
1097 	return 1;
1098 }
1099