// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/power/swap.c
 *
 * This file provides functions for reading the suspend image from
 * and writing it to a swap partition.
 *
 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
 */

#define pr_fmt(fmt) "PM: " fmt

#include <linux/module.h>
#include <linux/file.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/genhd.h>
#include <linux/device.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/vmalloc.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/ktime.h>

#include "power.h"

#define HIBERNATE_SIG	"S1SUSPEND"

/*
 * When reading an {un,}compressed image, we may restore pages in place,
 * in which case some architectures need these pages to be cleaned before
 * they can be executed. We don't know which pages these may be, so clean
 * the lot.
 */
static bool clean_pages_on_read;
static bool clean_pages_on_decompress;

/*
 *	The swap map is a data structure used for keeping track of each page
 *	written to a swap partition.  It consists of many swap_map_page
 *	structures, each of which contains an array of MAP_PAGE_ENTRIES swap
 *	entries.  These structures are stored on the swap and linked together
 *	with the help of the .next_swap member.
 *
 *	The swap map is created during suspend.  The swap map pages are
 *	allocated and populated one at a time, so we only need one memory
 *	page to set up the entire structure.
 *
 *	During resume we read all swap_map_page structures into a list.
 */

#define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(sector_t) - 1)
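
/*
 * A minimal sketch of the resulting on-disk layout (assuming 4 KiB pages
 * and an 8-byte sector_t, i.e. 511 entries per map page):
 *
 *	swap_map_page #0                  swap_map_page #1
 *	+-------------------+             +-------------------+
 *	| entries[0..510]   | sectors of  | entries[0..510]   |
 *	|                   | image pages |                   |
 *	| next_swap  -------+-----------> | next_swap = 0     | (end of chain)
 *	+-------------------+             +-------------------+
 *
 * so reading the image back boils down to walking the chain, roughly:
 *
 *	for (offset = header->image; offset; offset = map->next_swap)
 *		read the map page at offset, then its entries[];
 *
 * as get_swap_reader() and swap_read_page() below do.
 */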

/*
 * Number of free pages that are not in high memory.
 */
static inline unsigned long low_free_pages(void)
{
	return nr_free_pages() - nr_free_highpages();
}

/*
 * Number of pages required to be kept free while writing the image. Always
 * half of all available low pages before the writing starts.
 */
static inline unsigned long reqd_free_pages(void)
{
	return low_free_pages() / 2;
}

struct swap_map_page {
	sector_t entries[MAP_PAGE_ENTRIES];
	sector_t next_swap;
};

struct swap_map_page_list {
	struct swap_map_page *map;
	struct swap_map_page_list *next;
};

/**
 *	The swap_map_handle structure is used for handling swap in
 *	a file-like way.
 */

struct swap_map_handle {
	struct swap_map_page *cur;
	struct swap_map_page_list *maps;
	sector_t cur_swap;
	sector_t first_sector;
	unsigned int k;
	unsigned long reqd_free_pages;
	u32 crc32;
};

struct swsusp_header {
	char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
	              sizeof(u32)];
	u32	crc32;
	sector_t image;
	unsigned int flags;	/* Flags to pass to the "boot" kernel */
	char	orig_sig[10];
	char	sig[10];
} __packed;
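
/*
 * The reserved[] padding makes the structure exactly one page, with the
 * two 10-byte signature arrays at the very end of it. A worked example
 * (a sketch, assuming 4 KiB pages, a 64-bit sector_t and 32-bit int/u32):
 *
 *	reserved = 4096 - 20 - 8 - 4 - 4 = 4060 bytes
 *
 * so sig[] overlays the magic that mkswap writes at the end of the first
 * swap page, which is what lets mark_swapfiles() and swsusp_check() below
 * toggle between the swap signature and HIBERNATE_SIG.
 */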

static struct swsusp_header *swsusp_header;

/**
 *	The following functions are used for tracking the allocated
 *	swap pages, so that they can be freed in case of an error.
 */

struct swsusp_extent {
	struct rb_node node;
	unsigned long start;
	unsigned long end;
};

static struct rb_root swsusp_extents = RB_ROOT;

static int swsusp_extents_insert(unsigned long swap_offset)
{
	struct rb_node **new = &(swsusp_extents.rb_node);
	struct rb_node *parent = NULL;
	struct swsusp_extent *ext;

	/* Figure out where to put the new node */
	while (*new) {
		ext = rb_entry(*new, struct swsusp_extent, node);
		parent = *new;
		if (swap_offset < ext->start) {
			/* Try to merge */
			if (swap_offset == ext->start - 1) {
				ext->start--;
				return 0;
			}
			new = &((*new)->rb_left);
		} else if (swap_offset > ext->end) {
			/* Try to merge */
			if (swap_offset == ext->end + 1) {
				ext->end++;
				return 0;
			}
			new = &((*new)->rb_right);
		} else {
			/* It already is in the tree */
			return -EINVAL;
		}
	}
	/* Add the new node and rebalance the tree. */
	ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
	if (!ext)
		return -ENOMEM;

	ext->start = swap_offset;
	ext->end = swap_offset;
	rb_link_node(&ext->node, parent, new);
	rb_insert_color(&ext->node, &swsusp_extents);
	return 0;
}
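
/*
 * A small worked example of the merge logic above: inserting offsets
 * 10, 11, 12 in that order grows a single extent [10, 12]; inserting 12
 * again then fails with -EINVAL. Two adjacent extents are not coalesced
 * with each other (inserting 10, 12, then 11 leaves [10, 11] and
 * [12, 12]), which is harmless since the tree is only used to remember
 * which swap entries to free.
 */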

/**
 *	alloc_swapdev_block - allocate a swap page and register that it has
 *	been allocated, so that it can be freed in case of an error.
 */

sector_t alloc_swapdev_block(int swap)
{
	unsigned long offset;

	offset = swp_offset(get_swap_page_of_type(swap));
	if (offset) {
		if (swsusp_extents_insert(offset))
			swap_free(swp_entry(swap, offset));
		else
			return swapdev_block(swap, offset);
	}
	return 0;
}

/**
 *	free_all_swap_pages - free swap pages allocated for saving image data.
 *	It also frees the extents used to register which swap entries had been
 *	allocated.
 */

void free_all_swap_pages(int swap)
{
	struct rb_node *node;

	while ((node = swsusp_extents.rb_node)) {
		struct swsusp_extent *ext;
		unsigned long offset;

		ext = rb_entry(node, struct swsusp_extent, node);
		rb_erase(node, &swsusp_extents);
		for (offset = ext->start; offset <= ext->end; offset++)
			swap_free(swp_entry(swap, offset));

		kfree(ext);
	}
}

int swsusp_swap_in_use(void)
{
	return (swsusp_extents.rb_node != NULL);
}

/*
 * General things
 */

static unsigned short root_swap = 0xffff;
static struct block_device *hib_resume_bdev;

struct hib_bio_batch {
	atomic_t		count;
	wait_queue_head_t	wait;
	blk_status_t		error;
	struct blk_plug		plug;
};

static void hib_init_batch(struct hib_bio_batch *hb)
{
	atomic_set(&hb->count, 0);
	init_waitqueue_head(&hb->wait);
	hb->error = BLK_STS_OK;
	blk_start_plug(&hb->plug);
}

static void hib_finish_batch(struct hib_bio_batch *hb)
{
	blk_finish_plug(&hb->plug);
}

static void hib_end_io(struct bio *bio)
{
	struct hib_bio_batch *hb = bio->bi_private;
	struct page *page = bio_first_page_all(bio);

	if (bio->bi_status) {
		pr_alert("Read-error on swap-device (%u:%u:%Lu)\n",
			 MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
			 (unsigned long long)bio->bi_iter.bi_sector);
	}

	if (bio_data_dir(bio) == WRITE)
		put_page(page);
	else if (clean_pages_on_read)
		flush_icache_range((unsigned long)page_address(page),
				   (unsigned long)page_address(page) + PAGE_SIZE);

	if (bio->bi_status && !hb->error)
		hb->error = bio->bi_status;
	if (atomic_dec_and_test(&hb->count))
		wake_up(&hb->wait);

	bio_put(bio);
}

static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr,
		struct hib_bio_batch *hb)
{
	struct page *page = virt_to_page(addr);
	struct bio *bio;
	int error = 0;

	bio = bio_alloc(GFP_NOIO | __GFP_HIGH, 1);
	bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
	bio_set_dev(bio, hib_resume_bdev);
	bio_set_op_attrs(bio, op, op_flags);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		pr_err("Adding page to bio failed at %llu\n",
		       (unsigned long long)bio->bi_iter.bi_sector);
		bio_put(bio);
		return -EFAULT;
	}

	if (hb) {
		bio->bi_end_io = hib_end_io;
		bio->bi_private = hb;
		atomic_inc(&hb->count);
		submit_bio(bio);
	} else {
		error = submit_bio_wait(bio);
		bio_put(bio);
	}

	return error;
}

static int hib_wait_io(struct hib_bio_batch *hb)
{
	/*
	 * We are relying on the behavior of blk_plug that a thread with
	 * a plug will flush the plug list before sleeping.
	 */
	wait_event(hb->wait, atomic_read(&hb->count) == 0);
	return blk_status_to_errno(hb->error);
}
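
/*
 * Typical use of the batch machinery above, as the read/write paths
 * below do it (a condensed sketch, not verbatim code from this file;
 * "more_pages" stands in for the real loop condition):
 *
 *	struct hib_bio_batch hb;
 *
 *	hib_init_batch(&hb);
 *	while (more_pages)
 *		hib_submit_io(REQ_OP_WRITE, REQ_SYNC, offset, addr, &hb);
 *	error = hib_wait_io(&hb);	// plug flushed when we sleep
 *	hib_finish_batch(&hb);
 */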

/*
 * Saving part
 */

static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
{
	int error;

	hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block,
		      swsusp_header, NULL);
	if (!memcmp("SWAP-SPACE", swsusp_header->sig, 10) ||
	    !memcmp("SWAPSPACE2", swsusp_header->sig, 10)) {
		memcpy(swsusp_header->orig_sig, swsusp_header->sig, 10);
		memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
		swsusp_header->image = handle->first_sector;
		swsusp_header->flags = flags;
		if (flags & SF_CRC32_MODE)
			swsusp_header->crc32 = handle->crc32;
		error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
				      swsusp_resume_block, swsusp_header, NULL);
	} else {
		pr_err("Swap header not found!\n");
		error = -ENODEV;
	}
	return error;
}

/**
 *	swsusp_swap_check - check if the resume device is a swap device
 *	and get its index (if so)
 *
 *	This is called before saving the image.
 */
static int swsusp_swap_check(void)
{
	int res;

	if (swsusp_resume_device)
		res = swap_type_of(swsusp_resume_device, swsusp_resume_block);
	else
		res = find_first_swap(&swsusp_resume_device);
	if (res < 0)
		return res;
	root_swap = res;

	hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device, FMODE_WRITE,
			NULL);
	if (IS_ERR(hib_resume_bdev))
		return PTR_ERR(hib_resume_bdev);

	res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
	if (res < 0)
		blkdev_put(hib_resume_bdev, FMODE_WRITE);

	return res;
}

/**
 *	write_page - Write one page to given swap location.
 *	@buf:		Address we're writing.
 *	@offset:	Offset of the swap page we're writing to.
 *	@hb:		bio completion batch
 */

static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
{
	void *src;
	int ret;

	if (!offset)
		return -ENOSPC;

	if (hb) {
		src = (void *)__get_free_page(GFP_NOIO | __GFP_NOWARN |
		                              __GFP_NORETRY);
		if (src) {
			copy_page(src, buf);
		} else {
			ret = hib_wait_io(hb); /* Free pages */
			if (ret)
				return ret;
			src = (void *)__get_free_page(GFP_NOIO |
			                              __GFP_NOWARN |
			                              __GFP_NORETRY);
			if (src) {
				copy_page(src, buf);
			} else {
				WARN_ON_ONCE(1);
				hb = NULL;	/* Go synchronous */
				src = buf;
			}
		}
	} else {
		src = buf;
	}
	return hib_submit_io(REQ_OP_WRITE, REQ_SYNC, offset, src, hb);
}

static void release_swap_writer(struct swap_map_handle *handle)
{
	if (handle->cur)
		free_page((unsigned long)handle->cur);
	handle->cur = NULL;
}

static int get_swap_writer(struct swap_map_handle *handle)
{
	int ret;

	ret = swsusp_swap_check();
	if (ret) {
		if (ret != -ENOSPC)
			pr_err("Cannot find swap device, try swapon -a\n");
		return ret;
	}
	handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
	if (!handle->cur) {
		ret = -ENOMEM;
		goto err_close;
	}
	handle->cur_swap = alloc_swapdev_block(root_swap);
	if (!handle->cur_swap) {
		ret = -ENOSPC;
		goto err_rel;
	}
	handle->k = 0;
	handle->reqd_free_pages = reqd_free_pages();
	handle->first_sector = handle->cur_swap;
	return 0;
err_rel:
	release_swap_writer(handle);
err_close:
	swsusp_close(FMODE_WRITE);
	return ret;
}

static int swap_write_page(struct swap_map_handle *handle, void *buf,
		struct hib_bio_batch *hb)
{
	int error = 0;
	sector_t offset;

	if (!handle->cur)
		return -EINVAL;
	offset = alloc_swapdev_block(root_swap);
	error = write_page(buf, offset, hb);
	if (error)
		return error;
	handle->cur->entries[handle->k++] = offset;
	if (handle->k >= MAP_PAGE_ENTRIES) {
		offset = alloc_swapdev_block(root_swap);
		if (!offset)
			return -ENOSPC;
		handle->cur->next_swap = offset;
		error = write_page(handle->cur, handle->cur_swap, hb);
		if (error)
			goto out;
		clear_page(handle->cur);
		handle->cur_swap = offset;
		handle->k = 0;

		if (hb && low_free_pages() <= handle->reqd_free_pages) {
			error = hib_wait_io(hb);
			if (error)
				goto out;
			/*
			 * Recalculate the number of required free pages, to
			 * make sure we never take more than half.
			 */
			handle->reqd_free_pages = reqd_free_pages();
		}
	}
 out:
	return error;
}

static int flush_swap_writer(struct swap_map_handle *handle)
{
	if (handle->cur && handle->cur_swap)
		return write_page(handle->cur, handle->cur_swap, NULL);
	else
		return -EINVAL;
}

static int swap_writer_finish(struct swap_map_handle *handle,
		unsigned int flags, int error)
{
	if (!error) {
		pr_info("S");
		error = mark_swapfiles(handle, flags);
		pr_cont("|\n");
		flush_swap_writer(handle);
	}

	if (error)
		free_all_swap_pages(root_swap);
	release_swap_writer(handle);
	swsusp_close(FMODE_WRITE);

	return error;
}

/* We need to remember how much compressed data we need to read. */
#define LZO_HEADER	sizeof(size_t)

/* Number of pages/bytes we'll compress at one time. */
#define LZO_UNC_PAGES	32
#define LZO_UNC_SIZE	(LZO_UNC_PAGES * PAGE_SIZE)

/* Number of pages/bytes we need for compressed data (worst case). */
#define LZO_CMP_PAGES	DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
			             LZO_HEADER, PAGE_SIZE)
#define LZO_CMP_SIZE	(LZO_CMP_PAGES * PAGE_SIZE)

/* Maximum number of threads for compression/decompression. */
#define LZO_THREADS	3

/* Minimum/maximum number of pages for read buffering. */
#define LZO_MIN_RD_PAGES	1024
#define LZO_MAX_RD_PAGES	8192
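
/*
 * A worked example of the sizing above (a sketch, assuming 4 KiB pages
 * and a 64-bit size_t; lzo1x_worst_compress(x) expands to
 * x + x/16 + 64 + 3):
 *
 *	LZO_UNC_SIZE  = 32 * 4096          = 131072 bytes
 *	worst case    = 131072 + 8192 + 67 = 139331 bytes
 *	LZO_CMP_PAGES = DIV_ROUND_UP(139331 + 8, 4096) = 35 pages
 *	LZO_CMP_SIZE  = 35 * 4096          = 143360 bytes
 *
 * i.e. each compressor thread carries an unc buffer (128 KiB), a cmp
 * buffer (140 KiB) and the LZO1X compression workspace.
 */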


/**
 *	save_image - save the suspend image data
 */

static int save_image(struct swap_map_handle *handle,
                      struct snapshot_handle *snapshot,
                      unsigned int nr_to_write)
{
	unsigned int m;
	int ret;
	int nr_pages;
	int err2;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;

	hib_init_batch(&hb);

	pr_info("Saving image data pages (%u pages)...\n",
		nr_to_write);
	m = nr_to_write / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
	while (1) {
		ret = snapshot_read_next(snapshot);
		if (ret <= 0)
			break;
		ret = swap_write_page(handle, data_of(*snapshot), &hb);
		if (ret)
			break;
		if (!(nr_pages % m))
			pr_info("Image saving progress: %3d%%\n",
				nr_pages / m * 10);
		nr_pages++;
	}
	err2 = hib_wait_io(&hb);
	hib_finish_batch(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret)
		pr_info("Image saving done\n");
	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
	return ret;
}

/**
 * Structure used for CRC32.
 */
struct crc_data {
	struct task_struct *thr;                  /* thread */
	atomic_t ready;                           /* ready to start flag */
	atomic_t stop;                            /* ready to stop flag */
	unsigned run_threads;                     /* nr current threads */
	wait_queue_head_t go;                     /* start crc update */
	wait_queue_head_t done;                   /* crc update done */
	u32 *crc32;                               /* points to handle's crc32 */
	size_t *unc_len[LZO_THREADS];             /* uncompressed lengths */
	unsigned char *unc[LZO_THREADS];          /* uncompressed data */
};

/**
 * CRC32 update function that runs in its own thread.
 */
static int crc32_threadfn(void *data)
{
	struct crc_data *d = data;
	unsigned i;

	while (1) {
		wait_event(d->go, atomic_read(&d->ready) ||
		                  kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		for (i = 0; i < d->run_threads; i++)
			*d->crc32 = crc32_le(*d->crc32,
			                     d->unc[i], *d->unc_len[i]);
		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}
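
/*
 * The worker threads in this file all share the same ready/go, stop/done
 * handshake. The producer side looks like this (a sketch; see
 * save_image_lzo() and load_image_lzo() for the real call sites):
 *
 *	atomic_set(&d->ready, 1);
 *	wake_up(&d->go);			// kick the worker
 *	...
 *	wait_event(d->done, atomic_read(&d->stop));
 *	atomic_set(&d->stop, 0);		// re-arm for the next round
 */
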
/**
 * Structure used for LZO data compression.
 */
struct cmp_data {
	struct task_struct *thr;                  /* thread */
	atomic_t ready;                           /* ready to start flag */
	atomic_t stop;                            /* ready to stop flag */
	int ret;                                  /* return code */
	wait_queue_head_t go;                     /* start compression */
	wait_queue_head_t done;                   /* compression done */
	size_t unc_len;                           /* uncompressed length */
	size_t cmp_len;                           /* compressed length */
	unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
	unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
	unsigned char wrk[LZO1X_1_MEM_COMPRESS];  /* compression workspace */
};

/**
 * Compression function that runs in its own thread.
 */
static int lzo_compress_threadfn(void *data)
{
	struct cmp_data *d = data;

	while (1) {
		wait_event(d->go, atomic_read(&d->ready) ||
		                  kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			d->ret = -1;
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		d->ret = lzo1x_1_compress(d->unc, d->unc_len,
		                          d->cmp + LZO_HEADER, &d->cmp_len,
		                          d->wrk);
		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}

/**
 * save_image_lzo - Save the suspend image data compressed with LZO.
 * @handle: Swap map handle to use for saving the image.
 * @snapshot: Image to read data from.
 * @nr_to_write: Number of pages to save.
 */
static int save_image_lzo(struct swap_map_handle *handle,
                          struct snapshot_handle *snapshot,
                          unsigned int nr_to_write)
{
	unsigned int m;
	int ret = 0;
	int nr_pages;
	int err2;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;
	size_t off;
	unsigned thr, run_threads, nr_threads;
	unsigned char *page = NULL;
	struct cmp_data *data = NULL;
	struct crc_data *crc = NULL;

	hib_init_batch(&hb);

	/*
	 * We'll limit the number of threads for compression to limit memory
	 * footprint.
	 */
	nr_threads = num_online_cpus() - 1;
	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);

	page = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH);
	if (!page) {
		pr_err("Failed to allocate LZO page\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	data = vzalloc(array_size(nr_threads, sizeof(*data)));
	if (!data) {
		pr_err("Failed to allocate LZO data\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	crc = kzalloc(sizeof(*crc), GFP_KERNEL);
	if (!crc) {
		pr_err("Failed to allocate crc\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/*
	 * Start the compression threads.
	 */
	for (thr = 0; thr < nr_threads; thr++) {
		init_waitqueue_head(&data[thr].go);
		init_waitqueue_head(&data[thr].done);

		data[thr].thr = kthread_run(lzo_compress_threadfn,
		                            &data[thr],
		                            "image_compress/%u", thr);
		if (IS_ERR(data[thr].thr)) {
			data[thr].thr = NULL;
			pr_err("Cannot start compression threads\n");
			ret = -ENOMEM;
			goto out_clean;
		}
	}

	/*
	 * Start the CRC32 thread.
	 */
	init_waitqueue_head(&crc->go);
	init_waitqueue_head(&crc->done);

	handle->crc32 = 0;
	crc->crc32 = &handle->crc32;
	for (thr = 0; thr < nr_threads; thr++) {
		crc->unc[thr] = data[thr].unc;
		crc->unc_len[thr] = &data[thr].unc_len;
	}

	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
	if (IS_ERR(crc->thr)) {
		crc->thr = NULL;
		pr_err("Cannot start CRC32 thread\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/*
	 * Adjust the number of required free pages after all allocations have
	 * been done. We don't want to run out of pages when writing.
	 */
	handle->reqd_free_pages = reqd_free_pages();

	pr_info("Using %u thread(s) for compression\n", nr_threads);
	pr_info("Compressing and saving image data (%u pages)...\n",
		nr_to_write);
	m = nr_to_write / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
	for (;;) {
		for (thr = 0; thr < nr_threads; thr++) {
			for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
				ret = snapshot_read_next(snapshot);
				if (ret < 0)
					goto out_finish;

				if (!ret)
					break;

				memcpy(data[thr].unc + off,
				       data_of(*snapshot), PAGE_SIZE);

				if (!(nr_pages % m))
					pr_info("Image saving progress: %3d%%\n",
						nr_pages / m * 10);
				nr_pages++;
			}
			if (!off)
				break;

			data[thr].unc_len = off;

			atomic_set(&data[thr].ready, 1);
			wake_up(&data[thr].go);
		}

		if (!thr)
			break;

		crc->run_threads = thr;
		atomic_set(&crc->ready, 1);
		wake_up(&crc->go);

		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
			wait_event(data[thr].done,
			           atomic_read(&data[thr].stop));
			atomic_set(&data[thr].stop, 0);

			ret = data[thr].ret;

			if (ret < 0) {
				pr_err("LZO compression failed\n");
				goto out_finish;
			}

			if (unlikely(!data[thr].cmp_len ||
			             data[thr].cmp_len >
			             lzo1x_worst_compress(data[thr].unc_len))) {
				pr_err("Invalid LZO compressed length\n");
				ret = -1;
				goto out_finish;
			}

			*(size_t *)data[thr].cmp = data[thr].cmp_len;

			/*
			 * Given we are writing one page at a time to disk, we
			 * copy that much from the buffer, although the last
			 * bit will likely be smaller than full page. This is
			 * OK - we saved the length of the compressed data, so
			 * any garbage at the end will be discarded when we
			 * read it.
			 */
			for (off = 0;
			     off < LZO_HEADER + data[thr].cmp_len;
			     off += PAGE_SIZE) {
				memcpy(page, data[thr].cmp + off, PAGE_SIZE);

				ret = swap_write_page(handle, page, &hb);
				if (ret)
					goto out_finish;
			}
		}

		wait_event(crc->done, atomic_read(&crc->stop));
		atomic_set(&crc->stop, 0);
	}

out_finish:
	err2 = hib_wait_io(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret)
		pr_info("Image saving done\n");
	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
out_clean:
	hib_finish_batch(&hb);
	if (crc) {
		if (crc->thr)
			kthread_stop(crc->thr);
		kfree(crc);
	}
	if (data) {
		for (thr = 0; thr < nr_threads; thr++)
			if (data[thr].thr)
				kthread_stop(data[thr].thr);
		vfree(data);
	}
	if (page)
		free_page((unsigned long)page);

	return ret;
}

/**
 *	enough_swap - Make sure we have enough swap to save the image.
 *
 *	Returns TRUE or FALSE after checking the total amount of swap
 *	space available from the resume partition.
 */

static int enough_swap(unsigned int nr_pages)
{
	unsigned int free_swap = count_swap_pages(root_swap, 1);
	unsigned int required;

	pr_debug("Free swap pages: %u\n", free_swap);

	required = PAGES_FOR_IO + nr_pages;
	return free_swap > required;
}

/**
 *	swsusp_write - Write entire image and metadata.
 *	@flags: flags to pass to the "boot" kernel in the image header
 *
 *	It is important _NOT_ to unmount filesystems at this point. We want
 *	them synced (in case something goes wrong), but we DO NOT want to
 *	mark any filesystem clean: it is not. (And it does not matter: if we
 *	resume correctly, we'll mark the system clean anyway.)
 */

int swsusp_write(unsigned int flags)
{
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;
	unsigned long pages;
	int error;

	pages = snapshot_get_image_size();
	error = get_swap_writer(&handle);
	if (error) {
		pr_err("Cannot get swap writer\n");
		return error;
	}
	if (flags & SF_NOCOMPRESS_MODE) {
		if (!enough_swap(pages)) {
			pr_err("Not enough free swap\n");
			error = -ENOSPC;
			goto out_finish;
		}
	}
	memset(&snapshot, 0, sizeof(struct snapshot_handle));
	error = snapshot_read_next(&snapshot);
	if (error < (int)PAGE_SIZE) {
		if (error >= 0)
			error = -EFAULT;

		goto out_finish;
	}
	header = (struct swsusp_info *)data_of(snapshot);
	error = swap_write_page(&handle, header, NULL);
	if (!error) {
		error = (flags & SF_NOCOMPRESS_MODE) ?
			save_image(&handle, &snapshot, pages - 1) :
			save_image_lzo(&handle, &snapshot, pages - 1);
	}
out_finish:
	error = swap_writer_finish(&handle, flags, error);
	return error;
}

/**
 *	The following functions allow us to read data using a swap map
 *	in a file-like way.
 */

static void release_swap_reader(struct swap_map_handle *handle)
{
	struct swap_map_page_list *tmp;

	while (handle->maps) {
		if (handle->maps->map)
			free_page((unsigned long)handle->maps->map);
		tmp = handle->maps;
		handle->maps = handle->maps->next;
		kfree(tmp);
	}
	handle->cur = NULL;
}

static int get_swap_reader(struct swap_map_handle *handle,
		unsigned int *flags_p)
{
	int error;
	struct swap_map_page_list *tmp, *last;
	sector_t offset;

	*flags_p = swsusp_header->flags;

	if (!swsusp_header->image) /* how can this happen? */
		return -EINVAL;

	handle->cur = NULL;
	last = handle->maps = NULL;
	offset = swsusp_header->image;
	while (offset) {
		tmp = kzalloc(sizeof(*handle->maps), GFP_KERNEL);
		if (!tmp) {
			release_swap_reader(handle);
			return -ENOMEM;
		}
		if (!handle->maps)
			handle->maps = tmp;
		if (last)
			last->next = tmp;
		last = tmp;

		tmp->map = (struct swap_map_page *)
			   __get_free_page(GFP_NOIO | __GFP_HIGH);
		if (!tmp->map) {
			release_swap_reader(handle);
			return -ENOMEM;
		}

		error = hib_submit_io(REQ_OP_READ, 0, offset, tmp->map, NULL);
		if (error) {
			release_swap_reader(handle);
			return error;
		}
		offset = tmp->map->next_swap;
	}
	handle->k = 0;
	handle->cur = handle->maps->map;
	return 0;
}

static int swap_read_page(struct swap_map_handle *handle, void *buf,
		struct hib_bio_batch *hb)
{
	sector_t offset;
	int error;
	struct swap_map_page_list *tmp;

	if (!handle->cur)
		return -EINVAL;
	offset = handle->cur->entries[handle->k];
	if (!offset)
		return -EFAULT;
	error = hib_submit_io(REQ_OP_READ, 0, offset, buf, hb);
	if (error)
		return error;
	if (++handle->k >= MAP_PAGE_ENTRIES) {
		handle->k = 0;
		free_page((unsigned long)handle->maps->map);
		tmp = handle->maps;
		handle->maps = handle->maps->next;
		kfree(tmp);
		if (!handle->maps)
			release_swap_reader(handle);
		else
			handle->cur = handle->maps->map;
	}
	return error;
}

static int swap_reader_finish(struct swap_map_handle *handle)
{
	release_swap_reader(handle);

	return 0;
}

/**
 *	load_image - load the image using the swap map handle
 *	@handle and the snapshot handle @snapshot
 *	(assume there are @nr_to_read pages to load)
 */

static int load_image(struct swap_map_handle *handle,
                      struct snapshot_handle *snapshot,
                      unsigned int nr_to_read)
{
	unsigned int m;
	int ret = 0;
	ktime_t start;
	ktime_t stop;
	struct hib_bio_batch hb;
	int err2;
	unsigned nr_pages;

	hib_init_batch(&hb);

	clean_pages_on_read = true;
	pr_info("Loading image data pages (%u pages)...\n", nr_to_read);
	m = nr_to_read / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
	for ( ; ; ) {
		ret = snapshot_write_next(snapshot);
		if (ret <= 0)
			break;
		ret = swap_read_page(handle, data_of(*snapshot), &hb);
		if (ret)
			break;
		if (snapshot->sync_read)
			ret = hib_wait_io(&hb);
		if (ret)
			break;
		if (!(nr_pages % m))
			pr_info("Image loading progress: %3d%%\n",
				nr_pages / m * 10);
		nr_pages++;
	}
	err2 = hib_wait_io(&hb);
	hib_finish_batch(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret) {
		pr_info("Image loading done\n");
		snapshot_write_finalize(snapshot);
		if (!snapshot_image_loaded(snapshot))
			ret = -ENODATA;
	}
	swsusp_show_speed(start, stop, nr_to_read, "Read");
	return ret;
}

/**
 * Structure used for LZO data decompression.
 */
struct dec_data {
	struct task_struct *thr;                  /* thread */
	atomic_t ready;                           /* ready to start flag */
	atomic_t stop;                            /* ready to stop flag */
	int ret;                                  /* return code */
	wait_queue_head_t go;                     /* start decompression */
	wait_queue_head_t done;                   /* decompression done */
	size_t unc_len;                           /* uncompressed length */
	size_t cmp_len;                           /* compressed length */
	unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
	unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
};

/**
 * Decompression function that runs in its own thread.
 */
static int lzo_decompress_threadfn(void *data)
{
	struct dec_data *d = data;

	while (1) {
		wait_event(d->go, atomic_read(&d->ready) ||
		                  kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			d->ret = -1;
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		d->unc_len = LZO_UNC_SIZE;
		d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
		                               d->unc, &d->unc_len);
		if (clean_pages_on_decompress)
			flush_icache_range((unsigned long)d->unc,
					   (unsigned long)d->unc + d->unc_len);

		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}

/**
 * load_image_lzo - Load compressed image data and decompress it with LZO.
 * @handle: Swap map handle to use for loading data.
 * @snapshot: Image to copy uncompressed data into.
 * @nr_to_read: Number of pages to load.
 */
static int load_image_lzo(struct swap_map_handle *handle,
                          struct snapshot_handle *snapshot,
                          unsigned int nr_to_read)
{
	unsigned int m;
	int ret = 0;
	int eof = 0;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;
	unsigned nr_pages;
	size_t off;
	unsigned i, thr, run_threads, nr_threads;
	unsigned ring = 0, pg = 0, ring_size = 0,
	         have = 0, want, need, asked = 0;
	unsigned long read_pages = 0;
	unsigned char **page = NULL;
	struct dec_data *data = NULL;
	struct crc_data *crc = NULL;

	hib_init_batch(&hb);

	/*
	 * We'll limit the number of threads for decompression to limit memory
	 * footprint.
	 */
	nr_threads = num_online_cpus() - 1;
	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);

	page = vmalloc(array_size(LZO_MAX_RD_PAGES, sizeof(*page)));
	if (!page) {
		pr_err("Failed to allocate LZO page\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	data = vzalloc(array_size(nr_threads, sizeof(*data)));
	if (!data) {
		pr_err("Failed to allocate LZO data\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	crc = kzalloc(sizeof(*crc), GFP_KERNEL);
	if (!crc) {
		pr_err("Failed to allocate crc\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	clean_pages_on_decompress = true;

	/*
	 * Start the decompression threads.
	 */
	for (thr = 0; thr < nr_threads; thr++) {
		init_waitqueue_head(&data[thr].go);
		init_waitqueue_head(&data[thr].done);

		data[thr].thr = kthread_run(lzo_decompress_threadfn,
		                            &data[thr],
		                            "image_decompress/%u", thr);
		if (IS_ERR(data[thr].thr)) {
			data[thr].thr = NULL;
			pr_err("Cannot start decompression threads\n");
			ret = -ENOMEM;
			goto out_clean;
		}
	}

	/*
	 * Start the CRC32 thread.
	 */
	init_waitqueue_head(&crc->go);
	init_waitqueue_head(&crc->done);

	handle->crc32 = 0;
	crc->crc32 = &handle->crc32;
	for (thr = 0; thr < nr_threads; thr++) {
		crc->unc[thr] = data[thr].unc;
		crc->unc_len[thr] = &data[thr].unc_len;
	}

	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
	if (IS_ERR(crc->thr)) {
		crc->thr = NULL;
		pr_err("Cannot start CRC32 thread\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/*
	 * Set the number of pages for read buffering.
	 * This is complete guesswork, because we'll only know the real
	 * picture once prepare_image() is called, which is much later on
	 * during the image load phase. We'll assume the worst case and
	 * say that none of the image pages are from high memory.
	 */
	if (low_free_pages() > snapshot_get_image_size())
		read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
	read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);
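
	/*
	 * As a worked example (hypothetical numbers): with 1 GiB of free
	 * low memory (262144 pages) and a 600 MiB image (153600 pages),
	 * read_pages = (262144 - 153600) / 2 = 54272, which the clamp
	 * above then caps at LZO_MAX_RD_PAGES = 8192, i.e. a 32 MiB ring.
	 */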

	for (i = 0; i < read_pages; i++) {
		page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
						  GFP_NOIO | __GFP_HIGH :
						  GFP_NOIO | __GFP_NOWARN |
						  __GFP_NORETRY);

		if (!page[i]) {
			if (i < LZO_CMP_PAGES) {
				ring_size = i;
				pr_err("Failed to allocate LZO pages\n");
				ret = -ENOMEM;
				goto out_clean;
			} else {
				break;
			}
		}
	}
	want = ring_size = i;

	pr_info("Using %u thread(s) for decompression\n", nr_threads);
	pr_info("Loading and decompressing image data (%u pages)...\n",
		nr_to_read);
	m = nr_to_read / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();

	ret = snapshot_write_next(snapshot);
	if (ret <= 0)
		goto out_finish;

	for (;;) {
		for (i = 0; !eof && i < want; i++) {
			ret = swap_read_page(handle, page[ring], &hb);
			if (ret) {
				/*
				 * On real read error, finish. On end of data,
				 * set EOF flag and just exit the read loop.
				 */
				if (handle->cur &&
				    handle->cur->entries[handle->k]) {
					goto out_finish;
				} else {
					eof = 1;
					break;
				}
			}
			if (++ring >= ring_size)
				ring = 0;
		}
		asked += i;
		want -= i;

		/*
		 * We are out of data, wait for some more.
		 */
		if (!have) {
			if (!asked)
				break;

			ret = hib_wait_io(&hb);
			if (ret)
				goto out_finish;
			have += asked;
			asked = 0;
			if (eof)
				eof = 2;
		}

		if (crc->run_threads) {
			wait_event(crc->done, atomic_read(&crc->stop));
			atomic_set(&crc->stop, 0);
			crc->run_threads = 0;
		}

		for (thr = 0; have && thr < nr_threads; thr++) {
			data[thr].cmp_len = *(size_t *)page[pg];
			if (unlikely(!data[thr].cmp_len ||
			             data[thr].cmp_len >
			             lzo1x_worst_compress(LZO_UNC_SIZE))) {
				pr_err("Invalid LZO compressed length\n");
				ret = -1;
				goto out_finish;
			}

			need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER,
			                    PAGE_SIZE);
			if (need > have) {
				if (eof > 1) {
					ret = -1;
					goto out_finish;
				}
				break;
			}

			for (off = 0;
			     off < LZO_HEADER + data[thr].cmp_len;
			     off += PAGE_SIZE) {
				memcpy(data[thr].cmp + off,
				       page[pg], PAGE_SIZE);
				have--;
				want++;
				if (++pg >= ring_size)
					pg = 0;
			}

			atomic_set(&data[thr].ready, 1);
			wake_up(&data[thr].go);
		}

		/*
		 * Wait for more data while we are decompressing.
		 */
		if (have < LZO_CMP_PAGES && asked) {
			ret = hib_wait_io(&hb);
			if (ret)
				goto out_finish;
			have += asked;
			asked = 0;
			if (eof)
				eof = 2;
		}

		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
			wait_event(data[thr].done,
			           atomic_read(&data[thr].stop));
			atomic_set(&data[thr].stop, 0);

			ret = data[thr].ret;

			if (ret < 0) {
				pr_err("LZO decompression failed\n");
				goto out_finish;
			}

			if (unlikely(!data[thr].unc_len ||
			             data[thr].unc_len > LZO_UNC_SIZE ||
			             data[thr].unc_len & (PAGE_SIZE - 1))) {
				pr_err("Invalid LZO uncompressed length\n");
				ret = -1;
				goto out_finish;
			}

			for (off = 0;
			     off < data[thr].unc_len; off += PAGE_SIZE) {
				memcpy(data_of(*snapshot),
				       data[thr].unc + off, PAGE_SIZE);

				if (!(nr_pages % m))
					pr_info("Image loading progress: %3d%%\n",
						nr_pages / m * 10);
				nr_pages++;

				ret = snapshot_write_next(snapshot);
				if (ret <= 0) {
					crc->run_threads = thr + 1;
					atomic_set(&crc->ready, 1);
					wake_up(&crc->go);
					goto out_finish;
				}
			}
		}

		crc->run_threads = thr;
		atomic_set(&crc->ready, 1);
		wake_up(&crc->go);
	}

out_finish:
	if (crc->run_threads) {
		wait_event(crc->done, atomic_read(&crc->stop));
		atomic_set(&crc->stop, 0);
	}
	stop = ktime_get();
	if (!ret) {
		pr_info("Image loading done\n");
		snapshot_write_finalize(snapshot);
		if (!snapshot_image_loaded(snapshot))
			ret = -ENODATA;
		if (!ret) {
			if (swsusp_header->flags & SF_CRC32_MODE) {
				if (handle->crc32 != swsusp_header->crc32) {
					pr_err("Invalid image CRC32!\n");
					ret = -ENODATA;
				}
			}
		}
	}
	swsusp_show_speed(start, stop, nr_to_read, "Read");
out_clean:
	hib_finish_batch(&hb);
	for (i = 0; i < ring_size; i++)
		free_page((unsigned long)page[i]);
	if (crc) {
		if (crc->thr)
			kthread_stop(crc->thr);
		kfree(crc);
	}
	if (data) {
		for (thr = 0; thr < nr_threads; thr++)
			if (data[thr].thr)
				kthread_stop(data[thr].thr);
		vfree(data);
	}
	vfree(page);

	return ret;
}

/**
 *	swsusp_read - read the hibernation image.
 *	@flags_p: flags passed by the "frozen" kernel in the image header are
 *		  written into this memory location
 */

int swsusp_read(unsigned int *flags_p)
{
	int error;
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;

	memset(&snapshot, 0, sizeof(struct snapshot_handle));
	error = snapshot_write_next(&snapshot);
	if (error < (int)PAGE_SIZE)
		return error < 0 ? error : -EFAULT;
	header = (struct swsusp_info *)data_of(snapshot);
	error = get_swap_reader(&handle, flags_p);
	if (error)
		goto end;
	error = swap_read_page(&handle, header, NULL);
	if (!error) {
		error = (*flags_p & SF_NOCOMPRESS_MODE) ?
			load_image(&handle, &snapshot, header->pages - 1) :
			load_image_lzo(&handle, &snapshot, header->pages - 1);
	}
	swap_reader_finish(&handle);
end:
	if (!error)
		pr_debug("Image successfully loaded\n");
	else
		pr_debug("Error %d resuming\n", error);
	return error;
}

/**
 *	swsusp_check - Check for swsusp signature in the resume device
 */

int swsusp_check(void)
{
	int error;
	void *holder;

	hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
					    FMODE_READ | FMODE_EXCL, &holder);
	if (!IS_ERR(hib_resume_bdev)) {
		set_blocksize(hib_resume_bdev, PAGE_SIZE);
		clear_page(swsusp_header);
		error = hib_submit_io(REQ_OP_READ, 0,
					swsusp_resume_block,
					swsusp_header, NULL);
		if (error)
			goto put;

		if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
			memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
			/* Reset swap signature now */
			error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
						swsusp_resume_block,
						swsusp_header, NULL);
		} else {
			error = -EINVAL;
		}

put:
		if (error)
			blkdev_put(hib_resume_bdev, FMODE_READ | FMODE_EXCL);
		else
			pr_debug("Image signature found, resuming\n");
	} else {
		error = PTR_ERR(hib_resume_bdev);
	}

	if (error)
		pr_debug("Image not found (code %d)\n", error);

	return error;
}

/**
 *	swsusp_close - close swap device.
 */

void swsusp_close(fmode_t mode)
{
	if (IS_ERR(hib_resume_bdev)) {
		pr_debug("Image device not initialised\n");
		return;
	}

	blkdev_put(hib_resume_bdev, mode);
}

/**
 *	swsusp_unmark - Unmark swsusp signature in the resume device
 */

#ifdef CONFIG_SUSPEND
int swsusp_unmark(void)
{
	int error;

	hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block,
		      swsusp_header, NULL);
	if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
		memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
		error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
					swsusp_resume_block,
					swsusp_header, NULL);
	} else {
		pr_err("Cannot find swsusp signature!\n");
		error = -ENODEV;
	}

	/*
	 * We just returned from suspend, we don't need the image any more.
	 */
	free_all_swap_pages(root_swap);

	return error;
}
#endif

static int __init swsusp_header_init(void)
{
	swsusp_header = (struct swsusp_header *)__get_free_page(GFP_KERNEL);
	if (!swsusp_header)
		panic("Could not allocate memory for swsusp_header\n");
	return 0;
}

core_initcall(swsusp_header_init);
1609