xref: /openbmc/linux/kernel/power/swap.c (revision b7019ac5)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * linux/kernel/power/swap.c
4  *
5  * This file provides functions for reading the suspend image from
6  * and writing it to a swap partition.
7  *
8  * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
9  * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
10  * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
11  */
12 
13 #define pr_fmt(fmt) "PM: " fmt
14 
15 #include <linux/module.h>
16 #include <linux/file.h>
17 #include <linux/delay.h>
18 #include <linux/bitops.h>
19 #include <linux/genhd.h>
20 #include <linux/device.h>
21 #include <linux/bio.h>
22 #include <linux/blkdev.h>
23 #include <linux/swap.h>
24 #include <linux/swapops.h>
25 #include <linux/pm.h>
26 #include <linux/slab.h>
27 #include <linux/lzo.h>
28 #include <linux/vmalloc.h>
29 #include <linux/cpumask.h>
30 #include <linux/atomic.h>
31 #include <linux/kthread.h>
32 #include <linux/crc32.h>
33 #include <linux/ktime.h>
34 
35 #include "power.h"
36 
37 #define HIBERNATE_SIG	"S1SUSPEND"
38 
39 /*
40  * When reading an {un,}compressed image, we may restore pages in place,
41  * in which case some architectures need these pages to be cleaned before they
42  * can be executed. We don't know which pages these may be, so clean the lot.
43  */
44 static bool clean_pages_on_read;
45 static bool clean_pages_on_decompress;
46 
47 /*
48  *	The swap map is a data structure used for keeping track of each page
49  *	written to a swap partition.  It consists of many swap_map_page
50  *	structures, each of which contains an array of MAP_PAGE_ENTRIES swap entries.
51  *	These structures are stored on the swap and linked together with the
52  *	help of the .next_swap member.
53  *
54  *	The swap map is created during suspend.  The swap map pages are
55  *	allocated and populated one at a time, so we only need one memory
56  *	page to set up the entire structure.
57  *
58  *	During resume we read all the swap_map_page structures into a list.
59  */
60 
61 #define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(sector_t) - 1)
62 
63 /*
64  * Number of free pages that are not in high memory.
65  */
66 static inline unsigned long low_free_pages(void)
67 {
68 	return nr_free_pages() - nr_free_highpages();
69 }
70 
71 /*
72  * Number of pages required to be kept free while writing the image. Always
73  * half of all available low pages before the writing starts.
74  */
75 static inline unsigned long reqd_free_pages(void)
76 {
77 	return low_free_pages() / 2;
78 }
79 
80 struct swap_map_page {
81 	sector_t entries[MAP_PAGE_ENTRIES];
82 	sector_t next_swap;
83 };
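
/*
 * Editorial sketch (not part of the original file; the helper name is
 * hypothetical): with 4 KiB pages and an 8-byte sector_t, MAP_PAGE_ENTRIES
 * is 4096 / 8 - 1 = 511, so each swap_map_page indexes 511 data pages and
 * uses its final slot to link to the next map page.  The layout invariant
 * relied on throughout is that one map page fills exactly one page:
 */
static inline void swap_map_page_layout_check(void)
{
	BUILD_BUG_ON(sizeof(struct swap_map_page) != PAGE_SIZE);
}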
84 
85 struct swap_map_page_list {
86 	struct swap_map_page *map;
87 	struct swap_map_page_list *next;
88 };
89 
90 /**
91  *	The swap_map_handle structure is used for handling swap in
92  *	a file-like way.
93  */
94 
95 struct swap_map_handle {
96 	struct swap_map_page *cur;
97 	struct swap_map_page_list *maps;
98 	sector_t cur_swap;
99 	sector_t first_sector;
100 	unsigned int k;
101 	unsigned long reqd_free_pages;
102 	u32 crc32;
103 };
104 
105 struct swsusp_header {
106 	char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
107 	              sizeof(u32)];
108 	u32	crc32;
109 	sector_t image;
110 	unsigned int flags;	/* Flags to pass to the "boot" kernel */
111 	char	orig_sig[10];
112 	char	sig[10];
113 } __packed;
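
/*
 * Editorial sketch (not part of the original file; the helper name is
 * hypothetical): "reserved" pads the header to exactly one page, so the
 * two 10-byte signatures (the 20 in the expression above) sit at the very
 * end of the swap header page.  A compile-time check of that layout:
 */
static inline void swsusp_header_layout_check(void)
{
	BUILD_BUG_ON(sizeof(struct swsusp_header) != PAGE_SIZE);
}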
114 
115 static struct swsusp_header *swsusp_header;
116 
117 /**
118  *	The following functions are used for tracking the allocated
119  *	swap pages, so that they can be freed in case of an error.
120  */
121 
122 struct swsusp_extent {
123 	struct rb_node node;
124 	unsigned long start;
125 	unsigned long end;
126 };
127 
128 static struct rb_root swsusp_extents = RB_ROOT;
129 
130 static int swsusp_extents_insert(unsigned long swap_offset)
131 {
132 	struct rb_node **new = &(swsusp_extents.rb_node);
133 	struct rb_node *parent = NULL;
134 	struct swsusp_extent *ext;
135 
136 	/* Figure out where to put the new node */
137 	while (*new) {
138 		ext = rb_entry(*new, struct swsusp_extent, node);
139 		parent = *new;
140 		if (swap_offset < ext->start) {
141 			/* Try to merge */
142 			if (swap_offset == ext->start - 1) {
143 				ext->start--;
144 				return 0;
145 			}
146 			new = &((*new)->rb_left);
147 		} else if (swap_offset > ext->end) {
148 			/* Try to merge */
149 			if (swap_offset == ext->end + 1) {
150 				ext->end++;
151 				return 0;
152 			}
153 			new = &((*new)->rb_right);
154 		} else {
155 			/* It already is in the tree */
156 			return -EINVAL;
157 		}
158 	}
159 	/* Add the new node and rebalance the tree. */
160 	ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
161 	if (!ext)
162 		return -ENOMEM;
163 
164 	ext->start = swap_offset;
165 	ext->end = swap_offset;
166 	rb_link_node(&ext->node, parent, new);
167 	rb_insert_color(&ext->node, &swsusp_extents);
168 	return 0;
169 }
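
/*
 * Editorial usage sketch (not part of the original file; the demo function
 * name is hypothetical): adjacent offsets coalesce into a single extent and
 * duplicates are rejected, so a run of consecutive swap pages costs only
 * one rb-tree node.
 */
static void __maybe_unused swsusp_extents_insert_demo(void)
{
	swsusp_extents_insert(10);	/* new extent [10, 10] */
	swsusp_extents_insert(11);	/* merged to [10, 11] */
	swsusp_extents_insert(12);	/* merged to [10, 12] */
	WARN_ON(swsusp_extents_insert(11) != -EINVAL);	/* already present */
}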
170 
171 /**
172  *	alloc_swapdev_block - allocate a swap page and register that it has
173  *	been allocated, so that it can be freed in case of an error.
174  */
175 
176 sector_t alloc_swapdev_block(int swap)
177 {
178 	unsigned long offset;
179 
180 	offset = swp_offset(get_swap_page_of_type(swap));
181 	if (offset) {
182 		if (swsusp_extents_insert(offset))
183 			swap_free(swp_entry(swap, offset));
184 		else
185 			return swapdev_block(swap, offset);
186 	}
187 	return 0;
188 }
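
/*
 * Editorial note (not part of the original file): a return value of 0
 * doubles as the failure indicator here; block 0 of the resume device holds
 * the swap header itself, so no data page is ever mapped to it, and callers
 * such as write_page() treat 0 as -ENOSPC.
 */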
189 
190 /**
191  *	free_all_swap_pages - free swap pages allocated for saving image data.
192  *	It also frees the extents used to register which swap entries had been
193  *	allocated.
194  */
195 
196 void free_all_swap_pages(int swap)
197 {
198 	struct rb_node *node;
199 
200 	while ((node = swsusp_extents.rb_node)) {
201 		struct swsusp_extent *ext;
202 		unsigned long offset;
203 
204 		ext = rb_entry(node, struct swsusp_extent, node);
205 		rb_erase(node, &swsusp_extents);
206 		for (offset = ext->start; offset <= ext->end; offset++)
207 			swap_free(swp_entry(swap, offset));
208 
209 		kfree(ext);
210 	}
211 }
212 
213 int swsusp_swap_in_use(void)
214 {
215 	return (swsusp_extents.rb_node != NULL);
216 }
217 
218 /*
219  * General things
220  */
221 
222 static unsigned short root_swap = 0xffff;
223 static struct block_device *hib_resume_bdev;
224 
225 struct hib_bio_batch {
226 	atomic_t		count;
227 	wait_queue_head_t	wait;
228 	blk_status_t		error;
229 };
230 
231 static void hib_init_batch(struct hib_bio_batch *hb)
232 {
233 	atomic_set(&hb->count, 0);
234 	init_waitqueue_head(&hb->wait);
235 	hb->error = BLK_STS_OK;
236 }
237 
238 static void hib_end_io(struct bio *bio)
239 {
240 	struct hib_bio_batch *hb = bio->bi_private;
241 	struct page *page = bio_first_page_all(bio);
242 
243 	if (bio->bi_status) {
244 		pr_alert("I/O error on swap device (%u:%u:%llu)\n",
245 			 MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
246 			 (unsigned long long)bio->bi_iter.bi_sector);
247 	}
248 
249 	if (bio_data_dir(bio) == WRITE)
250 		put_page(page);
251 	else if (clean_pages_on_read)
252 		flush_icache_range((unsigned long)page_address(page),
253 				   (unsigned long)page_address(page) + PAGE_SIZE);
254 
255 	if (bio->bi_status && !hb->error)
256 		hb->error = bio->bi_status;
257 	if (atomic_dec_and_test(&hb->count))
258 		wake_up(&hb->wait);
259 
260 	bio_put(bio);
261 }
262 
263 static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr,
264 		struct hib_bio_batch *hb)
265 {
266 	struct page *page = virt_to_page(addr);
267 	struct bio *bio;
268 	int error = 0;
269 
270 	bio = bio_alloc(GFP_NOIO | __GFP_HIGH, 1);
271 	bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
272 	bio_set_dev(bio, hib_resume_bdev);
273 	bio_set_op_attrs(bio, op, op_flags);
274 
275 	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
276 		pr_err("Adding page to bio failed at %llu\n",
277 		       (unsigned long long)bio->bi_iter.bi_sector);
278 		bio_put(bio);
279 		return -EFAULT;
280 	}
281 
282 	if (hb) {
283 		bio->bi_end_io = hib_end_io;
284 		bio->bi_private = hb;
285 		atomic_inc(&hb->count);
286 		submit_bio(bio);
287 	} else {
288 		error = submit_bio_wait(bio);
289 		bio_put(bio);
290 	}
291 
292 	return error;
293 }
294 
295 static blk_status_t hib_wait_io(struct hib_bio_batch *hb)
296 {
297 	wait_event(hb->wait, atomic_read(&hb->count) == 0);
298 	return blk_status_to_errno(hb->error);
299 }
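
/*
 * Editorial sketch (not part of the original file; the demo function and
 * its parameters are hypothetical): typical use of the batch, mirroring the
 * save/load paths below -- queue any number of asynchronous page writes,
 * then reap them all at once.  Each buffer must be a refcounted page,
 * because hib_end_io() drops a page reference on writes.  Note that
 * hib_wait_io() returns a negative errno obtained from
 * blk_status_to_errno() despite its blk_status_t return type.
 */
static int __maybe_unused hib_batch_write_demo(pgoff_t first_page,
					       void **bufs, unsigned int n)
{
	struct hib_bio_batch hb;
	unsigned int i;
	int error = 0, err2;

	hib_init_batch(&hb);
	for (i = 0; i < n && !error; i++)
		error = hib_submit_io(REQ_OP_WRITE, 0, first_page + i,
				      bufs[i], &hb);
	/* always reap in-flight bios, even after a submission error */
	err2 = hib_wait_io(&hb);
	return error ? error : err2;
}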
300 
301 /*
302  * Saving part
303  */
304 
305 static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
306 {
307 	int error;
308 
309 	hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block,
310 		      swsusp_header, NULL);
311 	if (!memcmp("SWAP-SPACE", swsusp_header->sig, 10) ||
312 	    !memcmp("SWAPSPACE2", swsusp_header->sig, 10)) {
313 		memcpy(swsusp_header->orig_sig, swsusp_header->sig, 10);
314 		memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
315 		swsusp_header->image = handle->first_sector;
316 		swsusp_header->flags = flags;
317 		if (flags & SF_CRC32_MODE)
318 			swsusp_header->crc32 = handle->crc32;
319 		error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
320 				      swsusp_resume_block, swsusp_header, NULL);
321 	} else {
322 		pr_err("Swap header not found!\n");
323 		error = -ENODEV;
324 	}
325 	return error;
326 }
327 
328 /**
329  *	swsusp_swap_check - check if the resume device is a swap device
330  *	and get its index (if so)
331  *
332  *	This is called before saving image
333  *	This is called before saving the image.
334 static int swsusp_swap_check(void)
335 {
336 	int res;
337 
338 	res = swap_type_of(swsusp_resume_device, swsusp_resume_block,
339 			&hib_resume_bdev);
340 	if (res < 0)
341 		return res;
342 
343 	root_swap = res;
344 	res = blkdev_get(hib_resume_bdev, FMODE_WRITE, NULL);
345 	if (res)
346 		return res;
347 
348 	res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
349 	if (res < 0)
350 		blkdev_put(hib_resume_bdev, FMODE_WRITE);
351 
352 	/*
353 	 * Update the resume device to the one actually used,
354 	 * so the test_resume mode can use it in case it is
355 	 * invoked from hibernate() to test the snapshot.
356 	 */
357 	swsusp_resume_device = hib_resume_bdev->bd_dev;
358 	return res;
359 }
360 
361 /**
362  *	write_page - Write one page to given swap location.
363  *	@buf:		Address we're writing.
364  *	@offset:	Offset of the swap page we're writing to.
365  *	@hb:		bio completion batch
366  */
367 
368 static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
369 {
370 	void *src;
371 	int ret;
372 
373 	if (!offset)
374 		return -ENOSPC;
375 
376 	if (hb) {
377 		src = (void *)__get_free_page(GFP_NOIO | __GFP_NOWARN |
378 		                              __GFP_NORETRY);
379 		if (src) {
380 			copy_page(src, buf);
381 		} else {
382 			ret = hib_wait_io(hb); /* Free pages */
383 			if (ret)
384 				return ret;
385 			src = (void *)__get_free_page(GFP_NOIO |
386 			                              __GFP_NOWARN |
387 			                              __GFP_NORETRY);
388 			if (src) {
389 				copy_page(src, buf);
390 			} else {
391 				WARN_ON_ONCE(1);
392 				hb = NULL;	/* Go synchronous */
393 				src = buf;
394 			}
395 		}
396 	} else {
397 		src = buf;
398 	}
399 	return hib_submit_io(REQ_OP_WRITE, REQ_SYNC, offset, src, hb);
400 }
401 
402 static void release_swap_writer(struct swap_map_handle *handle)
403 {
404 	if (handle->cur)
405 		free_page((unsigned long)handle->cur);
406 	handle->cur = NULL;
407 }
408 
409 static int get_swap_writer(struct swap_map_handle *handle)
410 {
411 	int ret;
412 
413 	ret = swsusp_swap_check();
414 	if (ret) {
415 		if (ret != -ENOSPC)
416 			pr_err("Cannot find swap device, try swapon -a\n");
417 		return ret;
418 	}
419 	handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
420 	if (!handle->cur) {
421 		ret = -ENOMEM;
422 		goto err_close;
423 	}
424 	handle->cur_swap = alloc_swapdev_block(root_swap);
425 	if (!handle->cur_swap) {
426 		ret = -ENOSPC;
427 		goto err_rel;
428 	}
429 	handle->k = 0;
430 	handle->reqd_free_pages = reqd_free_pages();
431 	handle->first_sector = handle->cur_swap;
432 	return 0;
433 err_rel:
434 	release_swap_writer(handle);
435 err_close:
436 	swsusp_close(FMODE_WRITE);
437 	return ret;
438 }
439 
440 static int swap_write_page(struct swap_map_handle *handle, void *buf,
441 		struct hib_bio_batch *hb)
442 {
443 	int error = 0;
444 	sector_t offset;
445 
446 	if (!handle->cur)
447 		return -EINVAL;
448 	offset = alloc_swapdev_block(root_swap);
449 	error = write_page(buf, offset, hb);
450 	if (error)
451 		return error;
452 	handle->cur->entries[handle->k++] = offset;
453 	if (handle->k >= MAP_PAGE_ENTRIES) {
454 		offset = alloc_swapdev_block(root_swap);
455 		if (!offset)
456 			return -ENOSPC;
457 		handle->cur->next_swap = offset;
458 		error = write_page(handle->cur, handle->cur_swap, hb);
459 		if (error)
460 			goto out;
461 		clear_page(handle->cur);
462 		handle->cur_swap = offset;
463 		handle->k = 0;
464 
465 		if (hb && low_free_pages() <= handle->reqd_free_pages) {
466 			error = hib_wait_io(hb);
467 			if (error)
468 				goto out;
469 			/*
470 			 * Recalculate the number of required free pages, to
471 			 * make sure we never take more than half.
472 			 */
473 			handle->reqd_free_pages = reqd_free_pages();
474 		}
475 	}
476  out:
477 	return error;
478 }
479 
480 static int flush_swap_writer(struct swap_map_handle *handle)
481 {
482 	if (handle->cur && handle->cur_swap)
483 		return write_page(handle->cur, handle->cur_swap, NULL);
484 	else
485 		return -EINVAL;
486 }
487 
488 static int swap_writer_finish(struct swap_map_handle *handle,
489 		unsigned int flags, int error)
490 {
491 	if (!error) {
492 		flush_swap_writer(handle);
493 		pr_info("S");
494 		error = mark_swapfiles(handle, flags);
495 		pr_cont("|\n");
496 	}
497 
498 	if (error)
499 		free_all_swap_pages(root_swap);
500 	release_swap_writer(handle);
501 	swsusp_close(FMODE_WRITE);
502 
503 	return error;
504 }
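
/*
 * Editorial sketch (not part of the original file; the demo function is
 * hypothetical): the writer lifecycle as driven by swsusp_write() below --
 * acquire the writer, stream pages through the swap map, then finish,
 * which flushes the map, stamps the signature and, on error, releases
 * every swap page that was allocated.
 */
static int __maybe_unused swap_writer_demo(void *page_buf, unsigned int flags)
{
	struct swap_map_handle handle;
	int error;

	error = get_swap_writer(&handle);
	if (error)
		return error;
	error = swap_write_page(&handle, page_buf, NULL);
	return swap_writer_finish(&handle, flags, error);
}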
505 
506 /* Size of the length field stored before each compressed data chunk. */
507 #define LZO_HEADER	sizeof(size_t)
508 
509 /* Number of pages/bytes we'll compress at one time. */
510 #define LZO_UNC_PAGES	32
511 #define LZO_UNC_SIZE	(LZO_UNC_PAGES * PAGE_SIZE)
512 
513 /* Number of pages/bytes we need for compressed data (worst case). */
514 #define LZO_CMP_PAGES	DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
515 			             LZO_HEADER, PAGE_SIZE)
516 #define LZO_CMP_SIZE	(LZO_CMP_PAGES * PAGE_SIZE)
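
/*
 * Editorial worked example (not part of the original file), assuming 4 KiB
 * pages and the stock lzo1x_worst_compress(x) = x + x/16 + 64 + 3:
 * LZO_UNC_SIZE = 32 * 4096 = 131072 bytes, worst-case output 139331 bytes,
 * plus the 8-byte LZO_HEADER gives 139339 bytes, so
 * LZO_CMP_PAGES = DIV_ROUND_UP(139339, 4096) = 35 pages per chunk.
 */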
517 
518 /* Maximum number of threads for compression/decompression. */
519 #define LZO_THREADS	3
520 
521 /* Minimum/maximum number of pages for read buffering. */
522 #define LZO_MIN_RD_PAGES	1024
523 #define LZO_MAX_RD_PAGES	8192
524 
525 
526 /**
527  *	save_image - save the suspend image data
528  */
529 
530 static int save_image(struct swap_map_handle *handle,
531                       struct snapshot_handle *snapshot,
532                       unsigned int nr_to_write)
533 {
534 	unsigned int m;
535 	int ret;
536 	int nr_pages;
537 	int err2;
538 	struct hib_bio_batch hb;
539 	ktime_t start;
540 	ktime_t stop;
541 
542 	hib_init_batch(&hb);
543 
544 	pr_info("Saving image data pages (%u pages)...\n",
545 		nr_to_write);
546 	m = nr_to_write / 10;
547 	if (!m)
548 		m = 1;
549 	nr_pages = 0;
550 	start = ktime_get();
551 	while (1) {
552 		ret = snapshot_read_next(snapshot);
553 		if (ret <= 0)
554 			break;
555 		ret = swap_write_page(handle, data_of(*snapshot), &hb);
556 		if (ret)
557 			break;
558 		if (!(nr_pages % m))
559 			pr_info("Image saving progress: %3d%%\n",
560 				nr_pages / m * 10);
561 		nr_pages++;
562 	}
563 	err2 = hib_wait_io(&hb);
564 	stop = ktime_get();
565 	if (!ret)
566 		ret = err2;
567 	if (!ret)
568 		pr_info("Image saving done\n");
569 	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
570 	return ret;
571 }
572 
573 /**
574  * Structure used for CRC32.
575  */
576 struct crc_data {
577 	struct task_struct *thr;                  /* thread */
578 	atomic_t ready;                           /* ready to start flag */
579 	atomic_t stop;                            /* ready to stop flag */
580 	unsigned run_threads;                     /* nr current threads */
581 	wait_queue_head_t go;                     /* start crc update */
582 	wait_queue_head_t done;                   /* crc update done */
583 	u32 *crc32;                               /* points to handle's crc32 */
584 	size_t *unc_len[LZO_THREADS];             /* uncompressed lengths */
585 	unsigned char *unc[LZO_THREADS];          /* uncompressed data */
586 };
587 
588 /**
589  * CRC32 update function that runs in its own thread.
590  */
591 static int crc32_threadfn(void *data)
592 {
593 	struct crc_data *d = data;
594 	unsigned i;
595 
596 	while (1) {
597 		wait_event(d->go, atomic_read(&d->ready) ||
598 		                  kthread_should_stop());
599 		if (kthread_should_stop()) {
600 			d->thr = NULL;
601 			atomic_set(&d->stop, 1);
602 			wake_up(&d->done);
603 			break;
604 		}
605 		atomic_set(&d->ready, 0);
606 
607 		for (i = 0; i < d->run_threads; i++)
608 			*d->crc32 = crc32_le(*d->crc32,
609 			                     d->unc[i], *d->unc_len[i]);
610 		atomic_set(&d->stop, 1);
611 		wake_up(&d->done);
612 	}
613 	return 0;
614 }
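
/*
 * Editorial sketch (not part of the original file; the helper name is
 * hypothetical): the producer side of the ready/stop handshake used with
 * crc32_threadfn() and the (de)compression threads below -- publish the
 * work, kick the thread, then wait for it to signal completion.
 */
static void __maybe_unused crc_kick_and_wait(struct crc_data *d,
					     unsigned int threads)
{
	d->run_threads = threads;	/* unc[]/unc_len[] slots to hash */
	atomic_set(&d->ready, 1);
	wake_up(&d->go);
	wait_event(d->done, atomic_read(&d->stop));
	atomic_set(&d->stop, 0);
}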
615 /**
616  * Structure used for LZO data compression.
617  */
618 struct cmp_data {
619 	struct task_struct *thr;                  /* thread */
620 	atomic_t ready;                           /* ready to start flag */
621 	atomic_t stop;                            /* ready to stop flag */
622 	int ret;                                  /* return code */
623 	wait_queue_head_t go;                     /* start compression */
624 	wait_queue_head_t done;                   /* compression done */
625 	size_t unc_len;                           /* uncompressed length */
626 	size_t cmp_len;                           /* compressed length */
627 	unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
628 	unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
629 	unsigned char wrk[LZO1X_1_MEM_COMPRESS];  /* compression workspace */
630 };
631 
632 /**
633  * Compression function that runs in its own thread.
634  */
635 static int lzo_compress_threadfn(void *data)
636 {
637 	struct cmp_data *d = data;
638 
639 	while (1) {
640 		wait_event(d->go, atomic_read(&d->ready) ||
641 		                  kthread_should_stop());
642 		if (kthread_should_stop()) {
643 			d->thr = NULL;
644 			d->ret = -1;
645 			atomic_set(&d->stop, 1);
646 			wake_up(&d->done);
647 			break;
648 		}
649 		atomic_set(&d->ready, 0);
650 
651 		d->ret = lzo1x_1_compress(d->unc, d->unc_len,
652 		                          d->cmp + LZO_HEADER, &d->cmp_len,
653 		                          d->wrk);
654 		atomic_set(&d->stop, 1);
655 		wake_up(&d->done);
656 	}
657 	return 0;
658 }
659 
660 /**
661  * save_image_lzo - Save the suspend image data compressed with LZO.
662  * @handle: Swap map handle to use for saving the image.
663  * @snapshot: Image to read data from.
664  * @nr_to_write: Number of pages to save.
665  */
666 static int save_image_lzo(struct swap_map_handle *handle,
667                           struct snapshot_handle *snapshot,
668                           unsigned int nr_to_write)
669 {
670 	unsigned int m;
671 	int ret = 0;
672 	int nr_pages;
673 	int err2;
674 	struct hib_bio_batch hb;
675 	ktime_t start;
676 	ktime_t stop;
677 	size_t off;
678 	unsigned thr, run_threads, nr_threads;
679 	unsigned char *page = NULL;
680 	struct cmp_data *data = NULL;
681 	struct crc_data *crc = NULL;
682 
683 	hib_init_batch(&hb);
684 
685 	/*
686 	 * We'll limit the number of compression threads to bound the memory
687 	 * footprint.
688 	 */
689 	nr_threads = num_online_cpus() - 1;
690 	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
691 
692 	page = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH);
693 	if (!page) {
694 		pr_err("Failed to allocate LZO page\n");
695 		ret = -ENOMEM;
696 		goto out_clean;
697 	}
698 
699 	data = vmalloc(array_size(nr_threads, sizeof(*data)));
700 	if (!data) {
701 		pr_err("Failed to allocate LZO data\n");
702 		ret = -ENOMEM;
703 		goto out_clean;
704 	}
705 	for (thr = 0; thr < nr_threads; thr++)
706 		memset(&data[thr], 0, offsetof(struct cmp_data, go));
707 
708 	crc = kmalloc(sizeof(*crc), GFP_KERNEL);
709 	if (!crc) {
710 		pr_err("Failed to allocate crc\n");
711 		ret = -ENOMEM;
712 		goto out_clean;
713 	}
714 	memset(crc, 0, offsetof(struct crc_data, go));
715 
716 	/*
717 	 * Start the compression threads.
718 	 */
719 	for (thr = 0; thr < nr_threads; thr++) {
720 		init_waitqueue_head(&data[thr].go);
721 		init_waitqueue_head(&data[thr].done);
722 
723 		data[thr].thr = kthread_run(lzo_compress_threadfn,
724 		                            &data[thr],
725 		                            "image_compress/%u", thr);
726 		if (IS_ERR(data[thr].thr)) {
727 			data[thr].thr = NULL;
728 			pr_err("Cannot start compression threads\n");
729 			ret = -ENOMEM;
730 			goto out_clean;
731 		}
732 	}
733 
734 	/*
735 	 * Start the CRC32 thread.
736 	 */
737 	init_waitqueue_head(&crc->go);
738 	init_waitqueue_head(&crc->done);
739 
740 	handle->crc32 = 0;
741 	crc->crc32 = &handle->crc32;
742 	for (thr = 0; thr < nr_threads; thr++) {
743 		crc->unc[thr] = data[thr].unc;
744 		crc->unc_len[thr] = &data[thr].unc_len;
745 	}
746 
747 	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
748 	if (IS_ERR(crc->thr)) {
749 		crc->thr = NULL;
750 		pr_err("Cannot start CRC32 thread\n");
751 		ret = -ENOMEM;
752 		goto out_clean;
753 	}
754 
755 	/*
756 	 * Adjust the number of required free pages after all allocations have
757 	 * been done. We don't want to run out of pages when writing.
758 	 */
759 	handle->reqd_free_pages = reqd_free_pages();
760 
761 	pr_info("Using %u thread(s) for compression\n", nr_threads);
762 	pr_info("Compressing and saving image data (%u pages)...\n",
763 		nr_to_write);
764 	m = nr_to_write / 10;
765 	if (!m)
766 		m = 1;
767 	nr_pages = 0;
768 	start = ktime_get();
769 	for (;;) {
770 		for (thr = 0; thr < nr_threads; thr++) {
771 			for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
772 				ret = snapshot_read_next(snapshot);
773 				if (ret < 0)
774 					goto out_finish;
775 
776 				if (!ret)
777 					break;
778 
779 				memcpy(data[thr].unc + off,
780 				       data_of(*snapshot), PAGE_SIZE);
781 
782 				if (!(nr_pages % m))
783 					pr_info("Image saving progress: %3d%%\n",
784 						nr_pages / m * 10);
785 				nr_pages++;
786 			}
787 			if (!off)
788 				break;
789 
790 			data[thr].unc_len = off;
791 
792 			atomic_set(&data[thr].ready, 1);
793 			wake_up(&data[thr].go);
794 		}
795 
796 		if (!thr)
797 			break;
798 
799 		crc->run_threads = thr;
800 		atomic_set(&crc->ready, 1);
801 		wake_up(&crc->go);
802 
803 		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
804 			wait_event(data[thr].done,
805 			           atomic_read(&data[thr].stop));
806 			atomic_set(&data[thr].stop, 0);
807 
808 			ret = data[thr].ret;
809 
810 			if (ret < 0) {
811 				pr_err("LZO compression failed\n");
812 				goto out_finish;
813 			}
814 
815 			if (unlikely(!data[thr].cmp_len ||
816 			             data[thr].cmp_len >
817 			             lzo1x_worst_compress(data[thr].unc_len))) {
818 				pr_err("Invalid LZO compressed length\n");
819 				ret = -1;
820 				goto out_finish;
821 			}
822 
823 			*(size_t *)data[thr].cmp = data[thr].cmp_len;
824 
825 			/*
826 			 * Given we are writing one page at a time to disk, we
827 			 * copy that much from the buffer, although the last
828 			 * bit will likely be smaller than full page. This is
829 			 * OK - we saved the length of the compressed data, so
830 			 * any garbage at the end will be discarded when we
831 			 * read it.
832 			 */
833 			for (off = 0;
834 			     off < LZO_HEADER + data[thr].cmp_len;
835 			     off += PAGE_SIZE) {
836 				memcpy(page, data[thr].cmp + off, PAGE_SIZE);
837 
838 				ret = swap_write_page(handle, page, &hb);
839 				if (ret)
840 					goto out_finish;
841 			}
842 		}
843 
844 		wait_event(crc->done, atomic_read(&crc->stop));
845 		atomic_set(&crc->stop, 0);
846 	}
847 
848 out_finish:
849 	err2 = hib_wait_io(&hb);
850 	stop = ktime_get();
851 	if (!ret)
852 		ret = err2;
853 	if (!ret)
854 		pr_info("Image saving done\n");
855 	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
856 out_clean:
857 	if (crc) {
858 		if (crc->thr)
859 			kthread_stop(crc->thr);
860 		kfree(crc);
861 	}
862 	if (data) {
863 		for (thr = 0; thr < nr_threads; thr++)
864 			if (data[thr].thr)
865 				kthread_stop(data[thr].thr);
866 		vfree(data);
867 	}
868 	if (page)
		free_page((unsigned long)page);
869 
870 	return ret;
871 }
872 
873 /**
874  *	enough_swap - Make sure we have enough swap to save the image.
875  *
876  *	Returns TRUE or FALSE after checking the total amount of swap
877  *	space available from the resume partition.
878  */
879 
880 static int enough_swap(unsigned int nr_pages)
881 {
882 	unsigned int free_swap = count_swap_pages(root_swap, 1);
883 	unsigned int required;
884 
885 	pr_debug("Free swap pages: %u\n", free_swap);
886 
887 	required = PAGES_FOR_IO + nr_pages;
888 	return free_swap > required;
889 }
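
/*
 * Editorial worked example (not part of the original file): for a
 * 100000-page uncompressed image, the check requires strictly more than
 * PAGES_FOR_IO + 100000 free swap pages.  Note that swsusp_write() only
 * performs this check in SF_NOCOMPRESS_MODE; compressed writes rely on
 * -ENOSPC from the swap allocator instead.
 */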
890 
891 /**
892  *	swsusp_write - Write entire image and metadata.
893  *	@flags: flags to pass to the "boot" kernel in the image header
894  *
895  *	It is important _NOT_ to unmount filesystems at this point. We want
896  *	them synced (in case something goes wrong) but we do _not_ want to mark
897  *	the filesystems clean: they are not. (And it does not matter; if we
898  *	resume correctly, we'll mark the system clean anyway.)
899  */
900 
901 int swsusp_write(unsigned int flags)
902 {
903 	struct swap_map_handle handle;
904 	struct snapshot_handle snapshot;
905 	struct swsusp_info *header;
906 	unsigned long pages;
907 	int error;
908 
909 	pages = snapshot_get_image_size();
910 	error = get_swap_writer(&handle);
911 	if (error) {
912 		pr_err("Cannot get swap writer\n");
913 		return error;
914 	}
915 	if (flags & SF_NOCOMPRESS_MODE) {
916 		if (!enough_swap(pages)) {
917 			pr_err("Not enough free swap\n");
918 			error = -ENOSPC;
919 			goto out_finish;
920 		}
921 	}
922 	memset(&snapshot, 0, sizeof(struct snapshot_handle));
923 	error = snapshot_read_next(&snapshot);
924 	if (error < (int)PAGE_SIZE) {
925 		if (error >= 0)
926 			error = -EFAULT;
927 
928 		goto out_finish;
929 	}
930 	header = (struct swsusp_info *)data_of(snapshot);
931 	error = swap_write_page(&handle, header, NULL);
932 	if (!error) {
933 		error = (flags & SF_NOCOMPRESS_MODE) ?
934 			save_image(&handle, &snapshot, pages - 1) :
935 			save_image_lzo(&handle, &snapshot, pages - 1);
936 	}
937 out_finish:
938 	error = swap_writer_finish(&handle, flags, error);
939 	return error;
940 }
941 
942 /**
943  *	The following functions allow us to read data using a swap map
944  *	in a file-like way.
945  */
946 
947 static void release_swap_reader(struct swap_map_handle *handle)
948 {
949 	struct swap_map_page_list *tmp;
950 
951 	while (handle->maps) {
952 		if (handle->maps->map)
953 			free_page((unsigned long)handle->maps->map);
954 		tmp = handle->maps;
955 		handle->maps = handle->maps->next;
956 		kfree(tmp);
957 	}
958 	handle->cur = NULL;
959 }
960 
961 static int get_swap_reader(struct swap_map_handle *handle,
962 		unsigned int *flags_p)
963 {
964 	int error;
965 	struct swap_map_page_list *tmp, *last;
966 	sector_t offset;
967 
968 	*flags_p = swsusp_header->flags;
969 
970 	if (!swsusp_header->image) /* how can this happen? */
971 		return -EINVAL;
972 
973 	handle->cur = NULL;
974 	last = handle->maps = NULL;
975 	offset = swsusp_header->image;
976 	while (offset) {
977 		tmp = kmalloc(sizeof(*handle->maps), GFP_KERNEL);
978 		if (!tmp) {
979 			release_swap_reader(handle);
980 			return -ENOMEM;
981 		}
982 		memset(tmp, 0, sizeof(*tmp));
983 		if (!handle->maps)
984 			handle->maps = tmp;
985 		if (last)
986 			last->next = tmp;
987 		last = tmp;
988 
989 		tmp->map = (struct swap_map_page *)
990 			   __get_free_page(GFP_NOIO | __GFP_HIGH);
991 		if (!tmp->map) {
992 			release_swap_reader(handle);
993 			return -ENOMEM;
994 		}
995 
996 		error = hib_submit_io(REQ_OP_READ, 0, offset, tmp->map, NULL);
997 		if (error) {
998 			release_swap_reader(handle);
999 			return error;
1000 		}
1001 		offset = tmp->map->next_swap;
1002 	}
1003 	handle->k = 0;
1004 	handle->cur = handle->maps->map;
1005 	return 0;
1006 }
1007 
1008 static int swap_read_page(struct swap_map_handle *handle, void *buf,
1009 		struct hib_bio_batch *hb)
1010 {
1011 	sector_t offset;
1012 	int error;
1013 	struct swap_map_page_list *tmp;
1014 
1015 	if (!handle->cur)
1016 		return -EINVAL;
1017 	offset = handle->cur->entries[handle->k];
1018 	if (!offset)
1019 		return -EFAULT;
1020 	error = hib_submit_io(REQ_OP_READ, 0, offset, buf, hb);
1021 	if (error)
1022 		return error;
1023 	if (++handle->k >= MAP_PAGE_ENTRIES) {
1024 		handle->k = 0;
1025 		free_page((unsigned long)handle->maps->map);
1026 		tmp = handle->maps;
1027 		handle->maps = handle->maps->next;
1028 		kfree(tmp);
1029 		if (!handle->maps)
1030 			release_swap_reader(handle);
1031 		else
1032 			handle->cur = handle->maps->map;
1033 	}
1034 	return error;
1035 }
1036 
1037 static int swap_reader_finish(struct swap_map_handle *handle)
1038 {
1039 	release_swap_reader(handle);
1040 
1041 	return 0;
1042 }
1043 
1044 /**
1045  *	load_image - load the image using the swap map handle
1046  *	@handle and the snapshot handle @snapshot
1047  *	(assume there are @nr_to_read pages to load)
1048  */
1049 
1050 static int load_image(struct swap_map_handle *handle,
1051                       struct snapshot_handle *snapshot,
1052                       unsigned int nr_to_read)
1053 {
1054 	unsigned int m;
1055 	int ret = 0;
1056 	ktime_t start;
1057 	ktime_t stop;
1058 	struct hib_bio_batch hb;
1059 	int err2;
1060 	unsigned nr_pages;
1061 
1062 	hib_init_batch(&hb);
1063 
1064 	clean_pages_on_read = true;
1065 	pr_info("Loading image data pages (%u pages)...\n", nr_to_read);
1066 	m = nr_to_read / 10;
1067 	if (!m)
1068 		m = 1;
1069 	nr_pages = 0;
1070 	start = ktime_get();
1071 	for ( ; ; ) {
1072 		ret = snapshot_write_next(snapshot);
1073 		if (ret <= 0)
1074 			break;
1075 		ret = swap_read_page(handle, data_of(*snapshot), &hb);
1076 		if (ret)
1077 			break;
1078 		if (snapshot->sync_read)
1079 			ret = hib_wait_io(&hb);
1080 		if (ret)
1081 			break;
1082 		if (!(nr_pages % m))
1083 			pr_info("Image loading progress: %3d%%\n",
1084 				nr_pages / m * 10);
1085 		nr_pages++;
1086 	}
1087 	err2 = hib_wait_io(&hb);
1088 	stop = ktime_get();
1089 	if (!ret)
1090 		ret = err2;
1091 	if (!ret) {
1092 		pr_info("Image loading done\n");
1093 		snapshot_write_finalize(snapshot);
1094 		if (!snapshot_image_loaded(snapshot))
1095 			ret = -ENODATA;
1096 	}
1097 	swsusp_show_speed(start, stop, nr_to_read, "Read");
1098 	return ret;
1099 }
1100 
1101 /**
1102  * Structure used for LZO data decompression.
1103  */
1104 struct dec_data {
1105 	struct task_struct *thr;                  /* thread */
1106 	atomic_t ready;                           /* ready to start flag */
1107 	atomic_t stop;                            /* ready to stop flag */
1108 	int ret;                                  /* return code */
1109 	wait_queue_head_t go;                     /* start decompression */
1110 	wait_queue_head_t done;                   /* decompression done */
1111 	size_t unc_len;                           /* uncompressed length */
1112 	size_t cmp_len;                           /* compressed length */
1113 	unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
1114 	unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
1115 };
1116 
1117 /**
1118  * Decompression function that runs in its own thread.
1119  */
1120 static int lzo_decompress_threadfn(void *data)
1121 {
1122 	struct dec_data *d = data;
1123 
1124 	while (1) {
1125 		wait_event(d->go, atomic_read(&d->ready) ||
1126 		                  kthread_should_stop());
1127 		if (kthread_should_stop()) {
1128 			d->thr = NULL;
1129 			d->ret = -1;
1130 			atomic_set(&d->stop, 1);
1131 			wake_up(&d->done);
1132 			break;
1133 		}
1134 		atomic_set(&d->ready, 0);
1135 
1136 		d->unc_len = LZO_UNC_SIZE;
1137 		d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
1138 		                               d->unc, &d->unc_len);
1139 		if (clean_pages_on_decompress)
1140 			flush_icache_range((unsigned long)d->unc,
1141 					   (unsigned long)d->unc + d->unc_len);
1142 
1143 		atomic_set(&d->stop, 1);
1144 		wake_up(&d->done);
1145 	}
1146 	return 0;
1147 }
1148 
1149 /**
1150  * load_image_lzo - Load compressed image data and decompress it with LZO.
1151  * @handle: Swap map handle to use for loading data.
1152  * @snapshot: Image to copy uncompressed data into.
1153  * @nr_to_read: Number of pages to load.
1154  */
1155 static int load_image_lzo(struct swap_map_handle *handle,
1156                           struct snapshot_handle *snapshot,
1157                           unsigned int nr_to_read)
1158 {
1159 	unsigned int m;
1160 	int ret = 0;
1161 	int eof = 0;
1162 	struct hib_bio_batch hb;
1163 	ktime_t start;
1164 	ktime_t stop;
1165 	unsigned nr_pages;
1166 	size_t off;
1167 	unsigned i, thr, run_threads, nr_threads;
1168 	unsigned ring = 0, pg = 0, ring_size = 0,
1169 	         have = 0, want, need, asked = 0;
1170 	unsigned long read_pages = 0;
1171 	unsigned char **page = NULL;
1172 	struct dec_data *data = NULL;
1173 	struct crc_data *crc = NULL;
1174 
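	/*
	 * Editorial note (not part of the original file) on the ring
	 * accounting used below: "want" counts empty ring slots to refill,
	 * "asked" counts pages with reads in flight, and "have" counts
	 * pages whose reads have completed and can be consumed.  Every page
	 * cycles want -> asked -> have -> want, so once the ring is
	 * allocated, want + asked + have == ring_size at the loop head.
	 */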
1175 	hib_init_batch(&hb);
1176 
1177 	/*
1178 	 * We'll limit the number of decompression threads to bound the memory
1179 	 * footprint.
1180 	 */
1181 	nr_threads = num_online_cpus() - 1;
1182 	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
1183 
1184 	page = vmalloc(array_size(LZO_MAX_RD_PAGES, sizeof(*page)));
1185 	if (!page) {
1186 		pr_err("Failed to allocate LZO page\n");
1187 		ret = -ENOMEM;
1188 		goto out_clean;
1189 	}
1190 
1191 	data = vmalloc(array_size(nr_threads, sizeof(*data)));
1192 	if (!data) {
1193 		pr_err("Failed to allocate LZO data\n");
1194 		ret = -ENOMEM;
1195 		goto out_clean;
1196 	}
1197 	for (thr = 0; thr < nr_threads; thr++)
1198 		memset(&data[thr], 0, offsetof(struct dec_data, go));
1199 
1200 	crc = kmalloc(sizeof(*crc), GFP_KERNEL);
1201 	if (!crc) {
1202 		pr_err("Failed to allocate crc\n");
1203 		ret = -ENOMEM;
1204 		goto out_clean;
1205 	}
1206 	memset(crc, 0, offsetof(struct crc_data, go));
1207 
1208 	clean_pages_on_decompress = true;
1209 
1210 	/*
1211 	 * Start the decompression threads.
1212 	 */
1213 	for (thr = 0; thr < nr_threads; thr++) {
1214 		init_waitqueue_head(&data[thr].go);
1215 		init_waitqueue_head(&data[thr].done);
1216 
1217 		data[thr].thr = kthread_run(lzo_decompress_threadfn,
1218 		                            &data[thr],
1219 		                            "image_decompress/%u", thr);
1220 		if (IS_ERR(data[thr].thr)) {
1221 			data[thr].thr = NULL;
1222 			pr_err("Cannot start decompression threads\n");
1223 			ret = -ENOMEM;
1224 			goto out_clean;
1225 		}
1226 	}
1227 
1228 	/*
1229 	 * Start the CRC32 thread.
1230 	 */
1231 	init_waitqueue_head(&crc->go);
1232 	init_waitqueue_head(&crc->done);
1233 
1234 	handle->crc32 = 0;
1235 	crc->crc32 = &handle->crc32;
1236 	for (thr = 0; thr < nr_threads; thr++) {
1237 		crc->unc[thr] = data[thr].unc;
1238 		crc->unc_len[thr] = &data[thr].unc_len;
1239 	}
1240 
1241 	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
1242 	if (IS_ERR(crc->thr)) {
1243 		crc->thr = NULL;
1244 		pr_err("Cannot start CRC32 thread\n");
1245 		ret = -ENOMEM;
1246 		goto out_clean;
1247 	}
1248 
1249 	/*
1250 	 * Set the number of pages for read buffering.
1251 	 * This is complete guesswork, because we'll only know the real
1252 	 * picture once prepare_image() is called, which is much later on
1253 	 * during the image load phase. We'll assume the worst case and
1254 	 * say that none of the image pages are from high memory.
1255 	 */
1256 	if (low_free_pages() > snapshot_get_image_size())
1257 		read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
1258 	read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);
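	/*
	 * Editorial worked example (not part of the original file): with,
	 * say, 100000 free low pages and a 60000-page image,
	 * read_pages = (100000 - 60000) / 2 = 20000, clamped down to
	 * LZO_MAX_RD_PAGES = 8192; with little or no headroom it is raised
	 * to LZO_MIN_RD_PAGES = 1024.
	 */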
1259 
1260 	for (i = 0; i < read_pages; i++) {
1261 		page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
1262 						  GFP_NOIO | __GFP_HIGH :
1263 						  GFP_NOIO | __GFP_NOWARN |
1264 						  __GFP_NORETRY);
1265 
1266 		if (!page[i]) {
1267 			if (i < LZO_CMP_PAGES) {
1268 				ring_size = i;
1269 				pr_err("Failed to allocate LZO pages\n");
1270 				ret = -ENOMEM;
1271 				goto out_clean;
1272 			} else {
1273 				break;
1274 			}
1275 		}
1276 	}
1277 	want = ring_size = i;
1278 
1279 	pr_info("Using %u thread(s) for decompression\n", nr_threads);
1280 	pr_info("Loading and decompressing image data (%u pages)...\n",
1281 		nr_to_read);
1282 	m = nr_to_read / 10;
1283 	if (!m)
1284 		m = 1;
1285 	nr_pages = 0;
1286 	start = ktime_get();
1287 
1288 	ret = snapshot_write_next(snapshot);
1289 	if (ret <= 0)
1290 		goto out_finish;
1291 
1292 	for (;;) {
1293 		for (i = 0; !eof && i < want; i++) {
1294 			ret = swap_read_page(handle, page[ring], &hb);
1295 			if (ret) {
1296 				/*
1297 				 * On real read error, finish. On end of data,
1298 				 * set EOF flag and just exit the read loop.
1299 				 */
1300 				if (handle->cur &&
1301 				    handle->cur->entries[handle->k]) {
1302 					goto out_finish;
1303 				} else {
1304 					eof = 1;
1305 					break;
1306 				}
1307 			}
1308 			if (++ring >= ring_size)
1309 				ring = 0;
1310 		}
1311 		asked += i;
1312 		want -= i;
1313 
1314 		/*
1315 		 * We are out of data, wait for some more.
1316 		 */
1317 		if (!have) {
1318 			if (!asked)
1319 				break;
1320 
1321 			ret = hib_wait_io(&hb);
1322 			if (ret)
1323 				goto out_finish;
1324 			have += asked;
1325 			asked = 0;
1326 			if (eof)
1327 				eof = 2;
1328 		}
1329 
1330 		if (crc->run_threads) {
1331 			wait_event(crc->done, atomic_read(&crc->stop));
1332 			atomic_set(&crc->stop, 0);
1333 			crc->run_threads = 0;
1334 		}
1335 
1336 		for (thr = 0; have && thr < nr_threads; thr++) {
1337 			data[thr].cmp_len = *(size_t *)page[pg];
1338 			if (unlikely(!data[thr].cmp_len ||
1339 			             data[thr].cmp_len >
1340 			             lzo1x_worst_compress(LZO_UNC_SIZE))) {
1341 				pr_err("Invalid LZO compressed length\n");
1342 				ret = -1;
1343 				goto out_finish;
1344 			}
1345 
1346 			need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER,
1347 			                    PAGE_SIZE);
1348 			if (need > have) {
1349 				if (eof > 1) {
1350 					ret = -1;
1351 					goto out_finish;
1352 				}
1353 				break;
1354 			}
1355 
1356 			for (off = 0;
1357 			     off < LZO_HEADER + data[thr].cmp_len;
1358 			     off += PAGE_SIZE) {
1359 				memcpy(data[thr].cmp + off,
1360 				       page[pg], PAGE_SIZE);
1361 				have--;
1362 				want++;
1363 				if (++pg >= ring_size)
1364 					pg = 0;
1365 			}
1366 
1367 			atomic_set(&data[thr].ready, 1);
1368 			wake_up(&data[thr].go);
1369 		}
1370 
1371 		/*
1372 		 * Wait for more data while we are decompressing.
1373 		 */
1374 		if (have < LZO_CMP_PAGES && asked) {
1375 			ret = hib_wait_io(&hb);
1376 			if (ret)
1377 				goto out_finish;
1378 			have += asked;
1379 			asked = 0;
1380 			if (eof)
1381 				eof = 2;
1382 		}
1383 
1384 		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
1385 			wait_event(data[thr].done,
1386 			           atomic_read(&data[thr].stop));
1387 			atomic_set(&data[thr].stop, 0);
1388 
1389 			ret = data[thr].ret;
1390 
1391 			if (ret < 0) {
1392 				pr_err("LZO decompression failed\n");
1393 				goto out_finish;
1394 			}
1395 
1396 			if (unlikely(!data[thr].unc_len ||
1397 			             data[thr].unc_len > LZO_UNC_SIZE ||
1398 			             data[thr].unc_len & (PAGE_SIZE - 1))) {
1399 				pr_err("Invalid LZO uncompressed length\n");
1400 				ret = -1;
1401 				goto out_finish;
1402 			}
1403 
1404 			for (off = 0;
1405 			     off < data[thr].unc_len; off += PAGE_SIZE) {
1406 				memcpy(data_of(*snapshot),
1407 				       data[thr].unc + off, PAGE_SIZE);
1408 
1409 				if (!(nr_pages % m))
1410 					pr_info("Image loading progress: %3d%%\n",
1411 						nr_pages / m * 10);
1412 				nr_pages++;
1413 
1414 				ret = snapshot_write_next(snapshot);
1415 				if (ret <= 0) {
1416 					crc->run_threads = thr + 1;
1417 					atomic_set(&crc->ready, 1);
1418 					wake_up(&crc->go);
1419 					goto out_finish;
1420 				}
1421 			}
1422 		}
1423 
1424 		crc->run_threads = thr;
1425 		atomic_set(&crc->ready, 1);
1426 		wake_up(&crc->go);
1427 	}
1428 
1429 out_finish:
1430 	if (crc->run_threads) {
1431 		wait_event(crc->done, atomic_read(&crc->stop));
1432 		atomic_set(&crc->stop, 0);
1433 	}
1434 	stop = ktime_get();
1435 	if (!ret) {
1436 		pr_info("Image loading done\n");
1437 		snapshot_write_finalize(snapshot);
1438 		if (!snapshot_image_loaded(snapshot))
1439 			ret = -ENODATA;
1440 		if (!ret) {
1441 			if (swsusp_header->flags & SF_CRC32_MODE) {
1442 				if (handle->crc32 != swsusp_header->crc32) {
1443 					pr_err("Invalid image CRC32!\n");
1444 					ret = -ENODATA;
1445 				}
1446 			}
1447 		}
1448 	}
1449 	swsusp_show_speed(start, stop, nr_to_read, "Read");
1450 out_clean:
1451 	for (i = 0; i < ring_size; i++)
1452 		free_page((unsigned long)page[i]);
1453 	if (crc) {
1454 		if (crc->thr)
1455 			kthread_stop(crc->thr);
1456 		kfree(crc);
1457 	}
1458 	if (data) {
1459 		for (thr = 0; thr < nr_threads; thr++)
1460 			if (data[thr].thr)
1461 				kthread_stop(data[thr].thr);
1462 		vfree(data);
1463 	}
1464 	vfree(page);
1465 
1466 	return ret;
1467 }
1468 
1469 /**
1470  *	swsusp_read - read the hibernation image.
1471  *	@flags_p: memory location to receive the flags passed by the "frozen"
1472  *		  kernel in the image header
1473  */
1474 
1475 int swsusp_read(unsigned int *flags_p)
1476 {
1477 	int error;
1478 	struct swap_map_handle handle;
1479 	struct snapshot_handle snapshot;
1480 	struct swsusp_info *header;
1481 
1482 	memset(&snapshot, 0, sizeof(struct snapshot_handle));
1483 	error = snapshot_write_next(&snapshot);
1484 	if (error < (int)PAGE_SIZE)
1485 		return error < 0 ? error : -EFAULT;
1486 	header = (struct swsusp_info *)data_of(snapshot);
1487 	error = get_swap_reader(&handle, flags_p);
1488 	if (error)
1489 		goto end;
1491 	error = swap_read_page(&handle, header, NULL);
1492 	if (!error) {
1493 		error = (*flags_p & SF_NOCOMPRESS_MODE) ?
1494 			load_image(&handle, &snapshot, header->pages - 1) :
1495 			load_image_lzo(&handle, &snapshot, header->pages - 1);
1496 	}
1497 	swap_reader_finish(&handle);
1498 end:
1499 	if (!error)
1500 		pr_debug("Image successfully loaded\n");
1501 	else
1502 		pr_debug("Error %d resuming\n", error);
1503 	return error;
1504 }
1505 
1506 /**
1507  *      swsusp_check - Check for swsusp signature in the resume device
1508  */
1509 
1510 int swsusp_check(void)
1511 {
1512 	int error;
1513 
1514 	hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
1515 					    FMODE_READ, NULL);
1516 	if (!IS_ERR(hib_resume_bdev)) {
1517 		set_blocksize(hib_resume_bdev, PAGE_SIZE);
1518 		clear_page(swsusp_header);
1519 		error = hib_submit_io(REQ_OP_READ, 0,
1520 					swsusp_resume_block,
1521 					swsusp_header, NULL);
1522 		if (error)
1523 			goto put;
1524 
1525 		if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
1526 			memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
1527 			/* Reset swap signature now */
1528 			error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
1529 						swsusp_resume_block,
1530 						swsusp_header, NULL);
1531 		} else {
1532 			error = -EINVAL;
1533 		}
1534 
1535 put:
1536 		if (error)
1537 			blkdev_put(hib_resume_bdev, FMODE_READ);
1538 		else
1539 			pr_debug("Image signature found, resuming\n");
1540 	} else {
1541 		error = PTR_ERR(hib_resume_bdev);
1542 	}
1543 
1544 	if (error)
1545 		pr_debug("Image not found (code %d)\n", error);
1546 
1547 	return error;
1548 }
1549 
1550 /**
1551  *	swsusp_close - close swap device.
1552  */
1553 
1554 void swsusp_close(fmode_t mode)
1555 {
1556 	if (IS_ERR(hib_resume_bdev)) {
1557 		pr_debug("Image device not initialised\n");
1558 		return;
1559 	}
1560 
1561 	blkdev_put(hib_resume_bdev, mode);
1562 }
1563 
1564 /**
1565  *      swsusp_unmark - Unmark swsusp signature in the resume device
1566  */
1567 
1568 #ifdef CONFIG_SUSPEND
1569 int swsusp_unmark(void)
1570 {
1571 	int error;
1572 
1573 	hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block,
1574 		      swsusp_header, NULL);
1575 	if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
1576 		memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
1577 		error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
1578 					swsusp_resume_block,
1579 					swsusp_header, NULL);
1580 	} else {
1581 		pr_err("Cannot find swsusp signature!\n");
1582 		error = -ENODEV;
1583 	}
1584 
1585 	/*
1586 	 * We just returned from suspend, we don't need the image any more.
1587 	 */
1588 	free_all_swap_pages(root_swap);
1589 
1590 	return error;
1591 }
1592 #endif
1593 
1594 static int swsusp_header_init(void)
1595 {
1596 	swsusp_header = (struct swsusp_header *)__get_free_page(GFP_KERNEL);
1597 	if (!swsusp_header)
1598 		panic("Could not allocate memory for swsusp_header\n");
1599 	return 0;
1600 }
1601 
1602 core_initcall(swsusp_header_init);
1603