xref: /openbmc/linux/kernel/power/swap.c (revision 7692e29d)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/power/swap.c
 *
 * This file provides functions for reading the suspend image from
 * and writing it to a swap partition.
 *
 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
 */

#define pr_fmt(fmt) "PM: " fmt

#include <linux/module.h>
#include <linux/file.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/vmalloc.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/ktime.h>

#include "power.h"

#define HIBERNATE_SIG	"S1SUSPEND"

u32 swsusp_hardware_signature;

/*
 * When reading an {un,}compressed image, we may restore pages in place,
 * in which case some architectures need these pages cleaning before they
 * can be executed. We don't know which pages these may be, so clean the lot.
 */
static bool clean_pages_on_read;
static bool clean_pages_on_decompress;

/*
 *	The swap map is a data structure used for keeping track of each page
 *	written to a swap partition.  It consists of many swap_map_page
 *	structures that each contain an array of MAP_PAGE_ENTRIES swap entries.
 *	These structures are stored on the swap and linked together with the
 *	help of the .next_swap member.
 *
 *	The swap map is created during suspend.  The swap map pages are
 *	allocated and populated one at a time, so we only need one memory
 *	page to set up the entire structure.
 *
 *	During resume we read all swap_map_page structures into a list.
 */

#define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(sector_t) - 1)

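/*
 * A worked example, assuming 4 KiB pages and a 64-bit sector_t: each
 * swap_map_page then holds 4096 / 8 - 1 = 511 image-page entries plus
 * the link to the next map page, so one map page describes a little
 * under 2 MiB of image data.
 */
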
/*
 * Number of free pages that are not high.
 */
static inline unsigned long low_free_pages(void)
{
	return nr_free_pages() - nr_free_highpages();
}

/*
 * Number of pages required to be kept free while writing the image. Always
 * half of all available low pages before the writing starts.
 */
static inline unsigned long reqd_free_pages(void)
{
	return low_free_pages() / 2;
}

struct swap_map_page {
	sector_t entries[MAP_PAGE_ENTRIES];
	sector_t next_swap;
};

struct swap_map_page_list {
	struct swap_map_page *map;
	struct swap_map_page_list *next;
};

/*
 *	The swap_map_handle structure is used for handling swap in
 *	a file-like way.
 */

struct swap_map_handle {
	struct swap_map_page *cur;
	struct swap_map_page_list *maps;
	sector_t cur_swap;
	sector_t first_sector;
	unsigned int k;
	unsigned long reqd_free_pages;
	u32 crc32;
};

struct swsusp_header {
	char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
	              sizeof(u32) - sizeof(u32)];
	u32	hw_sig;
	u32	crc32;
	sector_t image;
	unsigned int flags;	/* Flags to pass to the "boot" kernel */
	char	orig_sig[10];
	char	sig[10];
} __packed;
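
/*
 * The reserved[] padding sizes this structure to exactly PAGE_SIZE, so
 * sig[] ends up in the last 10 bytes of the page -- the same place the
 * swap signature ("SWAP-SPACE"/"SWAPSPACE2") occupies in a swap header.
 * That is what lets mark_swapfiles() below replace the swap signature
 * in place while preserving the original in orig_sig[].
 */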

static struct swsusp_header *swsusp_header;

/*
 *	The following functions are used for tracking the allocated
 *	swap pages, so that they can be freed in case of an error.
 */

struct swsusp_extent {
	struct rb_node node;
	unsigned long start;
	unsigned long end;
};

static struct rb_root swsusp_extents = RB_ROOT;

static int swsusp_extents_insert(unsigned long swap_offset)
{
	struct rb_node **new = &(swsusp_extents.rb_node);
	struct rb_node *parent = NULL;
	struct swsusp_extent *ext;

	/* Figure out where to put the new node */
	while (*new) {
		ext = rb_entry(*new, struct swsusp_extent, node);
		parent = *new;
		if (swap_offset < ext->start) {
			/* Try to merge */
			if (swap_offset == ext->start - 1) {
				ext->start--;
				return 0;
			}
			new = &((*new)->rb_left);
		} else if (swap_offset > ext->end) {
			/* Try to merge */
			if (swap_offset == ext->end + 1) {
				ext->end++;
				return 0;
			}
			new = &((*new)->rb_right);
		} else {
			/* It already is in the tree */
			return -EINVAL;
		}
	}
	/* Add the new node and rebalance the tree. */
	ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
	if (!ext)
		return -ENOMEM;

	ext->start = swap_offset;
	ext->end = swap_offset;
	rb_link_node(&ext->node, parent, new);
	rb_insert_color(&ext->node, &swsusp_extents);
	return 0;
}
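
/*
 * Example: inserting offsets 100, 101 and 102 (in any order) yields a
 * single [100..102] extent, and inserting 102 again then fails with
 * -EINVAL.  A new offset is only merged into the first adjacent extent
 * found on the way down; two extents that become adjacent as a result
 * are not coalesced with each other.
 */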

/*
 *	alloc_swapdev_block - allocate a swap page and register that it has
 *	been allocated, so that it can be freed in case of an error.
 */

sector_t alloc_swapdev_block(int swap)
{
	unsigned long offset;

	offset = swp_offset(get_swap_page_of_type(swap));
	if (offset) {
		if (swsusp_extents_insert(offset))
			swap_free(swp_entry(swap, offset));
		else
			return swapdev_block(swap, offset);
	}
	return 0;
}

/*
 *	free_all_swap_pages - free swap pages allocated for saving image data.
 *	It also frees the extents used to register which swap entries had been
 *	allocated.
 */

void free_all_swap_pages(int swap)
{
	struct rb_node *node;

	while ((node = swsusp_extents.rb_node)) {
		struct swsusp_extent *ext;
		unsigned long offset;

		ext = rb_entry(node, struct swsusp_extent, node);
		rb_erase(node, &swsusp_extents);
		for (offset = ext->start; offset <= ext->end; offset++)
			swap_free(swp_entry(swap, offset));

		kfree(ext);
	}
}

int swsusp_swap_in_use(void)
{
	return (swsusp_extents.rb_node != NULL);
}

/*
 * General things
 */

static unsigned short root_swap = 0xffff;
static struct block_device *hib_resume_bdev;

struct hib_bio_batch {
	atomic_t		count;
	wait_queue_head_t	wait;
	blk_status_t		error;
	struct blk_plug		plug;
};

static void hib_init_batch(struct hib_bio_batch *hb)
{
	atomic_set(&hb->count, 0);
	init_waitqueue_head(&hb->wait);
	hb->error = BLK_STS_OK;
	blk_start_plug(&hb->plug);
}

static void hib_finish_batch(struct hib_bio_batch *hb)
{
	blk_finish_plug(&hb->plug);
}
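
/*
 * Typical life cycle of a batch, as used by save_image() and friends
 * below: hib_init_batch(), any number of asynchronous hib_submit_io()
 * calls with the batch as @hb, hib_wait_io() to wait for the count to
 * drop to zero and collect the first error, then hib_finish_batch()
 * once the batch is no longer needed.
 */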

static void hib_end_io(struct bio *bio)
{
	struct hib_bio_batch *hb = bio->bi_private;
	struct page *page = bio_first_page_all(bio);

	if (bio->bi_status) {
		pr_alert("Read-error on swap-device (%u:%u:%Lu)\n",
			 MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
			 (unsigned long long)bio->bi_iter.bi_sector);
	}

	if (bio_data_dir(bio) == WRITE)
		put_page(page);
	else if (clean_pages_on_read)
		flush_icache_range((unsigned long)page_address(page),
				   (unsigned long)page_address(page) + PAGE_SIZE);

	if (bio->bi_status && !hb->error)
		hb->error = bio->bi_status;
	if (atomic_dec_and_test(&hb->count))
		wake_up(&hb->wait);

	bio_put(bio);
}

static int hib_submit_io(blk_opf_t opf, pgoff_t page_off, void *addr,
			 struct hib_bio_batch *hb)
{
	struct page *page = virt_to_page(addr);
	struct bio *bio;
	int error = 0;

	bio = bio_alloc(hib_resume_bdev, 1, opf, GFP_NOIO | __GFP_HIGH);
	bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		pr_err("Adding page to bio failed at %llu\n",
		       (unsigned long long)bio->bi_iter.bi_sector);
		bio_put(bio);
		return -EFAULT;
	}

	if (hb) {
		bio->bi_end_io = hib_end_io;
		bio->bi_private = hb;
		atomic_inc(&hb->count);
		submit_bio(bio);
	} else {
		error = submit_bio_wait(bio);
		bio_put(bio);
	}

	return error;
}

static int hib_wait_io(struct hib_bio_batch *hb)
{
	/*
	 * We are relying on the behavior of blk_plug that a thread with
	 * a plug will flush the plug list before sleeping.
	 */
	wait_event(hb->wait, atomic_read(&hb->count) == 0);
	return blk_status_to_errno(hb->error);
}

/*
 * Saving part
 */
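
/*
 * mark_swapfiles() arms the resume path: it checks that the first page
 * of the resume area still carries a valid swap signature, preserves
 * that signature in orig_sig[], and overwrites it with HIBERNATE_SIG
 * together with the sector of the first swap map page, the image flags
 * and (when used) the CRC32 and hardware signature.  The boot kernel
 * looks for HIBERNATE_SIG to decide whether an image is present.
 */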
static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
{
	int error;

	hib_submit_io(REQ_OP_READ, swsusp_resume_block, swsusp_header, NULL);
	if (!memcmp("SWAP-SPACE", swsusp_header->sig, 10) ||
	    !memcmp("SWAPSPACE2", swsusp_header->sig, 10)) {
		memcpy(swsusp_header->orig_sig, swsusp_header->sig, 10);
		memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
		swsusp_header->image = handle->first_sector;
		if (swsusp_hardware_signature) {
			swsusp_header->hw_sig = swsusp_hardware_signature;
			flags |= SF_HW_SIG;
		}
		swsusp_header->flags = flags;
		if (flags & SF_CRC32_MODE)
			swsusp_header->crc32 = handle->crc32;
		error = hib_submit_io(REQ_OP_WRITE | REQ_SYNC,
				      swsusp_resume_block, swsusp_header, NULL);
	} else {
		pr_err("Swap header not found!\n");
		error = -ENODEV;
	}
	return error;
}

/**
 *	swsusp_swap_check - check if the resume device is a swap device
 *	and get its index (if so)
 *
 *	This is called before saving the image.
 */
static int swsusp_swap_check(void)
{
	int res;

	if (swsusp_resume_device)
		res = swap_type_of(swsusp_resume_device, swsusp_resume_block);
	else
		res = find_first_swap(&swsusp_resume_device);
	if (res < 0)
		return res;
	root_swap = res;

	hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
			BLK_OPEN_WRITE, NULL, NULL);
	if (IS_ERR(hib_resume_bdev))
		return PTR_ERR(hib_resume_bdev);

	res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
	if (res < 0)
		blkdev_put(hib_resume_bdev, NULL);

	return res;
}

/**
 *	write_page - Write one page to given swap location.
 *	@buf:		Address we're writing.
 *	@offset:	Offset of the swap page we're writing to.
 *	@hb:		bio completion batch
 */

static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
{
	void *src;
	int ret;

	if (!offset)
		return -ENOSPC;

	if (hb) {
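		/*
		 * An asynchronous write cannot use @buf directly: the caller
		 * may reuse it before the I/O completes, so write from a
		 * private copy that hib_end_io() releases with put_page().
		 * If no page can be spared even after draining the batch,
		 * fall back to a synchronous write of @buf itself.
		 */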
		src = (void *)__get_free_page(GFP_NOIO | __GFP_NOWARN |
		                              __GFP_NORETRY);
		if (src) {
			copy_page(src, buf);
		} else {
			ret = hib_wait_io(hb); /* Free pages */
			if (ret)
				return ret;
			src = (void *)__get_free_page(GFP_NOIO |
			                              __GFP_NOWARN |
			                              __GFP_NORETRY);
			if (src) {
				copy_page(src, buf);
			} else {
				WARN_ON_ONCE(1);
				hb = NULL;	/* Go synchronous */
				src = buf;
			}
		}
	} else {
		src = buf;
	}
	return hib_submit_io(REQ_OP_WRITE | REQ_SYNC, offset, src, hb);
}

static void release_swap_writer(struct swap_map_handle *handle)
{
	if (handle->cur)
		free_page((unsigned long)handle->cur);
	handle->cur = NULL;
}

static int get_swap_writer(struct swap_map_handle *handle)
{
	int ret;

	ret = swsusp_swap_check();
	if (ret) {
		if (ret != -ENOSPC)
			pr_err("Cannot find swap device, try swapon -a\n");
		return ret;
	}
	handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
	if (!handle->cur) {
		ret = -ENOMEM;
		goto err_close;
	}
	handle->cur_swap = alloc_swapdev_block(root_swap);
	if (!handle->cur_swap) {
		ret = -ENOSPC;
		goto err_rel;
	}
	handle->k = 0;
	handle->reqd_free_pages = reqd_free_pages();
	handle->first_sector = handle->cur_swap;
	return 0;
err_rel:
	release_swap_writer(handle);
err_close:
	swsusp_close(false);
	return ret;
}

static int swap_write_page(struct swap_map_handle *handle, void *buf,
		struct hib_bio_batch *hb)
{
	int error = 0;
	sector_t offset;

	if (!handle->cur)
		return -EINVAL;
	offset = alloc_swapdev_block(root_swap);
	error = write_page(buf, offset, hb);
	if (error)
		return error;
	handle->cur->entries[handle->k++] = offset;
	if (handle->k >= MAP_PAGE_ENTRIES) {
		offset = alloc_swapdev_block(root_swap);
		if (!offset)
			return -ENOSPC;
		handle->cur->next_swap = offset;
		error = write_page(handle->cur, handle->cur_swap, hb);
		if (error)
			goto out;
		clear_page(handle->cur);
		handle->cur_swap = offset;
		handle->k = 0;

		if (hb && low_free_pages() <= handle->reqd_free_pages) {
			error = hib_wait_io(hb);
			if (error)
				goto out;
			/*
			 * Recalculate the number of required free pages, to
			 * make sure we never take more than half.
			 */
			handle->reqd_free_pages = reqd_free_pages();
		}
	}
 out:
	return error;
}

static int flush_swap_writer(struct swap_map_handle *handle)
{
	if (handle->cur && handle->cur_swap)
		return write_page(handle->cur, handle->cur_swap, NULL);
	else
		return -EINVAL;
}

static int swap_writer_finish(struct swap_map_handle *handle,
		unsigned int flags, int error)
{
	if (!error) {
		pr_info("S");
		error = mark_swapfiles(handle, flags);
		pr_cont("|\n");
		flush_swap_writer(handle);
	}

	if (error)
		free_all_swap_pages(root_swap);
	release_swap_writer(handle);
	swsusp_close(false);

	return error;
}

/* We need to remember how much compressed data we need to read. */
#define LZO_HEADER	sizeof(size_t)

/* Number of pages/bytes we'll compress at one time. */
#define LZO_UNC_PAGES	32
#define LZO_UNC_SIZE	(LZO_UNC_PAGES * PAGE_SIZE)

/* Number of pages/bytes we need for compressed data (worst case). */
#define LZO_CMP_PAGES	DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
			             LZO_HEADER, PAGE_SIZE)
#define LZO_CMP_SIZE	(LZO_CMP_PAGES * PAGE_SIZE)
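
/*
 * Worked numbers, assuming 4 KiB pages and an 8-byte size_t: each
 * compression unit covers LZO_UNC_SIZE = 32 * 4096 = 128 KiB, and with
 * lzo1x_worst_compress(x) expanding to x + x/16 + 64 + 3, the worst
 * case is 131072 + 8192 + 67 + 8 (the LZO_HEADER) = 139339 bytes, so
 * LZO_CMP_PAGES = 35.  Incompressible data thus costs at most three
 * extra pages per 32-page unit.
 */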

/* Maximum number of threads for compression/decompression. */
#define LZO_THREADS	3

/* Minimum/maximum number of pages for read buffering. */
#define LZO_MIN_RD_PAGES	1024
#define LZO_MAX_RD_PAGES	8192


/**
 *	save_image - save the suspend image data
 */

static int save_image(struct swap_map_handle *handle,
                      struct snapshot_handle *snapshot,
                      unsigned int nr_to_write)
{
	unsigned int m;
	int ret;
	int nr_pages;
	int err2;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;

	hib_init_batch(&hb);

	pr_info("Saving image data pages (%u pages)...\n",
		nr_to_write);
	m = nr_to_write / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
	while (1) {
		ret = snapshot_read_next(snapshot);
		if (ret <= 0)
			break;
		ret = swap_write_page(handle, data_of(*snapshot), &hb);
		if (ret)
			break;
		if (!(nr_pages % m))
			pr_info("Image saving progress: %3d%%\n",
				nr_pages / m * 10);
		nr_pages++;
	}
	err2 = hib_wait_io(&hb);
	hib_finish_batch(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret)
		pr_info("Image saving done\n");
	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
	return ret;
}

/*
 * Structure used for CRC32.
 */
struct crc_data {
	struct task_struct *thr;                  /* thread */
	atomic_t ready;                           /* ready to start flag */
	atomic_t stop;                            /* ready to stop flag */
	unsigned run_threads;                     /* nr current threads */
	wait_queue_head_t go;                     /* start crc update */
	wait_queue_head_t done;                   /* crc update done */
	u32 *crc32;                               /* points to handle's crc32 */
	size_t *unc_len[LZO_THREADS];             /* uncompressed lengths */
	unsigned char *unc[LZO_THREADS];          /* uncompressed data */
};

/*
 * CRC32 update function that runs in its own thread.
 */
static int crc32_threadfn(void *data)
{
	struct crc_data *d = data;
	unsigned i;

	while (1) {
		wait_event(d->go, atomic_read_acquire(&d->ready) ||
		                  kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			atomic_set_release(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		for (i = 0; i < d->run_threads; i++)
			*d->crc32 = crc32_le(*d->crc32,
			                     d->unc[i], *d->unc_len[i]);
		atomic_set_release(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}
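
/*
 * The ready/stop handshake used by every worker thread here (CRC32,
 * compression and, on the read side, decompression): the producer
 * fills the buffers, does atomic_set_release(&ready, 1) and wakes the
 * "go" queue; the worker wakes on atomic_read_acquire(&ready), clears
 * it, does its work, then does atomic_set_release(&stop, 1) and wakes
 * "done".  The acquire/release pairing orders the flag updates against
 * the buffer contents without any extra locking.
 */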
/*
 * Structure used for LZO data compression.
 */
struct cmp_data {
	struct task_struct *thr;                  /* thread */
	atomic_t ready;                           /* ready to start flag */
	atomic_t stop;                            /* ready to stop flag */
	int ret;                                  /* return code */
	wait_queue_head_t go;                     /* start compression */
	wait_queue_head_t done;                   /* compression done */
	size_t unc_len;                           /* uncompressed length */
	size_t cmp_len;                           /* compressed length */
	unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
	unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
	unsigned char wrk[LZO1X_1_MEM_COMPRESS];  /* compression workspace */
};

/*
 * Compression function that runs in its own thread.
 */
static int lzo_compress_threadfn(void *data)
{
	struct cmp_data *d = data;

	while (1) {
		wait_event(d->go, atomic_read_acquire(&d->ready) ||
		                  kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			d->ret = -1;
			atomic_set_release(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		d->ret = lzo1x_1_compress(d->unc, d->unc_len,
		                          d->cmp + LZO_HEADER, &d->cmp_len,
		                          d->wrk);
		atomic_set_release(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}

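/*
 * A sketch of the flow in save_image_lzo() below: the main thread
 * copies up to LZO_UNC_PAGES snapshot pages into each worker's unc[]
 * buffer, kicks all compression workers plus the CRC32 thread, waits
 * for each worker in turn, and streams the size_t length word plus the
 * compressed payload to swap one page at a time via swap_write_page().
 */
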
/**
 * save_image_lzo - Save the suspend image data compressed with LZO.
 * @handle: Swap map handle to use for saving the image.
 * @snapshot: Image to read data from.
 * @nr_to_write: Number of pages to save.
 */
static int save_image_lzo(struct swap_map_handle *handle,
                          struct snapshot_handle *snapshot,
                          unsigned int nr_to_write)
{
	unsigned int m;
	int ret = 0;
	int nr_pages;
	int err2;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;
	size_t off;
	unsigned thr, run_threads, nr_threads;
	unsigned char *page = NULL;
	struct cmp_data *data = NULL;
	struct crc_data *crc = NULL;

	hib_init_batch(&hb);

	/*
	 * We'll limit the number of threads for compression to limit memory
	 * footprint.
	 */
	nr_threads = num_online_cpus() - 1;
	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);

	page = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH);
	if (!page) {
		pr_err("Failed to allocate LZO page\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	data = vzalloc(array_size(nr_threads, sizeof(*data)));
	if (!data) {
		pr_err("Failed to allocate LZO data\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	crc = kzalloc(sizeof(*crc), GFP_KERNEL);
	if (!crc) {
		pr_err("Failed to allocate crc\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/*
	 * Start the compression threads.
	 */
	for (thr = 0; thr < nr_threads; thr++) {
		init_waitqueue_head(&data[thr].go);
		init_waitqueue_head(&data[thr].done);

		data[thr].thr = kthread_run(lzo_compress_threadfn,
		                            &data[thr],
		                            "image_compress/%u", thr);
		if (IS_ERR(data[thr].thr)) {
			data[thr].thr = NULL;
			pr_err("Cannot start compression threads\n");
			ret = -ENOMEM;
			goto out_clean;
		}
	}

	/*
	 * Start the CRC32 thread.
	 */
	init_waitqueue_head(&crc->go);
	init_waitqueue_head(&crc->done);

	handle->crc32 = 0;
	crc->crc32 = &handle->crc32;
	for (thr = 0; thr < nr_threads; thr++) {
		crc->unc[thr] = data[thr].unc;
		crc->unc_len[thr] = &data[thr].unc_len;
	}

	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
	if (IS_ERR(crc->thr)) {
		crc->thr = NULL;
		pr_err("Cannot start CRC32 thread\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/*
	 * Adjust the number of required free pages after all allocations have
	 * been done. We don't want to run out of pages when writing.
	 */
	handle->reqd_free_pages = reqd_free_pages();

	pr_info("Using %u thread(s) for compression\n", nr_threads);
	pr_info("Compressing and saving image data (%u pages)...\n",
		nr_to_write);
	m = nr_to_write / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
	for (;;) {
		for (thr = 0; thr < nr_threads; thr++) {
			for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
				ret = snapshot_read_next(snapshot);
				if (ret < 0)
					goto out_finish;

				if (!ret)
					break;

				memcpy(data[thr].unc + off,
				       data_of(*snapshot), PAGE_SIZE);

				if (!(nr_pages % m))
					pr_info("Image saving progress: %3d%%\n",
						nr_pages / m * 10);
				nr_pages++;
			}
			if (!off)
				break;

			data[thr].unc_len = off;

			atomic_set_release(&data[thr].ready, 1);
			wake_up(&data[thr].go);
		}

		if (!thr)
			break;

		crc->run_threads = thr;
		atomic_set_release(&crc->ready, 1);
		wake_up(&crc->go);

		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
			wait_event(data[thr].done,
				atomic_read_acquire(&data[thr].stop));
			atomic_set(&data[thr].stop, 0);

			ret = data[thr].ret;

			if (ret < 0) {
				pr_err("LZO compression failed\n");
				goto out_finish;
			}

			if (unlikely(!data[thr].cmp_len ||
			             data[thr].cmp_len >
			             lzo1x_worst_compress(data[thr].unc_len))) {
				pr_err("Invalid LZO compressed length\n");
				ret = -1;
				goto out_finish;
			}

			*(size_t *)data[thr].cmp = data[thr].cmp_len;

			/*
			 * Given we are writing one page at a time to disk, we
			 * copy that much from the buffer, although the last
			 * bit will likely be smaller than a full page. This
			 * is OK - we saved the length of the compressed data,
			 * so any garbage at the end will be discarded when we
			 * read it.
			 */
			for (off = 0;
			     off < LZO_HEADER + data[thr].cmp_len;
			     off += PAGE_SIZE) {
				memcpy(page, data[thr].cmp + off, PAGE_SIZE);

				ret = swap_write_page(handle, page, &hb);
				if (ret)
					goto out_finish;
			}
		}

		wait_event(crc->done, atomic_read_acquire(&crc->stop));
		atomic_set(&crc->stop, 0);
	}

out_finish:
	err2 = hib_wait_io(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret)
		pr_info("Image saving done\n");
	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
out_clean:
	hib_finish_batch(&hb);
	if (crc) {
		if (crc->thr)
			kthread_stop(crc->thr);
		kfree(crc);
	}
	if (data) {
		for (thr = 0; thr < nr_threads; thr++)
			if (data[thr].thr)
				kthread_stop(data[thr].thr);
		vfree(data);
	}
	if (page)
		free_page((unsigned long)page);

	return ret;
}

/**
 *	enough_swap - Make sure we have enough swap to save the image.
 *
 *	Returns TRUE or FALSE after checking the total amount of swap
 *	space available from the resume partition.
 */

static int enough_swap(unsigned int nr_pages)
{
	unsigned int free_swap = count_swap_pages(root_swap, 1);
	unsigned int required;

	pr_debug("Free swap pages: %u\n", free_swap);

	required = PAGES_FOR_IO + nr_pages;
	return free_swap > required;
}

/**
 *	swsusp_write - Write entire image and metadata.
 *	@flags: flags to pass to the "boot" kernel in the image header
 *
 *	It is important _NOT_ to unmount filesystems at this point. We want
 *	them synced (in case something goes wrong), but we DO NOT want to mark
 *	any filesystem clean: it is not. (And it does not matter; if we resume
 *	correctly, we'll mark the system clean anyway.)
 */

int swsusp_write(unsigned int flags)
{
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;
	unsigned long pages;
	int error;

	pages = snapshot_get_image_size();
	error = get_swap_writer(&handle);
	if (error) {
		pr_err("Cannot get swap writer\n");
		return error;
	}
	if (flags & SF_NOCOMPRESS_MODE) {
		if (!enough_swap(pages)) {
			pr_err("Not enough free swap\n");
			error = -ENOSPC;
			goto out_finish;
		}
	}
	memset(&snapshot, 0, sizeof(struct snapshot_handle));
	error = snapshot_read_next(&snapshot);
	if (error < (int)PAGE_SIZE) {
		if (error >= 0)
			error = -EFAULT;

		goto out_finish;
	}
	header = (struct swsusp_info *)data_of(snapshot);
	error = swap_write_page(&handle, header, NULL);
	if (!error) {
		error = (flags & SF_NOCOMPRESS_MODE) ?
			save_image(&handle, &snapshot, pages - 1) :
			save_image_lzo(&handle, &snapshot, pages - 1);
	}
out_finish:
	error = swap_writer_finish(&handle, flags, error);
	return error;
}

/*
 *	The following functions allow us to read data using a swap map
 *	in a file-like way.
 */

static void release_swap_reader(struct swap_map_handle *handle)
{
	struct swap_map_page_list *tmp;

	while (handle->maps) {
		if (handle->maps->map)
			free_page((unsigned long)handle->maps->map);
		tmp = handle->maps;
		handle->maps = handle->maps->next;
		kfree(tmp);
	}
	handle->cur = NULL;
}

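/*
 * Unlike the write side, which only ever needs one map page in memory,
 * the reader walks the whole on-disk chain up front and keeps every
 * swap_map_page on a swap_map_page_list; swap_read_page() then frees
 * each map page as soon as its last entry has been consumed.
 */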
static int get_swap_reader(struct swap_map_handle *handle,
		unsigned int *flags_p)
{
	int error;
	struct swap_map_page_list *tmp, *last;
	sector_t offset;

	*flags_p = swsusp_header->flags;

	if (!swsusp_header->image) /* how can this happen? */
		return -EINVAL;

	handle->cur = NULL;
	last = handle->maps = NULL;
	offset = swsusp_header->image;
	while (offset) {
		tmp = kzalloc(sizeof(*handle->maps), GFP_KERNEL);
		if (!tmp) {
			release_swap_reader(handle);
			return -ENOMEM;
		}
		if (!handle->maps)
			handle->maps = tmp;
		if (last)
			last->next = tmp;
		last = tmp;

		tmp->map = (struct swap_map_page *)
			   __get_free_page(GFP_NOIO | __GFP_HIGH);
		if (!tmp->map) {
			release_swap_reader(handle);
			return -ENOMEM;
		}

		error = hib_submit_io(REQ_OP_READ, offset, tmp->map, NULL);
		if (error) {
			release_swap_reader(handle);
			return error;
		}
		offset = tmp->map->next_swap;
	}
	handle->k = 0;
	handle->cur = handle->maps->map;
	return 0;
}

static int swap_read_page(struct swap_map_handle *handle, void *buf,
		struct hib_bio_batch *hb)
{
	sector_t offset;
	int error;
	struct swap_map_page_list *tmp;

	if (!handle->cur)
		return -EINVAL;
	offset = handle->cur->entries[handle->k];
	if (!offset)
		return -EFAULT;
	error = hib_submit_io(REQ_OP_READ, offset, buf, hb);
	if (error)
		return error;
	if (++handle->k >= MAP_PAGE_ENTRIES) {
		handle->k = 0;
		free_page((unsigned long)handle->maps->map);
		tmp = handle->maps;
		handle->maps = handle->maps->next;
		kfree(tmp);
		if (!handle->maps)
			release_swap_reader(handle);
		else
			handle->cur = handle->maps->map;
	}
	return error;
}

static int swap_reader_finish(struct swap_map_handle *handle)
{
	release_swap_reader(handle);

	return 0;
}

/**
 *	load_image - load the image using the swap map handle
 *	@handle and the snapshot handle @snapshot
 *	(assume there are @nr_to_read pages to load)
 */

load_image(struct swap_map_handle * handle,struct snapshot_handle * snapshot,unsigned int nr_to_read)105861159a31SRafael J. Wysocki static int load_image(struct swap_map_handle *handle,
105961159a31SRafael J. Wysocki                       struct snapshot_handle *snapshot,
1060546e0d27SAndrew Morton                       unsigned int nr_to_read)
106161159a31SRafael J. Wysocki {
106261159a31SRafael J. Wysocki 	unsigned int m;
1063081a9d04SBojan Smojver 	int ret = 0;
1064db597605STina Ruchandani 	ktime_t start;
1065db597605STina Ruchandani 	ktime_t stop;
1066343df3c7SChristoph Hellwig 	struct hib_bio_batch hb;
1067546e0d27SAndrew Morton 	int err2;
1068546e0d27SAndrew Morton 	unsigned nr_pages;
106961159a31SRafael J. Wysocki 
1070343df3c7SChristoph Hellwig 	hib_init_batch(&hb);
1071343df3c7SChristoph Hellwig 
1072f6cf0545SJames Morse 	clean_pages_on_read = true;
107364ec72a1SJoe Perches 	pr_info("Loading image data pages (%u pages)...\n", nr_to_read);
1074d8150d35SBojan Smojver 	m = nr_to_read / 10;
107561159a31SRafael J. Wysocki 	if (!m)
107661159a31SRafael J. Wysocki 		m = 1;
107761159a31SRafael J. Wysocki 	nr_pages = 0;
1078db597605STina Ruchandani 	start = ktime_get();
1079546e0d27SAndrew Morton 	for ( ; ; ) {
1080081a9d04SBojan Smojver 		ret = snapshot_write_next(snapshot);
1081081a9d04SBojan Smojver 		if (ret <= 0)
1082546e0d27SAndrew Morton 			break;
1083343df3c7SChristoph Hellwig 		ret = swap_read_page(handle, data_of(*snapshot), &hb);
1084081a9d04SBojan Smojver 		if (ret)
1085546e0d27SAndrew Morton 			break;
1086546e0d27SAndrew Morton 		if (snapshot->sync_read)
1087343df3c7SChristoph Hellwig 			ret = hib_wait_io(&hb);
1088081a9d04SBojan Smojver 		if (ret)
108961159a31SRafael J. Wysocki 			break;
109061159a31SRafael J. Wysocki 		if (!(nr_pages % m))
109164ec72a1SJoe Perches 			pr_info("Image loading progress: %3d%%\n",
1092d8150d35SBojan Smojver 				nr_pages / m * 10);
109361159a31SRafael J. Wysocki 		nr_pages++;
109461159a31SRafael J. Wysocki 	}
1095343df3c7SChristoph Hellwig 	err2 = hib_wait_io(&hb);
109655c4478aSXiaoyi Chen 	hib_finish_batch(&hb);
1097db597605STina Ruchandani 	stop = ktime_get();
1098081a9d04SBojan Smojver 	if (!ret)
1099081a9d04SBojan Smojver 		ret = err2;
1100081a9d04SBojan Smojver 	if (!ret) {
110164ec72a1SJoe Perches 		pr_info("Image loading done\n");
11028357376dSRafael J. Wysocki 		snapshot_write_finalize(snapshot);
110361159a31SRafael J. Wysocki 		if (!snapshot_image_loaded(snapshot))
1104081a9d04SBojan Smojver 			ret = -ENODATA;
1105d8150d35SBojan Smojver 	}
1106db597605STina Ruchandani 	swsusp_show_speed(start, stop, nr_to_read, "Read");
1107081a9d04SBojan Smojver 	return ret;
1108081a9d04SBojan Smojver }
1109081a9d04SBojan Smojver 
11106b37dfcbSRandy Dunlap /*
1111081a9d04SBojan Smojver  * Structure used for LZO data decompression.
1112081a9d04SBojan Smojver  */
1113081a9d04SBojan Smojver struct dec_data {
1114081a9d04SBojan Smojver 	struct task_struct *thr;                  /* thread */
1115081a9d04SBojan Smojver 	atomic_t ready;                           /* ready to start flag */
1116081a9d04SBojan Smojver 	atomic_t stop;                            /* ready to stop flag */
1117081a9d04SBojan Smojver 	int ret;                                  /* return code */
1118081a9d04SBojan Smojver 	wait_queue_head_t go;                     /* start decompression */
1119081a9d04SBojan Smojver 	wait_queue_head_t done;                   /* decompression done */
1120081a9d04SBojan Smojver 	size_t unc_len;                           /* uncompressed length */
1121081a9d04SBojan Smojver 	size_t cmp_len;                           /* compressed length */
1122081a9d04SBojan Smojver 	unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
1123081a9d04SBojan Smojver 	unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
1124081a9d04SBojan Smojver };
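
/*
 * Each decompression thread owns one dec_data, so the unc[]/cmp[] buffers
 * need no locking of their own; the ready/stop flags and the go/done wait
 * queues are the only synchronization. LZO_UNC_SIZE and LZO_CMP_SIZE are
 * multi-page buffer sizes defined earlier in this file, which makes each
 * dec_data large enough that the array of them below is allocated with
 * vzalloc() rather than kmalloc().
 */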
1125081a9d04SBojan Smojver 
11266b37dfcbSRandy Dunlap /*
11276be2408aSZhen Lei  * Decompression function that runs in its own thread.
1128081a9d04SBojan Smojver  */
1129081a9d04SBojan Smojver static int lzo_decompress_threadfn(void *data)
1130081a9d04SBojan Smojver {
1131081a9d04SBojan Smojver 	struct dec_data *d = data;
1132081a9d04SBojan Smojver 
1133081a9d04SBojan Smojver 	while (1) {
1134*7692e29dSHongchen Zhang 		wait_event(d->go, atomic_read_acquire(&d->ready) ||
1135081a9d04SBojan Smojver 		                  kthread_should_stop());
1136081a9d04SBojan Smojver 		if (kthread_should_stop()) {
1137081a9d04SBojan Smojver 			d->thr = NULL;
1138081a9d04SBojan Smojver 			d->ret = -1;
1139*7692e29dSHongchen Zhang 			atomic_set_release(&d->stop, 1);
1140081a9d04SBojan Smojver 			wake_up(&d->done);
1141081a9d04SBojan Smojver 			break;
1142081a9d04SBojan Smojver 		}
1143081a9d04SBojan Smojver 		atomic_set(&d->ready, 0);
1144081a9d04SBojan Smojver 
1145081a9d04SBojan Smojver 		d->unc_len = LZO_UNC_SIZE;
1146081a9d04SBojan Smojver 		d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
1147081a9d04SBojan Smojver 		                               d->unc, &d->unc_len);
1148f6cf0545SJames Morse 		if (clean_pages_on_decompress)
1149f6cf0545SJames Morse 			flush_icache_range((unsigned long)d->unc,
1150f6cf0545SJames Morse 					   (unsigned long)d->unc + d->unc_len);
1151f6cf0545SJames Morse 
1152*7692e29dSHongchen Zhang 		atomic_set_release(&d->stop, 1);
1153081a9d04SBojan Smojver 		wake_up(&d->done);
1154081a9d04SBojan Smojver 	}
1155081a9d04SBojan Smojver 	return 0;
115661159a31SRafael J. Wysocki }
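
/*
 * The producer side of the handshake above, as load_image_lzo() uses it.
 * The release/acquire pairing on ->ready and ->stop orders the buffer
 * contents against the flags, so the buffers themselves need no locks:
 *
 *	d->cmp_len = ...;			(fill d->cmp first)
 *	atomic_set_release(&d->ready, 1);	(then publish the work)
 *	wake_up(&d->go);
 *	...
 *	wait_event(d->done, atomic_read_acquire(&d->stop));
 *	atomic_set(&d->stop, 0);		(re-arm for the next round)
 */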
115761159a31SRafael J. Wysocki 
1158a634cc10SRafael J. Wysocki /**
1159f996fc96SBojan Smojver  * load_image_lzo - Load compressed image data and decompress it with LZO.
1160f996fc96SBojan Smojver  * @handle: Swap map handle to use for loading data.
1161f996fc96SBojan Smojver  * @snapshot: Image to copy uncompressed data into.
1162f996fc96SBojan Smojver  * @nr_to_read: Number of pages to load.
1163f996fc96SBojan Smojver  */
1164f996fc96SBojan Smojver static int load_image_lzo(struct swap_map_handle *handle,
1165f996fc96SBojan Smojver                           struct snapshot_handle *snapshot,
1166f996fc96SBojan Smojver                           unsigned int nr_to_read)
1167f996fc96SBojan Smojver {
1168f996fc96SBojan Smojver 	unsigned int m;
1169081a9d04SBojan Smojver 	int ret = 0;
1170081a9d04SBojan Smojver 	int eof = 0;
1171343df3c7SChristoph Hellwig 	struct hib_bio_batch hb;
1172db597605STina Ruchandani 	ktime_t start;
1173db597605STina Ruchandani 	ktime_t stop;
1174f996fc96SBojan Smojver 	unsigned nr_pages;
1175081a9d04SBojan Smojver 	size_t off;
1176081a9d04SBojan Smojver 	unsigned i, thr, run_threads, nr_threads;
1177081a9d04SBojan Smojver 	unsigned ring = 0, pg = 0, ring_size = 0,
1178081a9d04SBojan Smojver 	         have = 0, want, need, asked = 0;
11795a21d489SBojan Smojver 	unsigned long read_pages = 0;
1180081a9d04SBojan Smojver 	unsigned char **page = NULL;
1181081a9d04SBojan Smojver 	struct dec_data *data = NULL;
1182081a9d04SBojan Smojver 	struct crc_data *crc = NULL;
1183f996fc96SBojan Smojver 
1184343df3c7SChristoph Hellwig 	hib_init_batch(&hb);
1185343df3c7SChristoph Hellwig 
1186081a9d04SBojan Smojver 	/*
1187081a9d04SBojan Smojver 	 * Limit the number of decompression threads to keep the memory
1188081a9d04SBojan Smojver 	 * footprint bounded.
1189081a9d04SBojan Smojver 	 */
1190081a9d04SBojan Smojver 	nr_threads = num_online_cpus() - 1;
1191081a9d04SBojan Smojver 	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
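	/*
	 * For example, on a 4-CPU machine this leaves one CPU for the I/O
	 * and CRC32 work and uses the rest for decompression, subject to
	 * the LZO_THREADS cap defined earlier in this file.
	 */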
1192081a9d04SBojan Smojver 
119342bc47b3SKees Cook 	page = vmalloc(array_size(LZO_MAX_RD_PAGES, sizeof(*page)));
1194081a9d04SBojan Smojver 	if (!page) {
119564ec72a1SJoe Perches 		pr_err("Failed to allocate LZO page\n");
1196081a9d04SBojan Smojver 		ret = -ENOMEM;
1197081a9d04SBojan Smojver 		goto out_clean;
1198081a9d04SBojan Smojver 	}
11999f339cafSBojan Smojver 
12009437e393SCai Huoqing 	data = vzalloc(array_size(nr_threads, sizeof(*data)));
1201081a9d04SBojan Smojver 	if (!data) {
120264ec72a1SJoe Perches 		pr_err("Failed to allocate LZO data\n");
1203081a9d04SBojan Smojver 		ret = -ENOMEM;
1204081a9d04SBojan Smojver 		goto out_clean;
1205081a9d04SBojan Smojver 	}
12069f339cafSBojan Smojver 
12079437e393SCai Huoqing 	crc = kzalloc(sizeof(*crc), GFP_KERNEL);
1208081a9d04SBojan Smojver 	if (!crc) {
120964ec72a1SJoe Perches 		pr_err("Failed to allocate crc\n");
1210081a9d04SBojan Smojver 		ret = -ENOMEM;
1211081a9d04SBojan Smojver 		goto out_clean;
1212081a9d04SBojan Smojver 	}
1213081a9d04SBojan Smojver 
1214f6cf0545SJames Morse 	clean_pages_on_decompress = true;
1215f6cf0545SJames Morse 
1216081a9d04SBojan Smojver 	/*
1217081a9d04SBojan Smojver 	 * Start the decompression threads.
1218081a9d04SBojan Smojver 	 */
1219081a9d04SBojan Smojver 	for (thr = 0; thr < nr_threads; thr++) {
1220081a9d04SBojan Smojver 		init_waitqueue_head(&data[thr].go);
1221081a9d04SBojan Smojver 		init_waitqueue_head(&data[thr].done);
1222081a9d04SBojan Smojver 
1223081a9d04SBojan Smojver 		data[thr].thr = kthread_run(lzo_decompress_threadfn,
1224081a9d04SBojan Smojver 		                            &data[thr],
1225081a9d04SBojan Smojver 		                            "image_decompress/%u", thr);
1226081a9d04SBojan Smojver 		if (IS_ERR(data[thr].thr)) {
1227081a9d04SBojan Smojver 			data[thr].thr = NULL;
122864ec72a1SJoe Perches 			pr_err("Cannot start decompression threads\n");
1229081a9d04SBojan Smojver 			ret = -ENOMEM;
1230081a9d04SBojan Smojver 			goto out_clean;
1231f996fc96SBojan Smojver 		}
12329f339cafSBojan Smojver 	}
1233f996fc96SBojan Smojver 
1234081a9d04SBojan Smojver 	/*
1235081a9d04SBojan Smojver 	 * Start the CRC32 thread.
1236081a9d04SBojan Smojver 	 */
1237081a9d04SBojan Smojver 	init_waitqueue_head(&crc->go);
1238081a9d04SBojan Smojver 	init_waitqueue_head(&crc->done);
12399f339cafSBojan Smojver 
1240081a9d04SBojan Smojver 	handle->crc32 = 0;
1241081a9d04SBojan Smojver 	crc->crc32 = &handle->crc32;
1242081a9d04SBojan Smojver 	for (thr = 0; thr < nr_threads; thr++) {
1243081a9d04SBojan Smojver 		crc->unc[thr] = data[thr].unc;
1244081a9d04SBojan Smojver 		crc->unc_len[thr] = &data[thr].unc_len;
1245f996fc96SBojan Smojver 	}
1246f996fc96SBojan Smojver 
1247081a9d04SBojan Smojver 	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
1248081a9d04SBojan Smojver 	if (IS_ERR(crc->thr)) {
1249081a9d04SBojan Smojver 		crc->thr = NULL;
125064ec72a1SJoe Perches 		pr_err("Cannot start CRC32 thread\n");
1251081a9d04SBojan Smojver 		ret = -ENOMEM;
1252081a9d04SBojan Smojver 		goto out_clean;
1253f996fc96SBojan Smojver 	}
1254f996fc96SBojan Smojver 
1255081a9d04SBojan Smojver 	/*
12565a21d489SBojan Smojver 	 * Set the number of pages for read buffering.
12575a21d489SBojan Smojver 	 * This is complete guesswork, because we'll only know the real
12585a21d489SBojan Smojver 	 * picture once prepare_image() is called, which is much later on
12595a21d489SBojan Smojver 	 * during the image load phase. We'll assume the worst case and
12605a21d489SBojan Smojver 	 * say that none of the image pages are from high memory.
1261081a9d04SBojan Smojver 	 */
12625a21d489SBojan Smojver 	if (low_free_pages() > snapshot_get_image_size())
12635a21d489SBojan Smojver 		read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
12645a21d489SBojan Smojver 	read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);
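	/*
	 * Worked example (illustrative numbers only): with 4 KiB pages and
	 * 64 MiB of low memory headroom over the image size, half of that
	 * headroom is 8192 pages, which is then clamped into the
	 * [LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES] window defined earlier in
	 * this file.
	 */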
1265081a9d04SBojan Smojver 
1266081a9d04SBojan Smojver 	for (i = 0; i < read_pages; i++) {
1267081a9d04SBojan Smojver 		page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
12680eb0b63cSChristoph Hellwig 						  GFP_NOIO | __GFP_HIGH :
12690eb0b63cSChristoph Hellwig 						  GFP_NOIO | __GFP_NOWARN |
12705a21d489SBojan Smojver 						  __GFP_NORETRY);
12715a21d489SBojan Smojver 
1272081a9d04SBojan Smojver 		if (!page[i]) {
1273081a9d04SBojan Smojver 			if (i < LZO_CMP_PAGES) {
1274081a9d04SBojan Smojver 				ring_size = i;
127564ec72a1SJoe Perches 				pr_err("Failed to allocate LZO pages\n");
1276081a9d04SBojan Smojver 				ret = -ENOMEM;
1277081a9d04SBojan Smojver 				goto out_clean;
1278081a9d04SBojan Smojver 			} else {
1279081a9d04SBojan Smojver 				break;
1280081a9d04SBojan Smojver 			}
1281081a9d04SBojan Smojver 		}
1282081a9d04SBojan Smojver 	}
1283081a9d04SBojan Smojver 	want = ring_size = i;
1284081a9d04SBojan Smojver 
128564ec72a1SJoe Perches 	pr_info("Using %u thread(s) for decompression\n", nr_threads);
128664ec72a1SJoe Perches 	pr_info("Loading and decompressing image data (%u pages)...\n",
128764ec72a1SJoe Perches 		nr_to_read);
1288d8150d35SBojan Smojver 	m = nr_to_read / 10;
1289f996fc96SBojan Smojver 	if (!m)
1290f996fc96SBojan Smojver 		m = 1;
1291f996fc96SBojan Smojver 	nr_pages = 0;
1292db597605STina Ruchandani 	start = ktime_get();
1293f996fc96SBojan Smojver 
1294081a9d04SBojan Smojver 	ret = snapshot_write_next(snapshot);
1295081a9d04SBojan Smojver 	if (ret <= 0)
1296f996fc96SBojan Smojver 		goto out_finish;
1297f996fc96SBojan Smojver 
1298f996fc96SBojan Smojver 	for (;;) {
1299081a9d04SBojan Smojver 		for (i = 0; !eof && i < want; i++) {
1300343df3c7SChristoph Hellwig 			ret = swap_read_page(handle, page[ring], &hb);
1301081a9d04SBojan Smojver 			if (ret) {
1302081a9d04SBojan Smojver 				/*
1303081a9d04SBojan Smojver 				 * On real read error, finish. On end of data,
1304081a9d04SBojan Smojver 				 * set EOF flag and just exit the read loop.
1305081a9d04SBojan Smojver 				 */
1306081a9d04SBojan Smojver 				if (handle->cur &&
1307081a9d04SBojan Smojver 				    handle->cur->entries[handle->k]) {
1308081a9d04SBojan Smojver 					goto out_finish;
1309081a9d04SBojan Smojver 				} else {
1310081a9d04SBojan Smojver 					eof = 1;
1311f996fc96SBojan Smojver 					break;
1312f996fc96SBojan Smojver 				}
1313081a9d04SBojan Smojver 			}
1314081a9d04SBojan Smojver 			if (++ring >= ring_size)
1315081a9d04SBojan Smojver 				ring = 0;
1316081a9d04SBojan Smojver 		}
1317081a9d04SBojan Smojver 		asked += i;
1318081a9d04SBojan Smojver 		want -= i;
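
		/*
		 * Bookkeeping for the rest of this loop: 'want' counts ring
		 * slots still to be submitted, 'asked' counts reads
		 * submitted but not yet waited for, and 'have' counts pages
		 * whose data has arrived and can be handed to the
		 * decompression threads.
		 */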
1319f996fc96SBojan Smojver 
1320081a9d04SBojan Smojver 		/*
1321081a9d04SBojan Smojver 		 * We are out of data, wait for some more.
1322081a9d04SBojan Smojver 		 */
1323081a9d04SBojan Smojver 		if (!have) {
1324081a9d04SBojan Smojver 			if (!asked)
1325081a9d04SBojan Smojver 				break;
1326081a9d04SBojan Smojver 
1327343df3c7SChristoph Hellwig 			ret = hib_wait_io(&hb);
1328081a9d04SBojan Smojver 			if (ret)
1329081a9d04SBojan Smojver 				goto out_finish;
1330081a9d04SBojan Smojver 			have += asked;
1331081a9d04SBojan Smojver 			asked = 0;
1332081a9d04SBojan Smojver 			if (eof)
1333081a9d04SBojan Smojver 				eof = 2;
1334081a9d04SBojan Smojver 		}
1335081a9d04SBojan Smojver 
1336081a9d04SBojan Smojver 		if (crc->run_threads) {
1337*7692e29dSHongchen Zhang 			wait_event(crc->done, atomic_read_acquire(&crc->stop));
1338081a9d04SBojan Smojver 			atomic_set(&crc->stop, 0);
1339081a9d04SBojan Smojver 			crc->run_threads = 0;
1340081a9d04SBojan Smojver 		}
1341081a9d04SBojan Smojver 
1342081a9d04SBojan Smojver 		for (thr = 0; have && thr < nr_threads; thr++) {
1343081a9d04SBojan Smojver 			data[thr].cmp_len = *(size_t *)page[pg];
1344081a9d04SBojan Smojver 			if (unlikely(!data[thr].cmp_len ||
1345081a9d04SBojan Smojver 			             data[thr].cmp_len >
1346081a9d04SBojan Smojver 			             lzo1x_worst_compress(LZO_UNC_SIZE))) {
134764ec72a1SJoe Perches 				pr_err("Invalid LZO compressed length\n");
1348081a9d04SBojan Smojver 				ret = -1;
13499f339cafSBojan Smojver 				goto out_finish;
13509f339cafSBojan Smojver 			}
13519f339cafSBojan Smojver 
1352081a9d04SBojan Smojver 			need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER,
1353081a9d04SBojan Smojver 			                    PAGE_SIZE);
1354081a9d04SBojan Smojver 			if (need > have) {
1355081a9d04SBojan Smojver 				if (eof > 1) {
1356081a9d04SBojan Smojver 					ret = -1;
1357f996fc96SBojan Smojver 					goto out_finish;
1358f996fc96SBojan Smojver 				}
1359f996fc96SBojan Smojver 				break;
1360f996fc96SBojan Smojver 			}
1361f996fc96SBojan Smojver 
1362081a9d04SBojan Smojver 			for (off = 0;
1363081a9d04SBojan Smojver 			     off < LZO_HEADER + data[thr].cmp_len;
1364081a9d04SBojan Smojver 			     off += PAGE_SIZE) {
1365081a9d04SBojan Smojver 				memcpy(data[thr].cmp + off,
1366081a9d04SBojan Smojver 				       page[pg], PAGE_SIZE);
1367081a9d04SBojan Smojver 				have--;
1368081a9d04SBojan Smojver 				want++;
1369081a9d04SBojan Smojver 				if (++pg >= ring_size)
1370081a9d04SBojan Smojver 					pg = 0;
1371f996fc96SBojan Smojver 			}
1372f996fc96SBojan Smojver 
1373*7692e29dSHongchen Zhang 			atomic_set_release(&data[thr].ready, 1);
1374081a9d04SBojan Smojver 			wake_up(&data[thr].go);
1375081a9d04SBojan Smojver 		}
1376081a9d04SBojan Smojver 
1377081a9d04SBojan Smojver 		/*
1378081a9d04SBojan Smojver 		 * Wait for more data while we are decompressing.
1379081a9d04SBojan Smojver 		 */
1380081a9d04SBojan Smojver 		if (have < LZO_CMP_PAGES && asked) {
1381343df3c7SChristoph Hellwig 			ret = hib_wait_io(&hb);
1382081a9d04SBojan Smojver 			if (ret)
1383081a9d04SBojan Smojver 				goto out_finish;
1384081a9d04SBojan Smojver 			have += asked;
1385081a9d04SBojan Smojver 			asked = 0;
1386081a9d04SBojan Smojver 			if (eof)
1387081a9d04SBojan Smojver 				eof = 2;
1388081a9d04SBojan Smojver 		}
1389081a9d04SBojan Smojver 
1390081a9d04SBojan Smojver 		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
1391081a9d04SBojan Smojver 			wait_event(data[thr].done,
1392*7692e29dSHongchen Zhang 				atomic_read_acquire(&data[thr].stop));
1393081a9d04SBojan Smojver 			atomic_set(&data[thr].stop, 0);
1394081a9d04SBojan Smojver 
1395081a9d04SBojan Smojver 			ret = data[thr].ret;
1396081a9d04SBojan Smojver 
1397081a9d04SBojan Smojver 			if (ret < 0) {
139864ec72a1SJoe Perches 				pr_err("LZO decompression failed\n");
1399081a9d04SBojan Smojver 				goto out_finish;
1400081a9d04SBojan Smojver 			}
1401081a9d04SBojan Smojver 
1402081a9d04SBojan Smojver 			if (unlikely(!data[thr].unc_len ||
1403081a9d04SBojan Smojver 			             data[thr].unc_len > LZO_UNC_SIZE ||
1404081a9d04SBojan Smojver 			             data[thr].unc_len & (PAGE_SIZE - 1))) {
140564ec72a1SJoe Perches 				pr_err("Invalid LZO uncompressed length\n");
1406081a9d04SBojan Smojver 				ret = -1;
1407081a9d04SBojan Smojver 				goto out_finish;
1408081a9d04SBojan Smojver 			}
1409081a9d04SBojan Smojver 
1410081a9d04SBojan Smojver 			for (off = 0;
1411081a9d04SBojan Smojver 			     off < data[thr].unc_len; off += PAGE_SIZE) {
1412081a9d04SBojan Smojver 				memcpy(data_of(*snapshot),
1413081a9d04SBojan Smojver 				       data[thr].unc + off, PAGE_SIZE);
1414f996fc96SBojan Smojver 
1415f996fc96SBojan Smojver 				if (!(nr_pages % m))
141664ec72a1SJoe Perches 					pr_info("Image loading progress: %3d%%\n",
1417d8150d35SBojan Smojver 						nr_pages / m * 10);
1418f996fc96SBojan Smojver 				nr_pages++;
1419f996fc96SBojan Smojver 
1420081a9d04SBojan Smojver 				ret = snapshot_write_next(snapshot);
1421081a9d04SBojan Smojver 				if (ret <= 0) {
1422081a9d04SBojan Smojver 					crc->run_threads = thr + 1;
1423*7692e29dSHongchen Zhang 					atomic_set_release(&crc->ready, 1);
1424081a9d04SBojan Smojver 					wake_up(&crc->go);
1425f996fc96SBojan Smojver 					goto out_finish;
1426f996fc96SBojan Smojver 				}
1427f996fc96SBojan Smojver 			}
1428081a9d04SBojan Smojver 		}
1429081a9d04SBojan Smojver 
1430081a9d04SBojan Smojver 		crc->run_threads = thr;
1431*7692e29dSHongchen Zhang 		atomic_set_release(&crc->ready, 1);
1432081a9d04SBojan Smojver 		wake_up(&crc->go);
1433081a9d04SBojan Smojver 	}
1434f996fc96SBojan Smojver 
1435f996fc96SBojan Smojver out_finish:
1436081a9d04SBojan Smojver 	if (crc->run_threads) {
1437*7692e29dSHongchen Zhang 		wait_event(crc->done, atomic_read_acquire(&crc->stop));
1438081a9d04SBojan Smojver 		atomic_set(&crc->stop, 0);
1439081a9d04SBojan Smojver 	}
1440db597605STina Ruchandani 	stop = ktime_get();
1441081a9d04SBojan Smojver 	if (!ret) {
144264ec72a1SJoe Perches 		pr_info("Image loading done\n");
1443f996fc96SBojan Smojver 		snapshot_write_finalize(snapshot);
1444f996fc96SBojan Smojver 		if (!snapshot_image_loaded(snapshot))
1445081a9d04SBojan Smojver 			ret = -ENODATA;
1446081a9d04SBojan Smojver 		if (!ret) {
1447081a9d04SBojan Smojver 			if (swsusp_header->flags & SF_CRC32_MODE) {
1448081a9d04SBojan Smojver 				if (handle->crc32 != swsusp_header->crc32) {
144964ec72a1SJoe Perches 					pr_err("Invalid image CRC32!\n");
1450081a9d04SBojan Smojver 					ret = -ENODATA;
1451081a9d04SBojan Smojver 				}
1452081a9d04SBojan Smojver 			}
1453081a9d04SBojan Smojver 		}
1454d8150d35SBojan Smojver 	}
1455db597605STina Ruchandani 	swsusp_show_speed(start, stop, nr_to_read, "Read");
1456081a9d04SBojan Smojver out_clean:
145755c4478aSXiaoyi Chen 	hib_finish_batch(&hb);
1458081a9d04SBojan Smojver 	for (i = 0; i < ring_size; i++)
14599f339cafSBojan Smojver 		free_page((unsigned long)page[i]);
1460081a9d04SBojan Smojver 	if (crc) {
1461081a9d04SBojan Smojver 		if (crc->thr)
1462081a9d04SBojan Smojver 			kthread_stop(crc->thr);
1463081a9d04SBojan Smojver 		kfree(crc);
1464081a9d04SBojan Smojver 	}
1465081a9d04SBojan Smojver 	if (data) {
1466081a9d04SBojan Smojver 		for (thr = 0; thr < nr_threads; thr++)
1467081a9d04SBojan Smojver 			if (data[thr].thr)
1468081a9d04SBojan Smojver 				kthread_stop(data[thr].thr);
1469081a9d04SBojan Smojver 		vfree(data);
1470081a9d04SBojan Smojver 	}
14716c45de0dSMarkus Elfring 	vfree(page);
1472f996fc96SBojan Smojver 
1473081a9d04SBojan Smojver 	return ret;
1474f996fc96SBojan Smojver }
1475f996fc96SBojan Smojver 
1476f996fc96SBojan Smojver /**
1477a634cc10SRafael J. Wysocki  *	swsusp_read - read the hibernation image.
1478a634cc10SRafael J. Wysocki  *	@flags_p: Memory location to store the flags passed by the "frozen"
1479b595076aSUwe Kleine-König  *		  kernel in the image header.
1480a634cc10SRafael J. Wysocki  */
1481a634cc10SRafael J. Wysocki 
1482a634cc10SRafael J. Wysocki int swsusp_read(unsigned int *flags_p)
148361159a31SRafael J. Wysocki {
148461159a31SRafael J. Wysocki 	int error;
148561159a31SRafael J. Wysocki 	struct swap_map_handle handle;
148661159a31SRafael J. Wysocki 	struct snapshot_handle snapshot;
148761159a31SRafael J. Wysocki 	struct swsusp_info *header;
148861159a31SRafael J. Wysocki 
148961159a31SRafael J. Wysocki 	memset(&snapshot, 0, sizeof(struct snapshot_handle));
1490d3c1b24cSJiri Slaby 	error = snapshot_write_next(&snapshot);
1491d5641c64SChengguang Xu 	if (error < (int)PAGE_SIZE)
149261159a31SRafael J. Wysocki 		return error < 0 ? error : -EFAULT;
149361159a31SRafael J. Wysocki 	header = (struct swsusp_info *)data_of(snapshot);
14946f612af5SJiri Slaby 	error = get_swap_reader(&handle, flags_p);
14956f612af5SJiri Slaby 	if (error)
14966f612af5SJiri Slaby 		goto end;
1498546e0d27SAndrew Morton 	error = swap_read_page(&handle, header, NULL);
1499f996fc96SBojan Smojver 	if (!error) {
1500f996fc96SBojan Smojver 		error = (*flags_p & SF_NOCOMPRESS_MODE) ?
1501f996fc96SBojan Smojver 			load_image(&handle, &snapshot, header->pages - 1) :
1502f996fc96SBojan Smojver 			load_image_lzo(&handle, &snapshot, header->pages - 1);
1503f996fc96SBojan Smojver 	}
15046f612af5SJiri Slaby 	swap_reader_finish(&handle);
15056f612af5SJiri Slaby end:
150661159a31SRafael J. Wysocki 	if (!error)
150764ec72a1SJoe Perches 		pr_debug("Image successfully loaded\n");
150861159a31SRafael J. Wysocki 	else
150964ec72a1SJoe Perches 		pr_debug("Error %d resuming\n", error);
151061159a31SRafael J. Wysocki 	return error;
151161159a31SRafael J. Wysocki }
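
/*
 * Roughly how this is driven at resume time (the callers live in
 * hibernate.c, outside this file, so this is an assumed outline):
 * swsusp_check() probes the signature on the resume device, swsusp_read()
 * pulls in the image, and swsusp_close() releases the block device.
 */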
151261159a31SRafael J. Wysocki 
1513c889d079SChristoph Hellwig static void *swsusp_holder;
1514c889d079SChristoph Hellwig 
151561159a31SRafael J. Wysocki /**
151661159a31SRafael J. Wysocki  * swsusp_check - Check for swsusp signature in the resume device
151740d84e19SChen Yu  * @exclusive: Open the resume device exclusively.
151861159a31SRafael J. Wysocki  */
151961159a31SRafael J. Wysocki 
152040d84e19SChen Yu int swsusp_check(bool exclusive)
152161159a31SRafael J. Wysocki {
152240d84e19SChen Yu 	void *holder = exclusive ? &swsusp_holder : NULL;
152361159a31SRafael J. Wysocki 	int error;
15245904de0dSChen Yu 
152505bdb996SChristoph Hellwig 	hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device, BLK_OPEN_READ,
15262736e8eeSChristoph Hellwig 					    holder, NULL);
15278a0d613fSJiri Slaby 	if (!IS_ERR(hib_resume_bdev)) {
15288a0d613fSJiri Slaby 		set_blocksize(hib_resume_bdev, PAGE_SIZE);
15293ecb01dfSJan Beulich 		clear_page(swsusp_header);
1530568e34edSBart Van Assche 		error = hib_submit_io(REQ_OP_READ, swsusp_resume_block,
15311b29c164SVivek Goyal 					swsusp_header, NULL);
15329a154d9dSRafael J. Wysocki 		if (error)
153376b57e61SJiri Slaby 			goto put;
15349a154d9dSRafael J. Wysocki 
15353624eb04SRafael J. Wysocki 		if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
15361b29c164SVivek Goyal 			memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
153761159a31SRafael J. Wysocki 			/* Reset swap signature now */
1538568e34edSBart Van Assche 			error = hib_submit_io(REQ_OP_WRITE | REQ_SYNC,
1539162b99e3SMike Christie 						swsusp_resume_block,
15401b29c164SVivek Goyal 						swsusp_header, NULL);
154161159a31SRafael J. Wysocki 		} else {
154276b57e61SJiri Slaby 			error = -EINVAL;
154361159a31SRafael J. Wysocki 		}
154474d95555SDavid Woodhouse 		if (!error && swsusp_header->flags & SF_HW_SIG &&
154574d95555SDavid Woodhouse 		    swsusp_header->hw_sig != swsusp_hardware_signature) {
154674d95555SDavid Woodhouse 			pr_info("Suspend image hardware signature mismatch (%08x now %08x); aborting resume.\n",
154774d95555SDavid Woodhouse 				swsusp_header->hw_sig, swsusp_hardware_signature);
154874d95555SDavid Woodhouse 			error = -EINVAL;
154974d95555SDavid Woodhouse 		}
155076b57e61SJiri Slaby 
155176b57e61SJiri Slaby put:
155261159a31SRafael J. Wysocki 		if (error)
15532736e8eeSChristoph Hellwig 			blkdev_put(hib_resume_bdev, holder);
155461159a31SRafael J. Wysocki 		else
155564ec72a1SJoe Perches 			pr_debug("Image signature found, resuming\n");
155661159a31SRafael J. Wysocki 	} else {
15578a0d613fSJiri Slaby 		error = PTR_ERR(hib_resume_bdev);
155861159a31SRafael J. Wysocki 	}
155961159a31SRafael J. Wysocki 
156061159a31SRafael J. Wysocki 	if (error)
156164ec72a1SJoe Perches 		pr_debug("Image not found (code %d)\n", error);
156261159a31SRafael J. Wysocki 
156361159a31SRafael J. Wysocki 	return error;
156461159a31SRafael J. Wysocki }
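
/*
 * Note the one-shot behaviour above: on a successful signature match the
 * original (pre-hibernation) signature is written back right away, so a
 * crash after this point cannot cause a later boot to resume from a stale
 * image.
 */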
156561159a31SRafael J. Wysocki 
156661159a31SRafael J. Wysocki /**
156761159a31SRafael J. Wysocki  * swsusp_close - close swap device.
156840d84e19SChen Yu  * @exclusive: Close the resume device that was opened exclusively.
156961159a31SRafael J. Wysocki  */
157061159a31SRafael J. Wysocki 
157140d84e19SChen Yu void swsusp_close(bool exclusive)
157261159a31SRafael J. Wysocki {
15738a0d613fSJiri Slaby 	if (IS_ERR(hib_resume_bdev)) {
157464ec72a1SJoe Perches 		pr_debug("Image device not initialised\n");
157561159a31SRafael J. Wysocki 		return;
157661159a31SRafael J. Wysocki 	}
157761159a31SRafael J. Wysocki 
157840d84e19SChen Yu 	blkdev_put(hib_resume_bdev, exclusive ? &swsusp_holder : NULL);
157961159a31SRafael J. Wysocki }
15801b29c164SVivek Goyal 
158162c552ccSBojan Smojver /**
158262c552ccSBojan Smojver  *      swsusp_unmark - Unmark swsusp signature in the resume device
158362c552ccSBojan Smojver  */
158462c552ccSBojan Smojver 
158562c552ccSBojan Smojver #ifdef CONFIG_SUSPEND
158662c552ccSBojan Smojver int swsusp_unmark(void)
158762c552ccSBojan Smojver {
158862c552ccSBojan Smojver 	int error;
158962c552ccSBojan Smojver 
1590568e34edSBart Van Assche 	error = hib_submit_io(REQ_OP_READ, swsusp_resume_block,
1591162b99e3SMike Christie 			swsusp_header, NULL);
159262c552ccSBojan Smojver 	if (!error && !memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
159362c552ccSBojan Smojver 		memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
1594568e34edSBart Van Assche 		error = hib_submit_io(REQ_OP_WRITE | REQ_SYNC,
1595162b99e3SMike Christie 					swsusp_resume_block,
159662c552ccSBojan Smojver 					swsusp_header, NULL);
159762c552ccSBojan Smojver 	} else {
159864ec72a1SJoe Perches 		pr_err("Cannot find swsusp signature!\n");
159962c552ccSBojan Smojver 		error = -ENODEV;
160062c552ccSBojan Smojver 	}
160162c552ccSBojan Smojver 
160262c552ccSBojan Smojver 	/*
160362c552ccSBojan Smojver 	 * We just returned from suspend, we don't need the image any more.
160462c552ccSBojan Smojver 	 */
160562c552ccSBojan Smojver 	free_all_swap_pages(root_swap);
160662c552ccSBojan Smojver 
160762c552ccSBojan Smojver 	return error;
160862c552ccSBojan Smojver }
160962c552ccSBojan Smojver #endif
161062c552ccSBojan Smojver 
1611afd8d7c7SChristophe JAILLET static int __init swsusp_header_init(void)
16121b29c164SVivek Goyal {
16131b29c164SVivek Goyal 	swsusp_header = (struct swsusp_header *)__get_free_page(GFP_KERNEL);
16141b29c164SVivek Goyal 	if (!swsusp_header)
16151b29c164SVivek Goyal 		panic("Could not allocate memory for swsusp_header\n");
16161b29c164SVivek Goyal 	return 0;
16171b29c164SVivek Goyal }
16181b29c164SVivek Goyal 
16191b29c164SVivek Goyal core_initcall(swsusp_header_init);
1620