// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/power/swap.c
 *
 * This file provides functions for reading the suspend image from
 * and writing it to a swap partition.
 *
 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
 */

#define pr_fmt(fmt) "PM: " fmt

#include <linux/module.h>
#include <linux/file.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/vmalloc.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/ktime.h>

#include "power.h"

#define HIBERNATE_SIG "S1SUSPEND"

u32 swsusp_hardware_signature;

/*
 * When reading an {un,}compressed image, we may restore pages in place,
 * in which case some architectures need these pages to be cleaned before
 * they can be executed. We don't know which pages these may be, so clean
 * the lot.
 */
static bool clean_pages_on_read;
static bool clean_pages_on_decompress;

/*
 * The swap map is a data structure used for keeping track of each page
 * written to a swap partition. It consists of many swap_map_page
 * structures, each of which contains an array of MAP_PAGE_ENTRIES swap
 * entries. These structures are stored on the swap and linked together
 * with the help of the .next_swap member.
 *
 * The swap map is created during suspend. The swap map pages are
 * allocated and populated one at a time, so we only need one memory
 * page to set up the entire structure.
 *
 * During resume, all swap_map_page structures are read into a list.
 */

#define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1)
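/*
 * For illustration only (assuming a 4 KiB PAGE_SIZE and an 8-byte
 * sector_t): one swap_map_page holds 4096 / 8 - 1 = 511 entries plus
 * the link to the next map page, so the whole structure fills exactly
 * one page and every 511 data pages written to swap cost one extra
 * page of metadata.
 */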

/*
 * Number of free pages that are not high.
 */
static inline unsigned long low_free_pages(void)
{
        return nr_free_pages() - nr_free_highpages();
}

/*
 * Number of pages required to be kept free while writing the image. Always
 * half of all available low pages before the writing starts.
 */
static inline unsigned long reqd_free_pages(void)
{
        return low_free_pages() / 2;
}

struct swap_map_page {
        sector_t entries[MAP_PAGE_ENTRIES];
        sector_t next_swap;
};

struct swap_map_page_list {
        struct swap_map_page *map;
        struct swap_map_page_list *next;
};

/*
 * The swap_map_handle structure is used for handling swap in
 * a file-like way.
 */

struct swap_map_handle {
        struct swap_map_page *cur;
        struct swap_map_page_list *maps;
        sector_t cur_swap;
        sector_t first_sector;
        unsigned int k;
        unsigned long reqd_free_pages;
        u32 crc32;
};

struct swsusp_header {
        char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
                      sizeof(u32) - sizeof(u32)];
        u32 hw_sig;
        u32 crc32;
        sector_t image;
        unsigned int flags;     /* Flags to pass to the "boot" kernel */
        char orig_sig[10];
        char sig[10];
} __packed;
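/*
 * Note on the layout above (a sketch, not normative): the reserved
 * padding is sized so that the structure fills exactly one page, with
 * sig occupying the final 10 bytes of that page. That is where the
 * swap signature ("SWAP-SPACE" or "SWAPSPACE2") normally lives, which
 * is why mark_swapfiles() below can stash the original signature in
 * orig_sig and overwrite sig with HIBERNATE_SIG in place.
 */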

static struct swsusp_header *swsusp_header;

/*
 * The following functions are used for tracking the allocated
 * swap pages, so that they can be freed in case of an error.
 */

struct swsusp_extent {
        struct rb_node node;
        unsigned long start;
        unsigned long end;
};

static struct rb_root swsusp_extents = RB_ROOT;

static int swsusp_extents_insert(unsigned long swap_offset)
{
        struct rb_node **new = &(swsusp_extents.rb_node);
        struct rb_node *parent = NULL;
        struct swsusp_extent *ext;

        /* Figure out where to put the new node */
        while (*new) {
                ext = rb_entry(*new, struct swsusp_extent, node);
                parent = *new;
                if (swap_offset < ext->start) {
                        /* Try to merge */
                        if (swap_offset == ext->start - 1) {
                                ext->start--;
                                return 0;
                        }
                        new = &((*new)->rb_left);
                } else if (swap_offset > ext->end) {
                        /* Try to merge */
                        if (swap_offset == ext->end + 1) {
                                ext->end++;
                                return 0;
                        }
                        new = &((*new)->rb_right);
                } else {
                        /* It already is in the tree */
                        return -EINVAL;
                }
        }
        /* Add the new node and rebalance the tree. */
        ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
        if (!ext)
                return -ENOMEM;

        ext->start = swap_offset;
        ext->end = swap_offset;
        rb_link_node(&ext->node, parent, new);
        rb_insert_color(&ext->node, &swsusp_extents);
        return 0;
}
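/*
 * Illustrative example: inserting offsets 10 and 12 creates two
 * single-page extents, [10..10] and [12..12]. Inserting 11 afterwards
 * extends whichever extent the tree walk reaches first (here [10..10]
 * becomes [10..11]); adjacent extents are never coalesced with each
 * other, which is fine because the tree is only used to free the
 * allocated pages again later.
 */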

/*
 * alloc_swapdev_block - allocate a swap page and register that it has
 * been allocated, so that it can be freed in case of an error.
 */

sector_t alloc_swapdev_block(int swap)
{
        unsigned long offset;

        offset = swp_offset(get_swap_page_of_type(swap));
        if (offset) {
                if (swsusp_extents_insert(offset))
                        swap_free(swp_entry(swap, offset));
                else
                        return swapdev_block(swap, offset);
        }
        return 0;
}
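/*
 * A zero return above doubles as the error value: offset 0 of a swap
 * device holds the swap header and is never handed out by the swap
 * allocator, so callers such as write_page() can safely treat a zero
 * sector as "no space" (-ENOSPC).
 */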

/*
 * free_all_swap_pages - free swap pages allocated for saving image data.
 * It also frees the extents used to register which swap entries had been
 * allocated.
 */

void free_all_swap_pages(int swap)
{
        struct rb_node *node;

        while ((node = swsusp_extents.rb_node)) {
                struct swsusp_extent *ext;
                unsigned long offset;

                ext = rb_entry(node, struct swsusp_extent, node);
                rb_erase(node, &swsusp_extents);
                for (offset = ext->start; offset <= ext->end; offset++)
                        swap_free(swp_entry(swap, offset));

                kfree(ext);
        }
}

int swsusp_swap_in_use(void)
{
        return (swsusp_extents.rb_node != NULL);
}

/*
 * General things
 */

static unsigned short root_swap = 0xffff;
static struct block_device *hib_resume_bdev;

struct hib_bio_batch {
        atomic_t count;
        wait_queue_head_t wait;
        blk_status_t error;
        struct blk_plug plug;
};

static void hib_init_batch(struct hib_bio_batch *hb)
{
        atomic_set(&hb->count, 0);
        init_waitqueue_head(&hb->wait);
        hb->error = BLK_STS_OK;
        blk_start_plug(&hb->plug);
}

static void hib_finish_batch(struct hib_bio_batch *hb)
{
        blk_finish_plug(&hb->plug);
}

static void hib_end_io(struct bio *bio)
{
        struct hib_bio_batch *hb = bio->bi_private;
        struct page *page = bio_first_page_all(bio);

        if (bio->bi_status) {
                pr_alert("Read-error on swap-device (%u:%u:%Lu)\n",
                         MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
                         (unsigned long long)bio->bi_iter.bi_sector);
        }

        if (bio_data_dir(bio) == WRITE)
                put_page(page);
        else if (clean_pages_on_read)
                flush_icache_range((unsigned long)page_address(page),
                                   (unsigned long)page_address(page) + PAGE_SIZE);

        if (bio->bi_status && !hb->error)
                hb->error = bio->bi_status;
        if (atomic_dec_and_test(&hb->count))
                wake_up(&hb->wait);

        bio_put(bio);
}

static int hib_submit_io(blk_opf_t opf, pgoff_t page_off, void *addr,
                         struct hib_bio_batch *hb)
{
        struct page *page = virt_to_page(addr);
        struct bio *bio;
        int error = 0;

        bio = bio_alloc(hib_resume_bdev, 1, opf, GFP_NOIO | __GFP_HIGH);
        bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);

        if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
                pr_err("Adding page to bio failed at %llu\n",
                       (unsigned long long)bio->bi_iter.bi_sector);
                bio_put(bio);
                return -EFAULT;
        }

        if (hb) {
                bio->bi_end_io = hib_end_io;
                bio->bi_private = hb;
                atomic_inc(&hb->count);
                submit_bio(bio);
        } else {
                error = submit_bio_wait(bio);
                bio_put(bio);
        }

        return error;
}

static int hib_wait_io(struct hib_bio_batch *hb)
{
        /*
         * We are relying on the behavior of blk_plug that a thread with
         * a plug will flush the plug list before sleeping.
         */
        wait_event(hb->wait, atomic_read(&hb->count) == 0);
        return blk_status_to_errno(hb->error);
}
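/*
 * Typical use of the batch (a sketch of the pattern followed by
 * save_image() and load_image() below): hib_init_batch() starts a
 * block plug, any number of hib_submit_io() calls with hb != NULL
 * queue asynchronous bios that bump hb->count, hib_wait_io() sleeps
 * until all of them have completed (unplugging implicitly, as noted
 * above), and hib_finish_batch() releases the plug. The first bio
 * error is latched in hb->error and reported by hib_wait_io().
 */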

/*
 * Saving part
 */
static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
{
        int error;

        hib_submit_io(REQ_OP_READ, swsusp_resume_block, swsusp_header, NULL);
        if (!memcmp("SWAP-SPACE", swsusp_header->sig, 10) ||
            !memcmp("SWAPSPACE2", swsusp_header->sig, 10)) {
                memcpy(swsusp_header->orig_sig, swsusp_header->sig, 10);
                memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
                swsusp_header->image = handle->first_sector;
                if (swsusp_hardware_signature) {
                        swsusp_header->hw_sig = swsusp_hardware_signature;
                        flags |= SF_HW_SIG;
                }
                swsusp_header->flags = flags;
                if (flags & SF_CRC32_MODE)
                        swsusp_header->crc32 = handle->crc32;
                error = hib_submit_io(REQ_OP_WRITE | REQ_SYNC,
                                      swsusp_resume_block, swsusp_header, NULL);
        } else {
                pr_err("Swap header not found!\n");
                error = -ENODEV;
        }
        return error;
}

/**
 * swsusp_swap_check - check if the resume device is a swap device
 * and get its index (if so)
 *
 * This is called before saving the image.
 */
static int swsusp_swap_check(void)
{
        int res;

        if (swsusp_resume_device)
                res = swap_type_of(swsusp_resume_device, swsusp_resume_block);
        else
                res = find_first_swap(&swsusp_resume_device);
        if (res < 0)
                return res;
        root_swap = res;

        hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
                                            BLK_OPEN_WRITE, NULL, NULL);
        if (IS_ERR(hib_resume_bdev))
                return PTR_ERR(hib_resume_bdev);

        res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
        if (res < 0)
                blkdev_put(hib_resume_bdev, NULL);

        return res;
}

/**
 * write_page - Write one page to a given swap location.
 * @buf: Address of the buffer we're writing.
 * @offset: Offset of the swap page we're writing to.
 * @hb: bio completion batch
 */

static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
{
        void *src;
        int ret;

        if (!offset)
                return -ENOSPC;

        if (hb) {
                src = (void *)__get_free_page(GFP_NOIO | __GFP_NOWARN |
                                              __GFP_NORETRY);
                if (src) {
                        copy_page(src, buf);
                } else {
                        ret = hib_wait_io(hb); /* Free pages */
                        if (ret)
                                return ret;
                        src = (void *)__get_free_page(GFP_NOIO |
                                                      __GFP_NOWARN |
                                                      __GFP_NORETRY);
                        if (src) {
                                copy_page(src, buf);
                        } else {
                                WARN_ON_ONCE(1);
                                hb = NULL;      /* Go synchronous */
                                src = buf;
                        }
                }
        } else {
                src = buf;
        }
        return hib_submit_io(REQ_OP_WRITE | REQ_SYNC, offset, src, hb);
}

static void release_swap_writer(struct swap_map_handle *handle)
{
        if (handle->cur)
                free_page((unsigned long)handle->cur);
        handle->cur = NULL;
}

static int get_swap_writer(struct swap_map_handle *handle)
{
        int ret;

        ret = swsusp_swap_check();
        if (ret) {
                if (ret != -ENOSPC)
                        pr_err("Cannot find swap device, try swapon -a\n");
                return ret;
        }
        handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
        if (!handle->cur) {
                ret = -ENOMEM;
                goto err_close;
        }
        handle->cur_swap = alloc_swapdev_block(root_swap);
        if (!handle->cur_swap) {
                ret = -ENOSPC;
                goto err_rel;
        }
        handle->k = 0;
        handle->reqd_free_pages = reqd_free_pages();
        handle->first_sector = handle->cur_swap;
        return 0;
err_rel:
        release_swap_writer(handle);
err_close:
        swsusp_close(false);
        return ret;
}

static int swap_write_page(struct swap_map_handle *handle, void *buf,
                           struct hib_bio_batch *hb)
{
        int error = 0;
        sector_t offset;

        if (!handle->cur)
                return -EINVAL;
        offset = alloc_swapdev_block(root_swap);
        error = write_page(buf, offset, hb);
        if (error)
                return error;
        handle->cur->entries[handle->k++] = offset;
        if (handle->k >= MAP_PAGE_ENTRIES) {
                offset = alloc_swapdev_block(root_swap);
                if (!offset)
                        return -ENOSPC;
                handle->cur->next_swap = offset;
                error = write_page(handle->cur, handle->cur_swap, hb);
                if (error)
                        goto out;
                clear_page(handle->cur);
                handle->cur_swap = offset;
                handle->k = 0;

                if (hb && low_free_pages() <= handle->reqd_free_pages) {
                        error = hib_wait_io(hb);
                        if (error)
                                goto out;
                        /*
                         * Recalculate the number of required free pages, to
                         * make sure we never take more than half.
                         */
                        handle->reqd_free_pages = reqd_free_pages();
                }
        }
out:
        return error;
}

static int flush_swap_writer(struct swap_map_handle *handle)
{
        if (handle->cur && handle->cur_swap)
                return write_page(handle->cur, handle->cur_swap, NULL);
        else
                return -EINVAL;
}

static int swap_writer_finish(struct swap_map_handle *handle,
                              unsigned int flags, int error)
{
        if (!error) {
                pr_info("S");
                error = mark_swapfiles(handle, flags);
                pr_cont("|\n");
                flush_swap_writer(handle);
        }

        if (error)
                free_all_swap_pages(root_swap);
        release_swap_writer(handle);
        swsusp_close(false);

        return error;
}

/* We need to remember how much compressed data we need to read. */
#define LZO_HEADER sizeof(size_t)

/* Number of pages/bytes we'll compress at one time. */
#define LZO_UNC_PAGES 32
#define LZO_UNC_SIZE (LZO_UNC_PAGES * PAGE_SIZE)

/* Number of pages/bytes we need for compressed data (worst case). */
#define LZO_CMP_PAGES DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
                                   LZO_HEADER, PAGE_SIZE)
#define LZO_CMP_SIZE (LZO_CMP_PAGES * PAGE_SIZE)
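/*
 * Worked example (assuming PAGE_SIZE == 4096 and an 8-byte size_t):
 * LZO_UNC_SIZE is 32 * 4096 = 131072 bytes. lzo1x_worst_compress()
 * expands that to 131072 + 131072 / 16 + 64 + 3 = 139331 bytes, so
 * with the 8-byte length header LZO_CMP_PAGES is
 * DIV_ROUND_UP(139339, 4096) = 35 pages and LZO_CMP_SIZE is 143360
 * bytes. Incompressible data therefore costs at most about 9% of
 * extra swap space.
 */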

/* Maximum number of threads for compression/decompression. */
#define LZO_THREADS 3

/* Minimum/maximum number of pages for read buffering. */
#define LZO_MIN_RD_PAGES 1024
#define LZO_MAX_RD_PAGES 8192


/**
 * save_image - save the suspend image data
 */

static int save_image(struct swap_map_handle *handle,
                      struct snapshot_handle *snapshot,
                      unsigned int nr_to_write)
{
        unsigned int m;
        int ret;
        int nr_pages;
        int err2;
        struct hib_bio_batch hb;
        ktime_t start;
        ktime_t stop;

        hib_init_batch(&hb);

        pr_info("Saving image data pages (%u pages)...\n",
                nr_to_write);
        m = nr_to_write / 10;
        if (!m)
                m = 1;
        nr_pages = 0;
        start = ktime_get();
        while (1) {
                ret = snapshot_read_next(snapshot);
                if (ret <= 0)
                        break;
                ret = swap_write_page(handle, data_of(*snapshot), &hb);
                if (ret)
                        break;
                if (!(nr_pages % m))
                        pr_info("Image saving progress: %3d%%\n",
                                nr_pages / m * 10);
                nr_pages++;
        }
        err2 = hib_wait_io(&hb);
        hib_finish_batch(&hb);
        stop = ktime_get();
        if (!ret)
                ret = err2;
        if (!ret)
                pr_info("Image saving done\n");
        swsusp_show_speed(start, stop, nr_to_write, "Wrote");
        return ret;
}

/*
 * Structure used for CRC32.
 */
struct crc_data {
        struct task_struct *thr;                  /* thread */
        atomic_t ready;                           /* ready to start flag */
        atomic_t stop;                            /* ready to stop flag */
        unsigned run_threads;                     /* nr current threads */
        wait_queue_head_t go;                     /* start crc update */
        wait_queue_head_t done;                   /* crc update done */
        u32 *crc32;                               /* points to handle's crc32 */
        size_t *unc_len[LZO_THREADS];             /* uncompressed lengths */
        unsigned char *unc[LZO_THREADS];          /* uncompressed data */
};

/*
 * CRC32 update function that runs in its own thread.
 */
static int crc32_threadfn(void *data)
{
        struct crc_data *d = data;
        unsigned i;

        while (1) {
                wait_event(d->go, atomic_read_acquire(&d->ready) ||
                                  kthread_should_stop());
                if (kthread_should_stop()) {
                        d->thr = NULL;
                        atomic_set_release(&d->stop, 1);
                        wake_up(&d->done);
                        break;
                }
                atomic_set(&d->ready, 0);

                for (i = 0; i < d->run_threads; i++)
                        *d->crc32 = crc32_le(*d->crc32,
                                             d->unc[i], *d->unc_len[i]);
                atomic_set_release(&d->stop, 1);
                wake_up(&d->done);
        }
        return 0;
}

/*
 * Structure used for LZO data compression.
 */
struct cmp_data {
        struct task_struct *thr;                  /* thread */
        atomic_t ready;                           /* ready to start flag */
        atomic_t stop;                            /* ready to stop flag */
        int ret;                                  /* return code */
        wait_queue_head_t go;                     /* start compression */
        wait_queue_head_t done;                   /* compression done */
        size_t unc_len;                           /* uncompressed length */
        size_t cmp_len;                           /* compressed length */
        unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
        unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
        unsigned char wrk[LZO1X_1_MEM_COMPRESS];  /* compression workspace */
};

/*
 * Compression function that runs in its own thread.
 */
static int lzo_compress_threadfn(void *data)
{
        struct cmp_data *d = data;

        while (1) {
                wait_event(d->go, atomic_read_acquire(&d->ready) ||
                                  kthread_should_stop());
                if (kthread_should_stop()) {
                        d->thr = NULL;
                        d->ret = -1;
                        atomic_set_release(&d->stop, 1);
                        wake_up(&d->done);
                        break;
                }
                atomic_set(&d->ready, 0);

                d->ret = lzo1x_1_compress(d->unc, d->unc_len,
                                          d->cmp + LZO_HEADER, &d->cmp_len,
                                          d->wrk);
                atomic_set_release(&d->stop, 1);
                wake_up(&d->done);
        }
        return 0;
}

/**
 * save_image_lzo - Save the suspend image data compressed with LZO.
 * @handle: Swap map handle to use for saving the image.
 * @snapshot: Image to read data from.
 * @nr_to_write: Number of pages to save.
 */
static int save_image_lzo(struct swap_map_handle *handle,
                          struct snapshot_handle *snapshot,
                          unsigned int nr_to_write)
{
        unsigned int m;
        int ret = 0;
        int nr_pages;
        int err2;
        struct hib_bio_batch hb;
        ktime_t start;
        ktime_t stop;
        size_t off;
        unsigned thr, run_threads, nr_threads;
        unsigned char *page = NULL;
        struct cmp_data *data = NULL;
        struct crc_data *crc = NULL;

        hib_init_batch(&hb);

        /*
         * We'll limit the number of threads for compression to limit memory
         * footprint.
         */
        nr_threads = num_online_cpus() - 1;
        nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);

        page = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH);
        if (!page) {
                pr_err("Failed to allocate LZO page\n");
                ret = -ENOMEM;
                goto out_clean;
        }

        data = vzalloc(array_size(nr_threads, sizeof(*data)));
        if (!data) {
                pr_err("Failed to allocate LZO data\n");
                ret = -ENOMEM;
                goto out_clean;
        }

        crc = kzalloc(sizeof(*crc), GFP_KERNEL);
        if (!crc) {
                pr_err("Failed to allocate crc\n");
                ret = -ENOMEM;
                goto out_clean;
        }

        /*
         * Start the compression threads.
         */
        for (thr = 0; thr < nr_threads; thr++) {
                init_waitqueue_head(&data[thr].go);
                init_waitqueue_head(&data[thr].done);

                data[thr].thr = kthread_run(lzo_compress_threadfn,
                                            &data[thr],
                                            "image_compress/%u", thr);
                if (IS_ERR(data[thr].thr)) {
                        data[thr].thr = NULL;
                        pr_err("Cannot start compression threads\n");
                        ret = -ENOMEM;
                        goto out_clean;
                }
        }

        /*
         * Start the CRC32 thread.
         */
        init_waitqueue_head(&crc->go);
        init_waitqueue_head(&crc->done);

        handle->crc32 = 0;
        crc->crc32 = &handle->crc32;
        for (thr = 0; thr < nr_threads; thr++) {
                crc->unc[thr] = data[thr].unc;
                crc->unc_len[thr] = &data[thr].unc_len;
        }

        crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
        if (IS_ERR(crc->thr)) {
                crc->thr = NULL;
                pr_err("Cannot start CRC32 thread\n");
                ret = -ENOMEM;
                goto out_clean;
        }

        /*
         * Adjust the number of required free pages after all allocations have
         * been done. We don't want to run out of pages when writing.
         */
        handle->reqd_free_pages = reqd_free_pages();

        pr_info("Using %u thread(s) for compression\n", nr_threads);
        pr_info("Compressing and saving image data (%u pages)...\n",
                nr_to_write);
        m = nr_to_write / 10;
        if (!m)
                m = 1;
        nr_pages = 0;
        start = ktime_get();
        for (;;) {
                for (thr = 0; thr < nr_threads; thr++) {
                        for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
                                ret = snapshot_read_next(snapshot);
                                if (ret < 0)
                                        goto out_finish;

                                if (!ret)
                                        break;

                                memcpy(data[thr].unc + off,
                                       data_of(*snapshot), PAGE_SIZE);

                                if (!(nr_pages % m))
                                        pr_info("Image saving progress: %3d%%\n",
                                                nr_pages / m * 10);
                                nr_pages++;
                        }
                        if (!off)
                                break;

                        data[thr].unc_len = off;

                        atomic_set_release(&data[thr].ready, 1);
                        wake_up(&data[thr].go);
                }

                if (!thr)
                        break;

                crc->run_threads = thr;
                atomic_set_release(&crc->ready, 1);
                wake_up(&crc->go);

                for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
                        wait_event(data[thr].done,
                                   atomic_read_acquire(&data[thr].stop));
                        atomic_set(&data[thr].stop, 0);

                        ret = data[thr].ret;

                        if (ret < 0) {
                                pr_err("LZO compression failed\n");
                                goto out_finish;
                        }

                        if (unlikely(!data[thr].cmp_len ||
                                     data[thr].cmp_len >
                                     lzo1x_worst_compress(data[thr].unc_len))) {
                                pr_err("Invalid LZO compressed length\n");
                                ret = -1;
                                goto out_finish;
                        }

                        *(size_t *)data[thr].cmp = data[thr].cmp_len;

                        /*
                         * Given we are writing one page at a time to disk, we
                         * copy that much from the buffer, although the last
                         * bit will likely be smaller than full page. This is
                         * OK - we saved the length of the compressed data, so
                         * any garbage at the end will be discarded when we
                         * read it.
                         */
                        for (off = 0;
                             off < LZO_HEADER + data[thr].cmp_len;
                             off += PAGE_SIZE) {
                                memcpy(page, data[thr].cmp + off, PAGE_SIZE);

                                ret = swap_write_page(handle, page, &hb);
                                if (ret)
                                        goto out_finish;
                        }
                }

                wait_event(crc->done, atomic_read_acquire(&crc->stop));
                atomic_set(&crc->stop, 0);
        }

out_finish:
        err2 = hib_wait_io(&hb);
        stop = ktime_get();
        if (!ret)
                ret = err2;
        if (!ret)
                pr_info("Image saving done\n");
        swsusp_show_speed(start, stop, nr_to_write, "Wrote");
out_clean:
        hib_finish_batch(&hb);
        if (crc) {
                if (crc->thr)
                        kthread_stop(crc->thr);
                kfree(crc);
        }
        if (data) {
                for (thr = 0; thr < nr_threads; thr++)
                        if (data[thr].thr)
                                kthread_stop(data[thr].thr);
                vfree(data);
        }
        if (page)
                free_page((unsigned long)page);

        return ret;
}

/**
 * enough_swap - Make sure we have enough swap to save the image.
 *
 * Returns TRUE or FALSE after checking the total amount of swap
 * space available from the resume partition.
 */

static int enough_swap(unsigned int nr_pages)
{
        unsigned int free_swap = count_swap_pages(root_swap, 1);
        unsigned int required;

        pr_debug("Free swap pages: %u\n", free_swap);

        required = PAGES_FOR_IO + nr_pages;
        return free_swap > required;
}

/**
 * swsusp_write - Write entire image and metadata.
 * @flags: flags to pass to the "boot" kernel in the image header
 *
 * It is important _NOT_ to umount filesystems at this point. We want
 * them synced (in case something goes wrong) but we do NOT want to mark
 * the filesystem clean: it is not. (And it does not matter, if we resume
 * correctly, we'll mark the system clean, anyway.)
 */

int swsusp_write(unsigned int flags)
{
        struct swap_map_handle handle;
        struct snapshot_handle snapshot;
        struct swsusp_info *header;
        unsigned long pages;
        int error;

        pages = snapshot_get_image_size();
        error = get_swap_writer(&handle);
        if (error) {
                pr_err("Cannot get swap writer\n");
                return error;
        }
        if (flags & SF_NOCOMPRESS_MODE) {
                if (!enough_swap(pages)) {
                        pr_err("Not enough free swap\n");
                        error = -ENOSPC;
                        goto out_finish;
                }
        }
        memset(&snapshot, 0, sizeof(struct snapshot_handle));
        error = snapshot_read_next(&snapshot);
        if (error < (int)PAGE_SIZE) {
                if (error >= 0)
                        error = -EFAULT;

                goto out_finish;
        }
        header = (struct swsusp_info *)data_of(snapshot);
        error = swap_write_page(&handle, header, NULL);
        if (!error) {
                error = (flags & SF_NOCOMPRESS_MODE) ?
                        save_image(&handle, &snapshot, pages - 1) :
                        save_image_lzo(&handle, &snapshot, pages - 1);
        }
out_finish:
        error = swap_writer_finish(&handle, flags, error);
        return error;
}

/*
 * The following functions allow us to read data using a swap map
 * in a file-like way.
 */

static void release_swap_reader(struct swap_map_handle *handle)
{
        struct swap_map_page_list *tmp;

        while (handle->maps) {
                if (handle->maps->map)
                        free_page((unsigned long)handle->maps->map);
                tmp = handle->maps;
                handle->maps = handle->maps->next;
                kfree(tmp);
        }
        handle->cur = NULL;
}

static int get_swap_reader(struct swap_map_handle *handle,
                           unsigned int *flags_p)
{
        int error;
        struct swap_map_page_list *tmp, *last;
        sector_t offset;

        *flags_p = swsusp_header->flags;

        if (!swsusp_header->image) /* how can this happen? */
                return -EINVAL;

        handle->cur = NULL;
        last = handle->maps = NULL;
        offset = swsusp_header->image;
        while (offset) {
                tmp = kzalloc(sizeof(*handle->maps), GFP_KERNEL);
                if (!tmp) {
                        release_swap_reader(handle);
                        return -ENOMEM;
                }
                if (!handle->maps)
                        handle->maps = tmp;
                if (last)
                        last->next = tmp;
                last = tmp;

                tmp->map = (struct swap_map_page *)
                           __get_free_page(GFP_NOIO | __GFP_HIGH);
                if (!tmp->map) {
                        release_swap_reader(handle);
                        return -ENOMEM;
                }

                error = hib_submit_io(REQ_OP_READ, offset, tmp->map, NULL);
                if (error) {
                        release_swap_reader(handle);
                        return error;
                }
                offset = tmp->map->next_swap;
        }
        handle->k = 0;
        handle->cur = handle->maps->map;
        return 0;
}

static int swap_read_page(struct swap_map_handle *handle, void *buf,
                          struct hib_bio_batch *hb)
{
        sector_t offset;
        int error;
        struct swap_map_page_list *tmp;

        if (!handle->cur)
                return -EINVAL;
        offset = handle->cur->entries[handle->k];
        if (!offset)
                return -EFAULT;
        error = hib_submit_io(REQ_OP_READ, offset, buf, hb);
        if (error)
                return error;
        if (++handle->k >= MAP_PAGE_ENTRIES) {
                handle->k = 0;
                free_page((unsigned long)handle->maps->map);
                tmp = handle->maps;
                handle->maps = handle->maps->next;
                kfree(tmp);
                if (!handle->maps)
                        release_swap_reader(handle);
                else
                        handle->cur = handle->maps->map;
        }
        return error;
}

static int swap_reader_finish(struct swap_map_handle *handle)
{
        release_swap_reader(handle);

        return 0;
}

/**
 * load_image - load the image using the swap map handle
 * @handle: Swap map handle to use for loading the image data.
 * @snapshot: Image to copy the data into.
 * @nr_to_read: Number of pages to load.
 */

static int load_image(struct swap_map_handle *handle,
                      struct snapshot_handle *snapshot,
                      unsigned int nr_to_read)
{
        unsigned int m;
        int ret = 0;
        ktime_t start;
        ktime_t stop;
        struct hib_bio_batch hb;
        int err2;
        unsigned nr_pages;

        hib_init_batch(&hb);

        clean_pages_on_read = true;
        pr_info("Loading image data pages (%u pages)...\n", nr_to_read);
        m = nr_to_read / 10;
        if (!m)
                m = 1;
        nr_pages = 0;
        start = ktime_get();
        for ( ; ; ) {
                ret = snapshot_write_next(snapshot);
                if (ret <= 0)
                        break;
                ret = swap_read_page(handle, data_of(*snapshot), &hb);
                if (ret)
                        break;
                if (snapshot->sync_read)
                        ret = hib_wait_io(&hb);
                if (ret)
                        break;
                if (!(nr_pages % m))
                        pr_info("Image loading progress: %3d%%\n",
                                nr_pages / m * 10);
                nr_pages++;
        }
        err2 = hib_wait_io(&hb);
        hib_finish_batch(&hb);
        stop = ktime_get();
        if (!ret)
                ret = err2;
        if (!ret) {
                pr_info("Image loading done\n");
                snapshot_write_finalize(snapshot);
                if (!snapshot_image_loaded(snapshot))
                        ret = -ENODATA;
        }
        swsusp_show_speed(start, stop, nr_to_read, "Read");
        return ret;
}

/*
 * Structure used for LZO data decompression.
 */
struct dec_data {
        struct task_struct *thr;                  /* thread */
        atomic_t ready;                           /* ready to start flag */
        atomic_t stop;                            /* ready to stop flag */
        int ret;                                  /* return code */
        wait_queue_head_t go;                     /* start decompression */
        wait_queue_head_t done;                   /* decompression done */
        size_t unc_len;                           /* uncompressed length */
        size_t cmp_len;                           /* compressed length */
        unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
        unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
};

/*
 * Decompression function that runs in its own thread.
 */
static int lzo_decompress_threadfn(void *data)
{
        struct dec_data *d = data;

        while (1) {
                wait_event(d->go, atomic_read_acquire(&d->ready) ||
                                  kthread_should_stop());
                if (kthread_should_stop()) {
                        d->thr = NULL;
                        d->ret = -1;
                        atomic_set_release(&d->stop, 1);
                        wake_up(&d->done);
                        break;
                }
                atomic_set(&d->ready, 0);

                d->unc_len = LZO_UNC_SIZE;
                d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
                                               d->unc, &d->unc_len);
                if (clean_pages_on_decompress)
                        flush_icache_range((unsigned long)d->unc,
                                           (unsigned long)d->unc + d->unc_len);

                atomic_set_release(&d->stop, 1);
                wake_up(&d->done);
        }
        return 0;
}

/**
 * load_image_lzo - Load compressed image data and decompress it with LZO.
 * @handle: Swap map handle to use for loading data.
 * @snapshot: Image to copy uncompressed data into.
 * @nr_to_read: Number of pages to load.
 */
static int load_image_lzo(struct swap_map_handle *handle,
                          struct snapshot_handle *snapshot,
                          unsigned int nr_to_read)
{
        unsigned int m;
        int ret = 0;
        int eof = 0;
        struct hib_bio_batch hb;
        ktime_t start;
        ktime_t stop;
        unsigned nr_pages;
        size_t off;
        unsigned i, thr, run_threads, nr_threads;
        unsigned ring = 0, pg = 0, ring_size = 0,
                 have = 0, want, need, asked = 0;
        unsigned long read_pages = 0;
        unsigned char **page = NULL;
        struct dec_data *data = NULL;
        struct crc_data *crc = NULL;

        hib_init_batch(&hb);

        /*
         * We'll limit the number of threads for decompression to limit memory
         * footprint.
         */
        nr_threads = num_online_cpus() - 1;
        nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);

        page = vmalloc(array_size(LZO_MAX_RD_PAGES, sizeof(*page)));
        if (!page) {
                pr_err("Failed to allocate LZO page\n");
                ret = -ENOMEM;
                goto out_clean;
        }

        data = vzalloc(array_size(nr_threads, sizeof(*data)));
        if (!data) {
                pr_err("Failed to allocate LZO data\n");
                ret = -ENOMEM;
                goto out_clean;
        }

        crc = kzalloc(sizeof(*crc), GFP_KERNEL);
        if (!crc) {
                pr_err("Failed to allocate crc\n");
                ret = -ENOMEM;
                goto out_clean;
        }

        clean_pages_on_decompress = true;

        /*
         * Start the decompression threads.
         */
        for (thr = 0; thr < nr_threads; thr++) {
                init_waitqueue_head(&data[thr].go);
                init_waitqueue_head(&data[thr].done);

                data[thr].thr = kthread_run(lzo_decompress_threadfn,
                                            &data[thr],
                                            "image_decompress/%u", thr);
                if (IS_ERR(data[thr].thr)) {
                        data[thr].thr = NULL;
                        pr_err("Cannot start decompression threads\n");
                        ret = -ENOMEM;
                        goto out_clean;
                }
        }

        /*
         * Start the CRC32 thread.
         */
        init_waitqueue_head(&crc->go);
        init_waitqueue_head(&crc->done);

        handle->crc32 = 0;
        crc->crc32 = &handle->crc32;
        for (thr = 0; thr < nr_threads; thr++) {
                crc->unc[thr] = data[thr].unc;
                crc->unc_len[thr] = &data[thr].unc_len;
        }

        crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
        if (IS_ERR(crc->thr)) {
                crc->thr = NULL;
                pr_err("Cannot start CRC32 thread\n");
                ret = -ENOMEM;
                goto out_clean;
        }

        /*
         * Set the number of pages for read buffering.
         * This is complete guesswork, because we'll only know the real
         * picture once prepare_image() is called, which is much later on
         * during the image load phase. We'll assume the worst case and
         * say that none of the image pages are from high memory.
         */
        if (low_free_pages() > snapshot_get_image_size())
                read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
        read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);
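        /*
         * Illustrative numbers (not normative): with, say, 300000 free
         * low pages and a 200000-page image, the formula above asks for
         * (300000 - 200000) / 2 = 50000 pages of read buffer, which
         * clamp_val() then caps at LZO_MAX_RD_PAGES (8192). On a tightly
         * sized system the result is raised to at least LZO_MIN_RD_PAGES
         * (1024) instead.
         */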

        for (i = 0; i < read_pages; i++) {
                page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
                                                  GFP_NOIO | __GFP_HIGH :
                                                  GFP_NOIO | __GFP_NOWARN |
                                                  __GFP_NORETRY);

                if (!page[i]) {
                        if (i < LZO_CMP_PAGES) {
                                ring_size = i;
                                pr_err("Failed to allocate LZO pages\n");
                                ret = -ENOMEM;
                                goto out_clean;
                        } else {
                                break;
                        }
                }
        }
        want = ring_size = i;

        pr_info("Using %u thread(s) for decompression\n", nr_threads);
        pr_info("Loading and decompressing image data (%u pages)...\n",
                nr_to_read);
        m = nr_to_read / 10;
        if (!m)
                m = 1;
        nr_pages = 0;
        start = ktime_get();

        ret = snapshot_write_next(snapshot);
        if (ret <= 0)
                goto out_finish;

        for (;;) {
                for (i = 0; !eof && i < want; i++) {
                        ret = swap_read_page(handle, page[ring], &hb);
                        if (ret) {
                                /*
                                 * On real read error, finish. On end of data,
                                 * set EOF flag and just exit the read loop.
                                 */
                                if (handle->cur &&
                                    handle->cur->entries[handle->k]) {
                                        goto out_finish;
                                } else {
                                        eof = 1;
                                        break;
                                }
                        }
                        if (++ring >= ring_size)
                                ring = 0;
                }
                asked += i;
                want -= i;

                /*
                 * We are out of data, wait for some more.
                 */
                if (!have) {
                        if (!asked)
                                break;

                        ret = hib_wait_io(&hb);
                        if (ret)
                                goto out_finish;
                        have += asked;
                        asked = 0;
                        if (eof)
                                eof = 2;
                }

                if (crc->run_threads) {
                        wait_event(crc->done, atomic_read_acquire(&crc->stop));
                        atomic_set(&crc->stop, 0);
                        crc->run_threads = 0;
                }

                for (thr = 0; have && thr < nr_threads; thr++) {
                        data[thr].cmp_len = *(size_t *)page[pg];
                        if (unlikely(!data[thr].cmp_len ||
                                     data[thr].cmp_len >
                                     lzo1x_worst_compress(LZO_UNC_SIZE))) {
                                pr_err("Invalid LZO compressed length\n");
                                ret = -1;
                                goto out_finish;
                        }

                        need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER,
                                            PAGE_SIZE);
                        if (need > have) {
                                if (eof > 1) {
                                        ret = -1;
                                        goto out_finish;
                                }
                                break;
                        }

                        for (off = 0;
                             off < LZO_HEADER + data[thr].cmp_len;
                             off += PAGE_SIZE) {
                                memcpy(data[thr].cmp + off,
                                       page[pg], PAGE_SIZE);
                                have--;
                                want++;
                                if (++pg >= ring_size)
                                        pg = 0;
                        }

                        atomic_set_release(&data[thr].ready, 1);
                        wake_up(&data[thr].go);
                }

                /*
                 * Wait for more data while we are decompressing.
                 */
                if (have < LZO_CMP_PAGES && asked) {
                        ret = hib_wait_io(&hb);
                        if (ret)
                                goto out_finish;
                        have += asked;
                        asked = 0;
                        if (eof)
                                eof = 2;
                }

                for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
                        wait_event(data[thr].done,
                                   atomic_read_acquire(&data[thr].stop));
                        atomic_set(&data[thr].stop, 0);

                        ret = data[thr].ret;

                        if (ret < 0) {
                                pr_err("LZO decompression failed\n");
                                goto out_finish;
                        }

                        if (unlikely(!data[thr].unc_len ||
                                     data[thr].unc_len > LZO_UNC_SIZE ||
                                     data[thr].unc_len & (PAGE_SIZE - 1))) {
                                pr_err("Invalid LZO uncompressed length\n");
                                ret = -1;
                                goto out_finish;
                        }

                        for (off = 0;
                             off < data[thr].unc_len; off += PAGE_SIZE) {
                                memcpy(data_of(*snapshot),
                                       data[thr].unc + off, PAGE_SIZE);

                                if (!(nr_pages % m))
                                        pr_info("Image loading progress: %3d%%\n",
                                                nr_pages / m * 10);
                                nr_pages++;

                                ret = snapshot_write_next(snapshot);
                                if (ret <= 0) {
                                        crc->run_threads = thr + 1;
                                        atomic_set_release(&crc->ready, 1);
                                        wake_up(&crc->go);
                                        goto out_finish;
                                }
                        }
                }

                crc->run_threads = thr;
                atomic_set_release(&crc->ready, 1);
                wake_up(&crc->go);
        }

out_finish:
        if (crc->run_threads) {
                wait_event(crc->done, atomic_read_acquire(&crc->stop));
                atomic_set(&crc->stop, 0);
        }
        stop = ktime_get();
        if (!ret) {
                pr_info("Image loading done\n");
                snapshot_write_finalize(snapshot);
                if (!snapshot_image_loaded(snapshot))
                        ret = -ENODATA;
                if (!ret) {
                        if (swsusp_header->flags & SF_CRC32_MODE) {
                                if (handle->crc32 != swsusp_header->crc32) {
                                        pr_err("Invalid image CRC32!\n");
                                        ret = -ENODATA;
                                }
                        }
                }
        }
        swsusp_show_speed(start, stop, nr_to_read, "Read");
out_clean:
        hib_finish_batch(&hb);
        for (i = 0; i < ring_size; i++)
                free_page((unsigned long)page[i]);
        if (crc) {
                if (crc->thr)
                        kthread_stop(crc->thr);
                kfree(crc);
        }
        if (data) {
                for (thr = 0; thr < nr_threads; thr++)
                        if (data[thr].thr)
                                kthread_stop(data[thr].thr);
                vfree(data);
        }
        vfree(page);

        return ret;
}

/**
 * swsusp_read - read the hibernation image.
 * @flags_p: flags passed by the "frozen" kernel in the image header should
 *           be written into this memory location
 */

int swsusp_read(unsigned int *flags_p)
{
        int error;
        struct swap_map_handle handle;
        struct snapshot_handle snapshot;
        struct swsusp_info *header;

        memset(&snapshot, 0, sizeof(struct snapshot_handle));
        error = snapshot_write_next(&snapshot);
        if (error < (int)PAGE_SIZE)
                return error < 0 ? error : -EFAULT;
        header = (struct swsusp_info *)data_of(snapshot);
        error = get_swap_reader(&handle, flags_p);
        if (error)
                goto end;
        if (!error)
                error = swap_read_page(&handle, header, NULL);
        if (!error) {
                error = (*flags_p & SF_NOCOMPRESS_MODE) ?
                        load_image(&handle, &snapshot, header->pages - 1) :
                        load_image_lzo(&handle, &snapshot, header->pages - 1);
        }
        swap_reader_finish(&handle);
end:
        if (!error)
                pr_debug("Image successfully loaded\n");
        else
                pr_debug("Error %d resuming\n", error);
        return error;
}

static void *swsusp_holder;

/**
 * swsusp_check - Check for swsusp signature in the resume device
 * @exclusive: Open the resume device exclusively.
 */

int swsusp_check(bool exclusive)
{
        void *holder = exclusive ? &swsusp_holder : NULL;
        int error;

        hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device, BLK_OPEN_READ,
                                            holder, NULL);
        if (!IS_ERR(hib_resume_bdev)) {
                set_blocksize(hib_resume_bdev, PAGE_SIZE);
                clear_page(swsusp_header);
                error = hib_submit_io(REQ_OP_READ, swsusp_resume_block,
                                      swsusp_header, NULL);
                if (error)
                        goto put;

                if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
                        memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
                        /* Reset swap signature now */
                        error = hib_submit_io(REQ_OP_WRITE | REQ_SYNC,
                                              swsusp_resume_block,
                                              swsusp_header, NULL);
                } else {
                        error = -EINVAL;
                }
                if (!error && swsusp_header->flags & SF_HW_SIG &&
                    swsusp_header->hw_sig != swsusp_hardware_signature) {
                        pr_info("Suspend image hardware signature mismatch (%08x now %08x); aborting resume.\n",
                                swsusp_header->hw_sig, swsusp_hardware_signature);
                        error = -EINVAL;
                }

put:
                if (error)
                        blkdev_put(hib_resume_bdev, holder);
                else
                        pr_debug("Image signature found, resuming\n");
        } else {
                error = PTR_ERR(hib_resume_bdev);
        }

        if (error)
                pr_debug("Image not found (code %d)\n", error);

        return error;
}

/**
 * swsusp_close - close swap device.
 * @exclusive: Close the resume device which is exclusively opened.
 */

void swsusp_close(bool exclusive)
{
        if (IS_ERR(hib_resume_bdev)) {
                pr_debug("Image device not initialised\n");
                return;
        }

        blkdev_put(hib_resume_bdev, exclusive ? &swsusp_holder : NULL);
}

/**
 * swsusp_unmark - Unmark swsusp signature in the resume device
 */

#ifdef CONFIG_SUSPEND
int swsusp_unmark(void)
{
        int error;

        hib_submit_io(REQ_OP_READ, swsusp_resume_block,
                      swsusp_header, NULL);
        if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
                memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
                error = hib_submit_io(REQ_OP_WRITE | REQ_SYNC,
                                      swsusp_resume_block,
                                      swsusp_header, NULL);
        } else {
                pr_err("Cannot find swsusp signature!\n");
                error = -ENODEV;
        }

        /*
         * We just returned from suspend, we don't need the image any more.
         */
        free_all_swap_pages(root_swap);

        return error;
}
#endif

static int __init swsusp_header_init(void)
{
        swsusp_header = (struct swsusp_header *)__get_free_page(GFP_KERNEL);
        if (!swsusp_header)
                panic("Could not allocate memory for swsusp_header\n");
        return 0;
}

core_initcall(swsusp_header_init);