/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * This file is released under the GPLv2.
 *
 */

#define pr_fmt(fmt) "PM: " fmt

#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/nmi.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/ktime.h>
#include <linux/set_memory.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"

#if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_ARCH_HAS_SET_MEMORY)
static bool hibernate_restore_protection;
static bool hibernate_restore_protection_active;

void enable_restore_image_protection(void)
{
	hibernate_restore_protection = true;
}

static inline void hibernate_restore_protection_begin(void)
{
	hibernate_restore_protection_active = hibernate_restore_protection;
}

static inline void hibernate_restore_protection_end(void)
{
	hibernate_restore_protection_active = false;
}

static inline void hibernate_restore_protect_page(void *page_address)
{
	if (hibernate_restore_protection_active)
		set_memory_ro((unsigned long)page_address, 1);
}

static inline void hibernate_restore_unprotect_page(void *page_address)
{
	if (hibernate_restore_protection_active)
		set_memory_rw((unsigned long)page_address, 1);
}
#else
static inline void hibernate_restore_protection_begin(void) {}
static inline void hibernate_restore_protection_end(void) {}
static inline void hibernate_restore_protect_page(void *page_address) {}
static inline void hibernate_restore_unprotect_page(void *page_address) {}
#endif /* CONFIG_STRICT_KERNEL_RWX && CONFIG_ARCH_HAS_SET_MEMORY */
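
/*
 * Illustrative sketch (not part of the kernel source): the intended call
 * pattern for the helpers above.  A page that has received its final image
 * data is made read-only until the protection window is closed; the window
 * is closed, and pages are made writable again, by swsusp_free() on the
 * cleanup path.  The iterator and helper names below are hypothetical:
 *
 *	hibernate_restore_protection_begin();
 *	for_each_restored_page(page_address) {		// hypothetical
 *		copy_image_data_to(page_address);	// hypothetical
 *		hibernate_restore_protect_page(page_address);
 *	}
 *	...
 *	hibernate_restore_protection_end();
 */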

static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);

/*
 * Number of bytes to reserve for memory allocations made by device drivers
 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
 * cause image creation to fail (tunable via /sys/power/reserved_size).
 */
unsigned long reserved_size;

void __init hibernate_reserved_size_init(void)
{
	reserved_size = SPARE_PAGES * PAGE_SIZE;
}

/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */
unsigned long image_size;

void __init hibernate_image_size_init(void)
{
	image_size = ((totalram_pages() * 2) / 5) * PAGE_SIZE;
}
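
/*
 * Worked example of the default above: with 4 KiB pages and 4 GiB of RAM,
 * totalram_pages() is about 1048576, so image_size defaults to
 * ((1048576 * 2) / 5) * 4096 = 419430 * 4096 bytes, i.e. roughly 1.6 GiB,
 * or two fifths of RAM.
 */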

/*
 * List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;

/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;
	char data[LINKED_PAGE_DATA_SIZE];
} __packed;

/*
 * List of "safe" pages (ie. pages that were not used by the image kernel
 * before hibernation) that may be used as temporary storage for image kernel
 * memory contents.
 */
static struct linked_page *safe_pages_list;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;

#define PG_ANY		0
#define PG_SAFE		1
#define PG_UNSAFE_CLEAR	1
#define PG_UNSAFE_KEEP	0

static unsigned int allocated_unsafe_pages;

/**
 * get_image_page - Allocate a page for a hibernation image.
 * @gfp_mask: GFP mask for the allocation.
 * @safe_needed: Get pages that were not used before hibernation (restore only)
 *
 * During image restoration, for storing the PBE list and the image data, we can
 * only use memory pages that do not conflict with the pages used before
 * hibernation.  The "unsafe" pages have PageNosaveFree set and we count them
 * using allocated_unsafe_pages.
 *
 * Each allocated image page is marked as PageNosave and PageNosaveFree so that
 * swsusp_free() can release it.
 */
static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && swsusp_page_is_free(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			swsusp_set_page_forbidden(virt_to_page(res));
			allocated_unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		swsusp_set_page_forbidden(virt_to_page(res));
		swsusp_set_page_free(virt_to_page(res));
	}
	return res;
}

static void *__get_safe_page(gfp_t gfp_mask)
{
	if (safe_pages_list) {
		void *ret = safe_pages_list;

		safe_pages_list = safe_pages_list->next;
		memset(ret, 0, PAGE_SIZE);
		return ret;
	}
	return get_image_page(gfp_mask, PG_SAFE);
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)__get_safe_page(gfp_mask);
}

static struct page *alloc_image_page(gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_page(gfp_mask);
	if (page) {
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	return page;
}

static void recycle_safe_page(void *page_address)
{
	struct linked_page *lp = page_address;

	lp->next = safe_pages_list;
	safe_pages_list = lp;
}

/**
 * free_image_page - Free a page allocated for hibernation image.
 * @addr: Address of the page to free.
 * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
 *
 * The page to free should have been allocated by get_image_page() (page flags
 * set by it are affected).
 */
static inline void free_image_page(void *addr, int clear_nosave_free)
{
	struct page *page;

	BUG_ON(!virt_addr_valid(addr));

	page = virt_to_page(addr);

	swsusp_unset_page_forbidden(page);
	if (clear_nosave_free)
		swsusp_unset_page_free(page);

	__free_page(page);
}

static inline void free_list_of_pages(struct linked_page *list,
				      int clear_page_nosave)
{
	while (list) {
		struct linked_page *lp = list->next;

		free_image_page(list, clear_page_nosave);
		list = lp;
	}
}
/*
 * struct chain_allocator is used for allocating small objects out of
 * a linked list of pages called 'the chain'.
 *
 * The chain grows each time there is no room for a new object in
 * the current page.  The allocated objects cannot be freed individually.
 * It is only possible to free them all at once, by freeing the entire
 * chain.
 *
 * NOTE: The chain allocator may be inefficient if the allocated objects
 * are not much smaller than PAGE_SIZE.
 */
struct chain_allocator {
	struct linked_page *chain;	/* the chain */
	unsigned int used_space;	/* total size of objects allocated out
					   of the current page */
	gfp_t gfp_mask;		/* mask for allocating pages */
	int safe_needed;	/* if set, only "safe" pages are allocated */
};

static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
		       int safe_needed)
{
	ca->chain = NULL;
	ca->used_space = LINKED_PAGE_DATA_SIZE;
	ca->gfp_mask = gfp_mask;
	ca->safe_needed = safe_needed;
}

static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
	void *ret;

	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
		struct linked_page *lp;

		lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
					get_image_page(ca->gfp_mask, PG_ANY);
		if (!lp)
			return NULL;

		lp->next = ca->chain;
		ca->chain = lp;
		ca->used_space = 0;
	}
	ret = ca->chain->data + ca->used_space;
	ca->used_space += size;
	return ret;
}
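
/*
 * Usage sketch for the chain allocator above (an outline only, not taken
 * from real callers): objects are carved out of linked pages one after
 * another and can only be released by freeing the whole chain.  With
 * 4 KiB pages and 64-bit pointers, each page holds LINKED_PAGE_DATA_SIZE =
 * 4088 bytes of objects:
 *
 *	struct chain_allocator ca;
 *
 *	chain_init(&ca, GFP_KERNEL, PG_ANY);
 *	obj = chain_alloc(&ca, sizeof(*obj));	// first page allocated here
 *	obj2 = chain_alloc(&ca, sizeof(*obj2));	// packed into the same page
 *	...
 *	free_list_of_pages(ca.chain, PG_UNSAFE_CLEAR);	// frees everything
 */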

/**
 * Data types related to memory bitmaps.
 *
 * Memory bitmap is a structure consisting of many linked lists of
 * objects.  The main list's elements are of type struct zone_bitmap
 * and each of them corresponds to one zone.  For each zone bitmap
 * object there is a list of objects of type struct bm_block that
 * represent each block of the bitmap in which information is stored.
 *
 * struct memory_bitmap contains a pointer to the main list of zone
 * bitmap objects, a struct bm_position used for browsing the bitmap,
 * and a pointer to the list of pages used for allocating all of the
 * zone bitmap objects and bitmap block objects.
 *
 * NOTE: It has to be possible to lay out the bitmap in memory
 * using only allocations of order 0.  Additionally, the bitmap is
 * designed to work with an arbitrary number of zones (this is over the
 * top for now, but let's avoid making unnecessary assumptions ;-).
 *
 * struct zone_bitmap contains a pointer to a list of bitmap block
 * objects and a pointer to the bitmap block object that has been
 * most recently used for setting bits.  Additionally, it contains the
 * PFNs that correspond to the start and end of the represented zone.
 *
 * struct bm_block contains a pointer to the memory page in which
 * information is stored (in the form of a block of bitmap).
 * It also contains the pfns that correspond to the start and end of
 * the represented memory area.
 *
 * The memory bitmap is organized as a radix tree to guarantee fast random
 * access to the bits. There is one radix tree for each zone (as returned
 * from create_mem_extents).
 *
 * One radix tree is represented by one struct mem_zone_bm_rtree. There are
 * two linked lists for the nodes of the tree, one for the inner nodes and
 * one for the leaf nodes. The linked leaf nodes are used for fast linear
 * access of the memory bitmap.
 *
 * The struct rtree_node represents one node of the radix tree.
 */

#define BM_END_OF_MAP	(~0UL)

#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
#define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)
#define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)
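
/*
 * With 4 KiB pages each bitmap block covers BM_BITS_PER_BLOCK =
 * 4096 * 8 = 32768 page frames, BM_BLOCK_SHIFT is 12 + 3 = 15 and
 * BM_BLOCK_MASK is 0x7fff: the zone-relative PFN shifted right by
 * BM_BLOCK_SHIFT selects the block and the PFN masked with BM_BLOCK_MASK
 * is the bit index inside it.
 */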

/*
 * struct rtree_node is a wrapper struct to link the nodes
 * of the rtree together for easy linear iteration over
 * bits and easy freeing
 */
struct rtree_node {
	struct list_head list;
	unsigned long *data;
};

/*
 * struct mem_zone_bm_rtree represents a bitmap used for one
 * populated memory zone.
 */
struct mem_zone_bm_rtree {
	struct list_head list;		/* Link Zones together         */
	struct list_head nodes;		/* Radix Tree inner nodes      */
	struct list_head leaves;	/* Radix Tree leaves           */
	unsigned long start_pfn;	/* Zone start page frame       */
	unsigned long end_pfn;		/* Zone end page frame + 1     */
	struct rtree_node *rtree;	/* Radix Tree Root             */
	int levels;			/* Number of Radix Tree Levels */
	unsigned int blocks;		/* Number of Bitmap Blocks     */
};
/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
	struct mem_zone_bm_rtree *zone;
	struct rtree_node *node;
	unsigned long node_pfn;
	int node_bit;
};

struct memory_bitmap {
	struct list_head zones;
	struct linked_page *p_list;	/* list of pages used to store zone
					   bitmap objects and bitmap block
					   objects */
	struct bm_position cur;	/* most recently used bit position */
};

/* Functions that operate on memory bitmaps */

#define BM_ENTRIES_PER_LEVEL	(PAGE_SIZE / sizeof(unsigned long))
#if BITS_PER_LONG == 32
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 2)
#else
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 3)
#endif
#define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
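
/*
 * On 64-bit machines with 4 KiB pages each radix-tree node holds
 * BM_ENTRIES_PER_LEVEL = 4096 / 8 = 512 entries, so BM_RTREE_LEVEL_SHIFT
 * is 9.  A single inner level can therefore address 512 leaf blocks,
 * i.e. 512 * 32768 = 16M page frames (64 GiB of memory); every further
 * level multiplies that by 512.
 */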

/**
 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
 *
 * This function is used to allocate inner nodes as well as the
 * leaf nodes of the radix tree. It also adds the node to the
 * corresponding linked list passed in by the @list parameter.
 */
static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
					   struct chain_allocator *ca,
					   struct list_head *list)
{
	struct rtree_node *node;

	node = chain_alloc(ca, sizeof(struct rtree_node));
	if (!node)
		return NULL;

	node->data = get_image_page(gfp_mask, safe_needed);
	if (!node->data)
		return NULL;

	list_add_tail(&node->list, list);

	return node;
}

/**
 * add_rtree_block - Add a new leaf node to the radix tree.
 *
 * The leaf nodes need to be allocated in ascending block order to keep
 * the leaves linked list sorted. This is guaranteed by the zone->blocks
 * counter.
 */
static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
			   int safe_needed, struct chain_allocator *ca)
{
	struct rtree_node *node, *block, **dst;
	unsigned int levels_needed, block_nr;
	int i;

	block_nr = zone->blocks;
	levels_needed = 0;

	/* How many levels do we need for this block nr? */
	while (block_nr) {
		levels_needed += 1;
		block_nr >>= BM_RTREE_LEVEL_SHIFT;
	}

	/* Make sure the rtree has enough levels */
	for (i = zone->levels; i < levels_needed; i++) {
		node = alloc_rtree_node(gfp_mask, safe_needed, ca,
					&zone->nodes);
		if (!node)
			return -ENOMEM;

		node->data[0] = (unsigned long)zone->rtree;
		zone->rtree = node;
		zone->levels += 1;
	}

	/* Allocate new block */
	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
	if (!block)
		return -ENOMEM;

	/* Now walk the rtree to insert the block */
	node = zone->rtree;
	dst = &zone->rtree;
	block_nr = zone->blocks;
	for (i = zone->levels; i > 0; i--) {
		int index;

		if (!node) {
			node = alloc_rtree_node(gfp_mask, safe_needed, ca,
						&zone->nodes);
			if (!node)
				return -ENOMEM;
			*dst = node;
		}

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		dst = (struct rtree_node **)&((*dst)->data[index]);
		node = *dst;
	}

	zone->blocks += 1;
	*dst = block;

	return 0;
}

static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free);

/**
 * create_zone_bm_rtree - Create a radix tree for one zone.
 *
 * Allocates the mem_zone_bm_rtree structure and initializes it.
 * This function also allocates and builds the radix tree for the
 * zone.
 */
static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
						      int safe_needed,
						      struct chain_allocator *ca,
						      unsigned long start,
						      unsigned long end)
{
	struct mem_zone_bm_rtree *zone;
	unsigned int i, nr_blocks;
	unsigned long pages;

	pages = end - start;
	zone  = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
	if (!zone)
		return NULL;

	INIT_LIST_HEAD(&zone->nodes);
	INIT_LIST_HEAD(&zone->leaves);
	zone->start_pfn = start;
	zone->end_pfn = end;
	nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);

	for (i = 0; i < nr_blocks; i++) {
		if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
			free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
			return NULL;
		}
	}

	return zone;
}

/**
 * free_zone_bm_rtree - Free the memory of the radix tree.
 *
 * Free all node pages of the radix tree. The mem_zone_bm_rtree
 * structure itself is not freed here nor are the rtree_node
 * structs.
 */
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		free_image_page(node->data, clear_nosave_free);

	list_for_each_entry(node, &zone->leaves, list)
		free_image_page(node->data, clear_nosave_free);
}

static void memory_bm_position_reset(struct memory_bitmap *bm)
{
	bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
				  list);
	bm->cur.node = list_entry(bm->cur.zone->leaves.next,
				  struct rtree_node, list);
	bm->cur.node_pfn = 0;
	bm->cur.node_bit = 0;
}

static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);

struct mem_extent {
	struct list_head hook;
	unsigned long start;
	unsigned long end;
};

/**
 * free_mem_extents - Free a list of memory extents.
 * @list: List of extents to free.
 */
static void free_mem_extents(struct list_head *list)
{
	struct mem_extent *ext, *aux;

	list_for_each_entry_safe(ext, aux, list, hook) {
		list_del(&ext->hook);
		kfree(ext);
	}
}

/**
 * create_mem_extents - Create a list of memory extents.
 * @list: List to put the extents into.
 * @gfp_mask: Mask to use for memory allocations.
 *
 * The extents represent contiguous ranges of PFNs.
 */
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
	struct zone *zone;

	INIT_LIST_HEAD(list);

	for_each_populated_zone(zone) {
		unsigned long zone_start, zone_end;
		struct mem_extent *ext, *cur, *aux;

		zone_start = zone->zone_start_pfn;
		zone_end = zone_end_pfn(zone);

		list_for_each_entry(ext, list, hook)
			if (zone_start <= ext->end)
				break;

		if (&ext->hook == list || zone_end < ext->start) {
			/* New extent is necessary */
			struct mem_extent *new_ext;

			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
			if (!new_ext) {
				free_mem_extents(list);
				return -ENOMEM;
			}
			new_ext->start = zone_start;
			new_ext->end = zone_end;
			list_add_tail(&new_ext->hook, &ext->hook);
			continue;
		}

		/* Merge this zone's range of PFNs with the existing one */
		if (zone_start < ext->start)
			ext->start = zone_start;
		if (zone_end > ext->end)
			ext->end = zone_end;

		/* More merging may be possible */
		cur = ext;
		list_for_each_entry_safe_continue(cur, aux, list, hook) {
			if (zone_end < cur->start)
				break;
			if (zone_end < cur->end)
				ext->end = cur->end;
			list_del(&cur->hook);
			kfree(cur);
		}
	}

	return 0;
}
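
/*
 * Worked example of the merging above, with hypothetical zone layouts: if
 * the populated zones span PFNs [0, 4096), [2048, 8192) and [10240, 12288),
 * the first two ranges overlap and are merged, so the resulting list holds
 * two extents, [0, 8192) and [10240, 12288).  memory_bm_create() below
 * builds one radix tree per extent.
 */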

/**
 * memory_bm_create - Allocate memory for a memory bitmap.
 */
static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
			    int safe_needed)
{
	struct chain_allocator ca;
	struct list_head mem_extents;
	struct mem_extent *ext;
	int error;

	chain_init(&ca, gfp_mask, safe_needed);
	INIT_LIST_HEAD(&bm->zones);

	error = create_mem_extents(&mem_extents, gfp_mask);
	if (error)
		return error;

	list_for_each_entry(ext, &mem_extents, hook) {
		struct mem_zone_bm_rtree *zone;

		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
					    ext->start, ext->end);
		if (!zone) {
			error = -ENOMEM;
			goto Error;
		}
		list_add_tail(&zone->list, &bm->zones);
	}

	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
 Exit:
	free_mem_extents(&mem_extents);
	return error;

 Error:
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	goto Exit;
}

/**
 * memory_bm_free - Free memory occupied by the memory bitmap.
 * @bm: Memory bitmap.
 */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
	struct mem_zone_bm_rtree *zone;

	list_for_each_entry(zone, &bm->zones, list)
		free_zone_bm_rtree(zone, clear_nosave_free);

	free_list_of_pages(bm->p_list, clear_nosave_free);

	INIT_LIST_HEAD(&bm->zones);
}

/**
 * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
 *
 * Find the bit in memory bitmap @bm that corresponds to the given PFN.
 * The cur.zone, cur.node and cur.node_pfn members of @bm are updated.
 *
 * Walk the radix tree to find the page containing the bit that represents @pfn
 * and return the position of the bit in @addr and @bit_nr.
 */
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
			      void **addr, unsigned int *bit_nr)
{
	struct mem_zone_bm_rtree *curr, *zone;
	struct rtree_node *node;
	int i, block_nr;

	zone = bm->cur.zone;

	if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
		goto zone_found;

	zone = NULL;

	/* Find the right zone */
	list_for_each_entry(curr, &bm->zones, list) {
		if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
			zone = curr;
			break;
		}
	}

	if (!zone)
		return -EFAULT;

zone_found:
	/*
	 * We have found the zone. Now walk the radix tree to find the leaf node
	 * for our PFN.
	 */
	node = bm->cur.node;
	if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
		goto node_found;

	node      = zone->rtree;
	block_nr  = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;

	for (i = zone->levels; i > 0; i--) {
		int index;

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		BUG_ON(node->data[index] == 0);
		node = (struct rtree_node *)node->data[index];
	}

node_found:
	/* Update last position */
	bm->cur.zone = zone;
	bm->cur.node = node;
	bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;

	/* Set return values */
	*addr = node->data;
	*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;

	return 0;
}
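
/*
 * Worked example for the walk above (64-bit, 4 KiB pages, hypothetical
 * numbers): for a zone starting at PFN 0x10000 and pfn = 0x19234, the
 * zone-relative PFN is 0x9234, so block_nr = 0x9234 >> 15 = 1.  A zone
 * with fewer than 512 blocks has a single tree level, so the loop runs
 * once with index = 1 & 0x1ff = 1 and lands on the second leaf node;
 * *bit_nr becomes 0x9234 & 0x7fff = 0x1234.
 */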

static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	set_bit(bit, addr);
}

static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	if (!error)
		set_bit(bit, addr);

	return error;
}

static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	clear_bit(bit, addr);
}

static void memory_bm_clear_current(struct memory_bitmap *bm)
{
	int bit;

	bit = max(bm->cur.node_bit - 1, 0);
	clear_bit(bit, bm->cur.node->data);
}

static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	return test_bit(bit, addr);
}

static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;

	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
}
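
/*
 * Usage sketch for the bit operations above (an outline only): callers
 * treat a memory bitmap as a set of PFNs.
 *
 *	memory_bm_set_bit(bm, pfn);		// add pfn to the set
 *	if (memory_bm_test_bit(bm, pfn))	// membership test
 *		memory_bm_clear_bit(bm, pfn);	// remove pfn
 *
 * The set/clear/test helpers BUG() on PFNs that fall outside all zones,
 * while mem_bm_set_bit_check() returns -EFAULT instead; see
 * mark_nosave_pages() for a caller relying on that.
 */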

/*
 * rtree_next_node - Jump to the next leaf node.
 *
 * Set the position to the beginning of the next node in the
 * memory bitmap. This is either the next node in the current
 * zone's radix tree or the first node in the radix tree of the
 * next zone.
 *
 * Return true if there is a next node, false otherwise.
 */
static bool rtree_next_node(struct memory_bitmap *bm)
{
	if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
		bm->cur.node = list_entry(bm->cur.node->list.next,
					  struct rtree_node, list);
		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
		bm->cur.node_bit  = 0;
		touch_softlockup_watchdog();
		return true;
	}

	/* No more nodes, goto next zone */
	if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
		bm->cur.zone = list_entry(bm->cur.zone->list.next,
				  struct mem_zone_bm_rtree, list);
		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
					  struct rtree_node, list);
		bm->cur.node_pfn = 0;
		bm->cur.node_bit = 0;
		return true;
	}

	/* No more zones */
	return false;
}

/**
 * memory_bm_next_pfn - Find the next set bit in a memory bitmap.
 * @bm: Memory bitmap.
 *
 * Starting from the last returned position this function searches for the next
 * set bit in @bm and returns the PFN represented by it.  If no more bits are
 * set, BM_END_OF_MAP is returned.
 *
 * It is required to run memory_bm_position_reset() before the first call to
 * this function for the given memory bitmap.
 */
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	unsigned long bits, pfn, pages;
	int bit;

	do {
		pages	  = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
		bits      = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
		bit	  = find_next_bit(bm->cur.node->data, bits,
					  bm->cur.node_bit);
		if (bit < bits) {
			pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
			bm->cur.node_bit = bit + 1;
			return pfn;
		}
	} while (rtree_next_node(bm));

	return BM_END_OF_MAP;
}
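
/*
 * Sketch of the iteration idiom used by the callers of the function above
 * (do_something_with() is a hypothetical placeholder):
 *
 *	unsigned long pfn;
 *
 *	memory_bm_position_reset(bm);
 *	for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
 *	     pfn = memory_bm_next_pfn(bm))
 *		do_something_with(pfn);
 */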

/*
 * This structure represents a range of page frames the contents of which
 * should not be saved during hibernation.
 */
struct nosave_region {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static LIST_HEAD(nosave_regions);

static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		recycle_safe_page(node->data);

	list_for_each_entry(node, &zone->leaves, list)
		recycle_safe_page(node->data);
}

static void memory_bm_recycle(struct memory_bitmap *bm)
{
	struct mem_zone_bm_rtree *zone;
	struct linked_page *p_list;

	list_for_each_entry(zone, &bm->zones, list)
		recycle_zone_bm_rtree(zone);

	p_list = bm->p_list;
	while (p_list) {
		struct linked_page *lp = p_list;

		p_list = lp->next;
		recycle_safe_page(lp);
	}
}
/**
 * __register_nosave_region - Register a region of unsaveable memory.
 *
 * Register a range of page frames the contents of which should not be saved
 * during hibernation (to be used in the early initialization code).
 */
void __init __register_nosave_region(unsigned long start_pfn,
				     unsigned long end_pfn, int use_kmalloc)
{
	struct nosave_region *region;

	if (start_pfn >= end_pfn)
		return;

	if (!list_empty(&nosave_regions)) {
		/* Try to extend the previous region (they should be sorted) */
		region = list_entry(nosave_regions.prev,
					struct nosave_region, list);
		if (region->end_pfn == start_pfn) {
			region->end_pfn = end_pfn;
			goto Report;
		}
	}
	if (use_kmalloc) {
		/* During init, this shouldn't fail */
		region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
		BUG_ON(!region);
	} else {
		/* This allocation cannot fail */
		region = memblock_alloc(sizeof(struct nosave_region),
					SMP_CACHE_BYTES);
		if (!region)
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      sizeof(struct nosave_region));
	}
	region->start_pfn = start_pfn;
	region->end_pfn = end_pfn;
	list_add_tail(&region->list, &nosave_regions);
 Report:
	pr_info("Registered nosave memory: [mem %#010llx-%#010llx]\n",
		(unsigned long long) start_pfn << PAGE_SHIFT,
		((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
}
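
/*
 * Illustrative caller (a sketch with hypothetical values, not taken from
 * this file): early platform code can exclude a firmware-owned physical
 * range like this, where fw_start and fw_end are hypothetical addresses:
 *
 *	__register_nosave_region(PFN_DOWN(fw_start), PFN_UP(fw_end), 0);
 *
 * Passing 0 for use_kmalloc makes the region description come from
 * memblock, as required before the slab allocator is available; regions
 * registered in ascending order that touch the previous one are merged
 * by the code above.
 */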

/*
 * Set bits in this map correspond to the page frames the contents of which
 * should not be saved during the suspend.
 */
static struct memory_bitmap *forbidden_pages_map;

/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;

/*
 * Each page frame allocated for creating the image is marked by setting the
 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously.
 */

void swsusp_set_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}

static int swsusp_page_is_free(struct page *page)
{
	return free_pages_map ?
		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}

void swsusp_unset_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}

static void swsusp_set_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}

int swsusp_page_is_forbidden(struct page *page)
{
	return forbidden_pages_map ?
		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}

static void swsusp_unset_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}

/**
 * mark_nosave_pages - Mark pages that should not be saved.
 * @bm: Memory bitmap.
 *
 * Set the bits in @bm that correspond to the page frames the contents of which
 * should not be saved.
 */
static void mark_nosave_pages(struct memory_bitmap *bm)
{
	struct nosave_region *region;

	if (list_empty(&nosave_regions))
		return;

	list_for_each_entry(region, &nosave_regions, list) {
		unsigned long pfn;

		pr_debug("Marking nosave pages: [mem %#010llx-%#010llx]\n",
			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
				- 1);

		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
			if (pfn_valid(pfn)) {
				/*
				 * It is safe to ignore the result of
				 * mem_bm_set_bit_check() here, since we won't
				 * touch the PFNs for which the error is
				 * returned anyway.
				 */
				mem_bm_set_bit_check(bm, pfn);
			}
	}
}

/**
 * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
 *
 * Create bitmaps needed for marking page frames that should not be saved and
 * free page frames.  The forbidden_pages_map and free_pages_map pointers are
 * only modified if everything goes well, because we don't want the bits to be
 * touched before both bitmaps are set up.
 */
int create_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;
	int error = 0;

	if (forbidden_pages_map && free_pages_map)
		return 0;
	else
		BUG_ON(forbidden_pages_map || free_pages_map);

	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm1)
		return -ENOMEM;

	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_first_object;

	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm2)
		goto Free_first_bitmap;

	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_second_object;

	forbidden_pages_map = bm1;
	free_pages_map = bm2;
	mark_nosave_pages(forbidden_pages_map);

	pr_debug("Basic memory bitmaps created\n");

	return 0;

 Free_second_object:
	kfree(bm2);
 Free_first_bitmap:
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
	kfree(bm1);
	return -ENOMEM;
}

/**
 * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
 *
 * Free memory bitmaps allocated by create_basic_memory_bitmaps().  The
 * auxiliary pointers are necessary so that the bitmaps themselves are not
 * referred to while they are being freed.
 */
void free_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;

	if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
		return;

	bm1 = forbidden_pages_map;
	bm2 = free_pages_map;
	forbidden_pages_map = NULL;
	free_pages_map = NULL;
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
	kfree(bm1);
	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
	kfree(bm2);

	pr_debug("Basic memory bitmaps freed\n");
}

void clear_free_pages(void)
{
#ifdef CONFIG_PAGE_POISONING_ZERO
	struct memory_bitmap *bm = free_pages_map;
	unsigned long pfn;

	if (WARN_ON(!(free_pages_map)))
		return;

	memory_bm_position_reset(bm);
	pfn = memory_bm_next_pfn(bm);
	while (pfn != BM_END_OF_MAP) {
		if (pfn_valid(pfn))
			clear_highpage(pfn_to_page(pfn));

		pfn = memory_bm_next_pfn(bm);
	}
	memory_bm_position_reset(bm);
	pr_info("free pages cleared after restore\n");
#endif /* PAGE_POISONING_ZERO */
}

/**
 * snapshot_additional_pages - Estimate the number of extra pages needed.
 * @zone: Memory zone to carry out the computation for.
 *
 * Estimate the number of additional pages needed for setting up the
 * hibernation image data structures for @zone (usually, the returned value is
 * greater than the exact number).
 */
unsigned int snapshot_additional_pages(struct zone *zone)
{
	unsigned int rtree, nodes;

	rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
	rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
			      LINKED_PAGE_DATA_SIZE);
	while (nodes > 1) {
		nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
		rtree += nodes;
	}

	return 2 * rtree;
}
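
/*
 * Worked example (64-bit, 4 KiB pages): a zone spanning 1 GiB has 262144
 * pages, so the bitmap needs DIV_ROUND_UP(262144, 32768) = 8 leaf pages,
 * one page for the eight 24-byte struct rtree_node wrappers and one inner
 * node page, i.e. rtree = 10; the function returns 2 * 10 = 20 page
 * frames, covering the two bitmaps (orig_bm and copy_bm) set up while
 * preallocating the image.
 */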

#ifdef CONFIG_HIGHMEM
/**
 * count_free_highmem_pages - Compute the total number of free highmem pages.
 *
 * The returned number is system-wide.
 */
static unsigned int count_free_highmem_pages(void)
{
	struct zone *zone;
	unsigned int cnt = 0;

	for_each_populated_zone(zone)
		if (is_highmem(zone))
			cnt += zone_page_state(zone, NR_FREE_PAGES);

	return cnt;
}

/**
 * saveable_highmem_page - Check if a highmem page is saveable.
 *
 * Determine whether a highmem page should be included in a hibernation image.
 *
 * We should save the page if it isn't Nosave or NosaveFree, or Reserved,
 * and it isn't part of a free chunk of pages.
 */
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_online_page(pfn);
	if (!page || page_zone(page) != zone)
		return NULL;

	BUG_ON(!PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageReserved(page) || PageOffline(page))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_highmem_pages - Compute the total number of saveable highmem pages.
 */
static unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		unsigned long pfn, max_zone_pfn;

		if (!is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_highmem_page(zone, pfn))
				n++;
	}
	return n;
}
#else
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
	return NULL;
}
#endif /* CONFIG_HIGHMEM */

/**
 * saveable_page - Check if the given page is saveable.
 *
 * Determine whether a non-highmem page should be included in a hibernation
 * image.
 *
 * We should save the page if it isn't Nosave, and is not in the range
 * of pages statically defined as 'unsaveable', and it isn't part of
 * a free chunk of pages.
 */
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_online_page(pfn);
	if (!page || page_zone(page) != zone)
		return NULL;

	BUG_ON(PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageOffline(page))
		return NULL;

	if (PageReserved(page)
	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_data_pages - Compute the total number of saveable non-highmem pages.
 */
static unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(zone, pfn))
				n++;
	}
	return n;
}

/*
 * This is needed, because copy_page and memcpy are not usable for copying
 * task structs.
 */
static inline void do_copy_page(long *dst, long *src)
{
	int n;

	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}

/**
 * safe_copy_page - Copy a page in a safe way.
 *
 * Check if the page we are going to copy is marked as present in the kernel
 * page tables. This always is the case if CONFIG_DEBUG_PAGEALLOC or
 * CONFIG_ARCH_HAS_SET_DIRECT_MAP is not set. In that case kernel_page_present()
 * always returns 'true'.
 */
static void safe_copy_page(void *dst, struct page *s_page)
{
	if (kernel_page_present(s_page)) {
		do_copy_page(dst, page_address(s_page));
	} else {
		kernel_map_pages(s_page, 1, 1);
		do_copy_page(dst, page_address(s_page));
		kernel_map_pages(s_page, 1, 0);
	}
}

#ifdef CONFIG_HIGHMEM
static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
{
	return is_highmem(zone) ?
		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}

static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	struct page *s_page, *d_page;
	void *src, *dst;

	s_page = pfn_to_page(src_pfn);
	d_page = pfn_to_page(dst_pfn);
	if (PageHighMem(s_page)) {
		src = kmap_atomic(s_page);
		dst = kmap_atomic(d_page);
		do_copy_page(dst, src);
		kunmap_atomic(dst);
		kunmap_atomic(src);
	} else {
		if (PageHighMem(d_page)) {
			/*
			 * The page pointed to by src may contain some kernel
			 * data modified by kmap_atomic()
			 */
			safe_copy_page(buffer, s_page);
			dst = kmap_atomic(d_page);
			copy_page(dst, buffer);
			kunmap_atomic(dst);
		} else {
			safe_copy_page(page_address(d_page), s_page);
		}
	}
}
#else
#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)

static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
				pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */

static void copy_data_pages(struct memory_bitmap *copy_bm,
			    struct memory_bitmap *orig_bm)
{
	struct zone *zone;
	unsigned long pfn;

	for_each_populated_zone(zone) {
		unsigned long max_zone_pfn;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (page_is_saveable(zone, pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	for (;;) {
		pfn = memory_bm_next_pfn(orig_bm);
		if (unlikely(pfn == BM_END_OF_MAP))
			break;
		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
	}
}

/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
/*
 * Numbers of normal and highmem page frames allocated for hibernation image
 * before suspending devices.
 */
static unsigned int alloc_normal, alloc_highmem;
/*
 * Memory bitmap used for marking saveable pages (during hibernation) or
 * hibernation image pages (during restore)
 */
static struct memory_bitmap orig_bm;
/*
 * Memory bitmap used during hibernation for marking allocated page frames that
 * will contain copies of saveable pages.  During restore it is initially used
 * for marking hibernation image pages, but then the set bits from it are
 * duplicated in @orig_bm and it is released.  On highmem systems it is next
 * used for marking "safe" highmem pages, but it has to be reinitialized for
 * this purpose.
 */
static struct memory_bitmap copy_bm;

/**
 * swsusp_free - Free pages allocated for hibernation image.
 *
 * Image pages are allocated before snapshot creation, so they need to be
 * released after resume.
 */
void swsusp_free(void)
{
	unsigned long fb_pfn, fr_pfn;

	if (!forbidden_pages_map || !free_pages_map)
		goto out;

	memory_bm_position_reset(forbidden_pages_map);
	memory_bm_position_reset(free_pages_map);

loop:
	fr_pfn = memory_bm_next_pfn(free_pages_map);
	fb_pfn = memory_bm_next_pfn(forbidden_pages_map);

	/*
	 * Find the next bit set in both bitmaps. This is guaranteed to
	 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
	 */
	do {
		if (fb_pfn < fr_pfn)
			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
		if (fr_pfn < fb_pfn)
			fr_pfn = memory_bm_next_pfn(free_pages_map);
	} while (fb_pfn != fr_pfn);

	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
		struct page *page = pfn_to_page(fr_pfn);

		memory_bm_clear_current(forbidden_pages_map);
		memory_bm_clear_current(free_pages_map);
		hibernate_restore_unprotect_page(page_address(page));
		__free_page(page);
		goto loop;
	}

out:
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
	alloc_normal = 0;
	alloc_highmem = 0;
	hibernate_restore_protection_end();
}

/* Helper functions used for the shrinking of memory. */

#define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)

/**
 * preallocate_image_pages - Allocate a number of pages for hibernation image.
 * @nr_pages: Number of page frames to allocate.
 * @mask: GFP flags to use for the allocation.
 *
 * Return value: Number of page frames actually allocated
 */
static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
{
	unsigned long nr_alloc = 0;

	while (nr_pages > 0) {
		struct page *page;

		page = alloc_image_page(mask);
		if (!page)
			break;
		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
		if (PageHighMem(page))
			alloc_highmem++;
		else
			alloc_normal++;
		nr_pages--;
		nr_alloc++;
	}

	return nr_alloc;
}

static unsigned long preallocate_image_memory(unsigned long nr_pages,
					      unsigned long avail_normal)
{
	unsigned long alloc;

	if (avail_normal <= alloc_normal)
		return 0;

	alloc = avail_normal - alloc_normal;
	if (nr_pages < alloc)
		alloc = nr_pages;

	return preallocate_image_pages(alloc, GFP_IMAGE);
}

#ifdef CONFIG_HIGHMEM
static unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
}

/**
 *  __fraction - Compute (an approximation of) x * (multiplier / base).
 */
static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
{
	x *= multiplier;
	do_div(x, base);
	return (unsigned long)x;
}
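
/*
 * For example, __fraction(1000, 256, 1024) yields 1000 * 256 / 1024 = 250,
 * using a 64-bit intermediate so the multiplication cannot overflow; this
 * is how preallocate_highmem_fraction() below scales a page count by the
 * highmem/total ratio.
 */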

static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
						  unsigned long highmem,
						  unsigned long total)
{
	unsigned long alloc = __fraction(nr_pages, highmem, total);

	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
}
#else /* CONFIG_HIGHMEM */
static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return 0;
}

static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
							 unsigned long highmem,
							 unsigned long total)
{
	return 0;
}
#endif /* CONFIG_HIGHMEM */

/**
 * free_unnecessary_pages - Release preallocated pages not needed for the image.
 */
static unsigned long free_unnecessary_pages(void)
{
	unsigned long save, to_free_normal, to_free_highmem, free;

	save = count_data_pages();
	if (alloc_normal >= save) {
		to_free_normal = alloc_normal - save;
		save = 0;
	} else {
		to_free_normal = 0;
		save -= alloc_normal;
	}
	save += count_highmem_pages();
	if (alloc_highmem >= save) {
		to_free_highmem = alloc_highmem - save;
	} else {
		to_free_highmem = 0;
		save -= alloc_highmem;
		if (to_free_normal > save)
			to_free_normal -= save;
		else
			to_free_normal = 0;
	}
	free = to_free_normal + to_free_highmem;

	memory_bm_position_reset(&copy_bm);

	while (to_free_normal > 0 || to_free_highmem > 0) {
		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
		struct page *page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (!to_free_highmem)
				continue;
			to_free_highmem--;
			alloc_highmem--;
		} else {
			if (!to_free_normal)
				continue;
			to_free_normal--;
			alloc_normal--;
		}
		memory_bm_clear_bit(&copy_bm, pfn);
		swsusp_unset_page_forbidden(page);
		swsusp_unset_page_free(page);
		__free_page(page);
	}

	return free;
}

/**
 * minimum_image_size - Estimate the minimum acceptable size of an image.
 * @saveable: Number of saveable pages in the system.
 *
 * We want to avoid attempting to free too much memory too hard, so estimate the
 * minimum acceptable size of a hibernation image to use as the lower limit for
 * preallocating memory.
 *
 * We assume that the minimum image size should be proportional to
 *
 * [number of saveable pages] - [number of pages that can be freed in theory]
 *
 * where the second term is the sum of (1) reclaimable slab pages, (2) active
 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages.
 */
static unsigned long minimum_image_size(unsigned long saveable)
{
	unsigned long size;

	size = global_node_page_state(NR_SLAB_RECLAIMABLE)
		+ global_node_page_state(NR_ACTIVE_ANON)
		+ global_node_page_state(NR_INACTIVE_ANON)
		+ global_node_page_state(NR_ACTIVE_FILE)
		+ global_node_page_state(NR_INACTIVE_FILE);

	return saveable <= size ? 0 : saveable - size;
}

/**
 * hibernate_preallocate_memory - Preallocate memory for hibernation image.
 *
 * To create a hibernation image it is necessary to make a copy of every page
 * frame in use.  We also need a number of page frames to be free during
 * hibernation for allocations made while saving the image and for device
 * drivers, in case they need to allocate memory from their hibernation
 * callbacks (these two numbers are given by PAGES_FOR_IO, which is a rough
 * estimate, and reserved_size divided by PAGE_SIZE, which is tunable through
 * /sys/power/reserved_size, respectively).  To make this happen, we compute the
 * total number of available page frames and allocate at least
 *
 * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
 *  + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
 *
 * of them, which corresponds to the maximum size of a hibernation image.
 *
 * If image_size is set below the number following from the above formula,
 * the preallocation of memory is continued until the total number of saveable
 * pages in the system is below the requested image size or the minimum
 * acceptable image size returned by minimum_image_size(), whichever is greater.
 */
int hibernate_preallocate_memory(void)
{
	struct zone *zone;
	unsigned long saveable, size, max_size, count, highmem, pages = 0;
	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
	ktime_t start, stop;
	int error;

	pr_info("Preallocating image memory... ");
	start = ktime_get();

	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
	if (error)
		goto err_out;

	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
	if (error)
		goto err_out;

	alloc_normal = 0;
	alloc_highmem = 0;

	/* Count the number of saveable data pages. */
	save_highmem = count_highmem_pages();
	saveable = count_data_pages();

	/*
	 * Compute the total number of page frames we can use (count) and the
	 * number of pages needed for image metadata (size).
	 */
	count = saveable;
	saveable += save_highmem;
	highmem = save_highmem;
	size = 0;
	for_each_populated_zone(zone) {
		size += snapshot_additional_pages(zone);
		if (is_highmem(zone))
			highmem += zone_page_state(zone, NR_FREE_PAGES);
		else
			count += zone_page_state(zone, NR_FREE_PAGES);
	}
	avail_normal = count;
	count += highmem;
	count -= totalreserve_pages;

	/* Add number of pages required for page keys (s390 only). */
	size += page_key_additional_pages(saveable);

	/* Compute the maximum number of saveable pages to leave in memory. */
	max_size = (count - (size + PAGES_FOR_IO)) / 2
			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
	/* Compute the desired number of image pages specified by image_size. */
	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
	if (size > max_size)
		size = max_size;
	/*
	 * If the desired number of image pages is at least as large as the
	 * current number of saveable pages in memory, allocate page frames for
	 * the image and we're done.
	 */
	if (size >= saveable) {
		pages = preallocate_image_highmem(save_highmem);
		pages += preallocate_image_memory(saveable - pages, avail_normal);
		goto out;
	}

	/* Estimate the minimum size of the image. */
	pages = minimum_image_size(saveable);
	/*
	 * To avoid excessive pressure on the normal zone, leave room in it to
	 * accommodate an image of the minimum size (unless it's already too
	 * small, in which case don't preallocate pages from it at all).
	 */
	if (avail_normal > pages)
		avail_normal -= pages;
	else
		avail_normal = 0;
	if (size < pages)
		size = min_t(unsigned long, pages, max_size);

	/*
	 * Let the memory management subsystem know that we're going to need a
	 * large number of page frames to allocate and make it free some memory.
	 * NOTE: If this is not done, performance will be hurt badly in some
	 * test cases.
	 */
	shrink_all_memory(saveable - size);

	/*
	 * The number of saveable pages in memory was too high, so apply some
	 * pressure to decrease it.  First, make room for the largest possible
	 * image and fail if that doesn't work.  Next, try to decrease the size
	 * of the image as much as indicated by 'size' using allocations from
	 * highmem and non-highmem zones separately.
	 */
	pages_highmem = preallocate_image_highmem(highmem / 2);
	alloc = count - max_size;
	if (alloc > pages_highmem)
		alloc -= pages_highmem;
	else
		alloc = 0;
	pages = preallocate_image_memory(alloc, avail_normal);
	if (pages < alloc) {
		/* We have exhausted non-highmem pages, try highmem. */
		alloc -= pages;
		pages += pages_highmem;
		pages_highmem = preallocate_image_highmem(alloc);
		if (pages_highmem < alloc)
			goto err_out;
		pages += pages_highmem;
		/*
		 * size is the desired number of saveable pages to leave in
		 * memory, so try to preallocate (all memory - size) pages.
		 */
		alloc = (count - pages) - size;
		pages += preallocate_image_highmem(alloc);
	} else {
		/*
		 * There are approximately max_size saveable pages at this point
		 * and we want to reduce this number down to size.
		 */
		alloc = max_size - size;
		size = preallocate_highmem_fraction(alloc, highmem, count);
		pages_highmem += size;
		alloc -= size;
		size = preallocate_image_memory(alloc, avail_normal);
		pages_highmem += preallocate_image_highmem(alloc - size);
		pages += pages_highmem + size;
	}

	/*
	 * We only need as many page frames for the image as there are saveable
	 * pages in memory, but we have allocated more.  Release the excessive
	 * ones now.
	 */
	pages -= free_unnecessary_pages();

 out:
	stop = ktime_get();
	pr_cont("done (allocated %lu pages)\n", pages);
	swsusp_show_speed(start, stop, pages, "Allocated");

	return 0;

 err_out:
	pr_cont("\n");
	swsusp_free();
	return -ENOMEM;
}
1844 
1845 #ifdef CONFIG_HIGHMEM
1846 /**
1847  * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
1848  *
1849  * Compute the number of non-highmem pages that will be necessary for creating
1850  * copies of highmem pages.
1851  */
1852 static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
1853 {
1854 	unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
1855 
1856 	if (free_highmem >= nr_highmem)
1857 		nr_highmem = 0;
1858 	else
1859 		nr_highmem -= free_highmem;
1860 
1861 	return nr_highmem;
1862 }
1863 #else
1864 static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
1865 #endif /* CONFIG_HIGHMEM */
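
/*
 * Example (hypothetical numbers): with nr_highmem == 1000 highmem image pages
 * and only 600 free (or already allocated) highmem page frames,
 * count_pages_for_highmem() reports that 400 non-highmem page frames will be
 * needed to hold copies of the remaining highmem pages.
 */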
1866 
1867 /**
1868  * enough_free_mem - Check if there is enough free memory for the image.
1869  */
1870 static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
1871 {
1872 	struct zone *zone;
1873 	unsigned int free = alloc_normal;
1874 
1875 	for_each_populated_zone(zone)
1876 		if (!is_highmem(zone))
1877 			free += zone_page_state(zone, NR_FREE_PAGES);
1878 
1879 	nr_pages += count_pages_for_highmem(nr_highmem);
1880 	pr_debug("Normal pages needed: %u + %u, available pages: %u\n",
1881 		 nr_pages, PAGES_FOR_IO, free);
1882 
1883 	return free > nr_pages + PAGES_FOR_IO;
1884 }
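
/*
 * Continuing the example above (hypothetical numbers): for nr_pages == 50000
 * data pages plus the 400 non-highmem frames needed for highmem copies, the
 * image fits only if free > 50400 + PAGES_FOR_IO, the PAGES_FOR_IO term being
 * headroom kept for I/O while the image is written out.
 */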
1885 
1886 #ifdef CONFIG_HIGHMEM
1887 /**
1888  * get_highmem_buffer - Allocate a buffer for highmem pages.
1889  *
1890  * If there are some highmem pages in the hibernation image, we may need a
1891  * buffer to copy them and/or load their data.
1892  */
1893 static inline int get_highmem_buffer(int safe_needed)
1894 {
1895 	buffer = get_image_page(GFP_ATOMIC, safe_needed);
1896 	return buffer ? 0 : -ENOMEM;
1897 }
1898 
1899 /**
1900  * alloc_highmem_pages - Allocate some highmem pages for the image.
1901  *
1902  * Try to allocate as many pages as needed, but if the number of free highmem
1903  * pages is less than that, allocate them all.
1904  */
1905 static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
1906 					       unsigned int nr_highmem)
1907 {
1908 	unsigned int to_alloc = count_free_highmem_pages();
1909 
1910 	if (to_alloc > nr_highmem)
1911 		to_alloc = nr_highmem;
1912 
1913 	nr_highmem -= to_alloc;
1914 	while (to_alloc-- > 0) {
1915 		struct page *page;
1916 
1917 		page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
1918 		memory_bm_set_bit(bm, page_to_pfn(page));
1919 	}
1920 	return nr_highmem;
1921 }
1922 #else
1923 static inline int get_highmem_buffer(int safe_needed) { return 0; }
1924 
1925 static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
1926 					       unsigned int n) { return 0; }
1927 #endif /* CONFIG_HIGHMEM */
1928 
1929 /**
1930  * swsusp_alloc - Allocate memory for hibernation image.
1931  *
1932  * We first try to allocate as many highmem pages as there are
1933  * saveable highmem pages in the system.  If that fails, we allocate
1934  * non-highmem pages for the copies of the remaining highmem ones.
1935  *
1936  * In this approach it is likely that the copies of highmem pages will
1937  * also be located in the high memory, because of the way in which
1938  * copy_data_pages() works.
1939  */
1940 static int swsusp_alloc(struct memory_bitmap *copy_bm,
1941 			unsigned int nr_pages, unsigned int nr_highmem)
1942 {
1943 	if (nr_highmem > 0) {
1944 		if (get_highmem_buffer(PG_ANY))
1945 			goto err_out;
1946 		if (nr_highmem > alloc_highmem) {
1947 			nr_highmem -= alloc_highmem;
1948 			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
1949 		}
1950 	}
1951 	if (nr_pages > alloc_normal) {
1952 		nr_pages -= alloc_normal;
1953 		while (nr_pages-- > 0) {
1954 			struct page *page;
1955 
1956 			page = alloc_image_page(GFP_ATOMIC);
1957 			if (!page)
1958 				goto err_out;
1959 			memory_bm_set_bit(copy_bm, page_to_pfn(page));
1960 		}
1961 	}
1962 
1963 	return 0;
1964 
1965  err_out:
1966 	swsusp_free();
1967 	return -ENOMEM;
1968 }
1969 
1970 asmlinkage __visible int swsusp_save(void)
1971 {
1972 	unsigned int nr_pages, nr_highmem;
1973 
1974 	pr_info("Creating hibernation image:\n");
1975 
1976 	drain_local_pages(NULL);
1977 	nr_pages = count_data_pages();
1978 	nr_highmem = count_highmem_pages();
1979 	pr_info("Need to copy %u pages\n", nr_pages + nr_highmem);
1980 
1981 	if (!enough_free_mem(nr_pages, nr_highmem)) {
1982 		pr_err("Not enough free memory\n");
1983 		return -ENOMEM;
1984 	}
1985 
1986 	if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
1987 		pr_err("Memory allocation failed\n");
1988 		return -ENOMEM;
1989 	}
1990 
1991 	/*
1992 	 * While the suspend pagedir was being allocated, new cold pages may
1993 	 * have appeared: drain them.
1994 	 */
1995 	drain_local_pages(NULL);
1996 	copy_data_pages(&copy_bm, &orig_bm);
1997 
1998 	/*
1999 	 * End of critical section.  From now on, we can write to memory,
2000 	 * but we must not touch the disk.  This especially means we must _not_
2001 	 * touch swap space, except to write out our image, of course.
2002 	 */
2003 
2004 	nr_pages += nr_highmem;
2005 	nr_copy_pages = nr_pages;
2006 	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
2007 
2008 	pr_info("Hibernation image created (%d pages copied)\n", nr_pages);
2009 
2010 	return 0;
2011 }
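
/*
 * Note: swsusp_save() runs during the atomic copy pass of hibernation and is
 * typically invoked from architecture code (e.g. swsusp_arch_suspend()) with
 * interrupts disabled, which is why GFP_ATOMIC is used for all allocations
 * made here.
 */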
2012 
2013 #ifndef CONFIG_ARCH_HIBERNATION_HEADER
2014 static int init_header_complete(struct swsusp_info *info)
2015 {
2016 	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
2017 	info->version_code = LINUX_VERSION_CODE;
2018 	return 0;
2019 }
2020 
2021 static char *check_image_kernel(struct swsusp_info *info)
2022 {
2023 	if (info->version_code != LINUX_VERSION_CODE)
2024 		return "kernel version";
2025 	if (strcmp(info->uts.sysname, init_utsname()->sysname))
2026 		return "system type";
2027 	if (strcmp(info->uts.release, init_utsname()->release))
2028 		return "kernel release";
2029 	if (strcmp(info->uts.version, init_utsname()->version))
2030 		return "version";
2031 	if (strcmp(info->uts.machine, init_utsname()->machine))
2032 		return "machine";
2033 	return NULL;
2034 }
2035 #endif /* CONFIG_ARCH_HIBERNATION_HEADER */
2036 
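/**
 * snapshot_get_image_size - Compute the number of pages in the image.
 *
 * The image consists of one header page, nr_meta_pages pages holding the
 * packed PFN list and nr_copy_pages pages of data, hence the "+ 1".
 */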
2037 unsigned long snapshot_get_image_size(void)
2038 {
2039 	return nr_copy_pages + nr_meta_pages + 1;
2040 }
2041 
2042 static int init_header(struct swsusp_info *info)
2043 {
2044 	memset(info, 0, sizeof(struct swsusp_info));
2045 	info->num_physpages = get_num_physpages();
2046 	info->image_pages = nr_copy_pages;
2047 	info->pages = snapshot_get_image_size();
2048 	info->size = info->pages;
2049 	info->size <<= PAGE_SHIFT;
2050 	return init_header_complete(info);
2051 }
2052 
2053 /**
2054  * pack_pfns - Prepare PFNs for saving.
2055  * @buf: Memory buffer to store the PFNs in.
2056  * @bm: Memory bitmap.
2057  *
2058  * PFNs corresponding to set bits in @bm are stored in the area of memory
2059  * pointed to by @buf (1 page at a time).
2060  */
2061 static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
2062 {
2063 	int j;
2064 
2065 	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2066 		buf[j] = memory_bm_next_pfn(bm);
2067 		if (unlikely(buf[j] == BM_END_OF_MAP))
2068 			break;
2069 		/* Save page key for data page (s390 only). */
2070 		page_key_read(buf + j);
2071 	}
2072 }
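
/*
 * Sizing note (hypothetical but typical 64-bit configuration): with 4 KiB
 * pages and 8-byte longs, each meta page packs PAGE_SIZE / sizeof(long) ==
 * 512 PFNs, matching nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long),
 * PAGE_SIZE) as computed in swsusp_save().
 */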
2073 
2074 /**
2075  * snapshot_read_next - Get the address to read the next image page from.
2076  * @handle: Snapshot handle to be used for the reading.
2077  *
2078  * On the first call, @handle should point to a zeroed snapshot_handle
2079  * structure.  The structure is then populated and a pointer to it should be
2080  * passed to this function on every subsequent call.
2081  *
2082  * On success, the function returns a positive number.  Then, the caller
2083  * is allowed to read up to the returned number of bytes from the memory
2084  * location computed by the data_of() macro.
2085  *
2086  * The function returns 0 to indicate the end of the data stream.  Negative
2087  * numbers are returned on errors, in which case the structure
2088  * pointed to by @handle is not updated and should not be used any more.
2089  */
2090 int snapshot_read_next(struct snapshot_handle *handle)
2091 {
2092 	if (handle->cur > nr_meta_pages + nr_copy_pages)
2093 		return 0;
2094 
2095 	if (!buffer) {
2096 		/* This causes the buffer to be freed by swsusp_free() */
2097 		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2098 		if (!buffer)
2099 			return -ENOMEM;
2100 	}
2101 	if (!handle->cur) {
2102 		int error;
2103 
2104 		error = init_header((struct swsusp_info *)buffer);
2105 		if (error)
2106 			return error;
2107 		handle->buffer = buffer;
2108 		memory_bm_position_reset(&orig_bm);
2109 		memory_bm_position_reset(&copy_bm);
2110 	} else if (handle->cur <= nr_meta_pages) {
2111 		clear_page(buffer);
2112 		pack_pfns(buffer, &orig_bm);
2113 	} else {
2114 		struct page *page;
2115 
2116 		page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
2117 		if (PageHighMem(page)) {
2118 			/*
2119 			 * Highmem pages are copied to the buffer,
2120 			 * because we can't return with a kmapped
2121 			 * highmem page (we may not be called again).
2122 			 */
2123 			void *kaddr;
2124 
2125 			kaddr = kmap_atomic(page);
2126 			copy_page(buffer, kaddr);
2127 			kunmap_atomic(kaddr);
2128 			handle->buffer = buffer;
2129 		} else {
2130 			handle->buffer = page_address(page);
2131 		}
2132 	}
2133 	handle->cur++;
2134 	return PAGE_SIZE;
2135 }
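
/*
 * Illustrative consumer loop (a sketch only; write_chunk() is a hypothetical
 * output routine standing in for the swap writer or the snapshot device):
 *
 *	struct snapshot_handle handle = {};
 *	int ret;
 *
 *	while ((ret = snapshot_read_next(&handle)) > 0)
 *		write_chunk(data_of(handle), ret);
 *
 * The loop ends with ret == 0 at the end of the image, or with a negative
 * error code on failure.
 */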
2136 
2137 static void duplicate_memory_bitmap(struct memory_bitmap *dst,
2138 				    struct memory_bitmap *src)
2139 {
2140 	unsigned long pfn;
2141 
2142 	memory_bm_position_reset(src);
2143 	pfn = memory_bm_next_pfn(src);
2144 	while (pfn != BM_END_OF_MAP) {
2145 		memory_bm_set_bit(dst, pfn);
2146 		pfn = memory_bm_next_pfn(src);
2147 	}
2148 }
2149 
2150 /**
2151  * mark_unsafe_pages - Mark pages that were used before hibernation.
2152  *
2153  * Mark the pages that cannot be used for storing the image during restoration,
2154  * because they conflict with the pages that had been used before hibernation.
2155  */
2156 static void mark_unsafe_pages(struct memory_bitmap *bm)
2157 {
2158 	unsigned long pfn;
2159 
2160 	/* Clear the "free"/"unsafe" bit for all PFNs */
2161 	memory_bm_position_reset(free_pages_map);
2162 	pfn = memory_bm_next_pfn(free_pages_map);
2163 	while (pfn != BM_END_OF_MAP) {
2164 		memory_bm_clear_current(free_pages_map);
2165 		pfn = memory_bm_next_pfn(free_pages_map);
2166 	}
2167 
2168 	/* Mark pages that correspond to the "original" PFNs as "unsafe" */
2169 	duplicate_memory_bitmap(free_pages_map, bm);
2170 
2171 	allocated_unsafe_pages = 0;
2172 }
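
/*
 * From this point on, a set bit in free_pages_map no longer means "free": it
 * marks a page frame that held image data before hibernation and is therefore
 * "unsafe" to reuse while the image is being loaded.
 */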
2173 
2174 static int check_header(struct swsusp_info *info)
2175 {
2176 	char *reason;
2177 
2178 	reason = check_image_kernel(info);
2179 	if (!reason && info->num_physpages != get_num_physpages())
2180 		reason = "memory size";
2181 	if (reason) {
2182 		pr_err("Image mismatch: %s\n", reason);
2183 		return -EPERM;
2184 	}
2185 	return 0;
2186 }
2187 
2188 /**
2189  * load_header - Check the image header and copy the data from it.
2190  */
2191 static int load_header(struct swsusp_info *info)
2192 {
2193 	int error;
2194 
2195 	restore_pblist = NULL;
2196 	error = check_header(info);
2197 	if (!error) {
2198 		nr_copy_pages = info->image_pages;
2199 		nr_meta_pages = info->pages - info->image_pages - 1;
2200 	}
2201 	return error;
2202 }
2203 
2204 /**
2205  * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
2206  * @buf: Area of memory containing the PFNs.
2207  * @bm: Memory bitmap.
2208  *
2209  * For each element of the array pointed to by @buf (1 page at a time), set the
2210  * corresponding bit in @bm.
2211  */
2212 static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
2213 {
2214 	int j;
2215 
2216 	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2217 		if (unlikely(buf[j] == BM_END_OF_MAP))
2218 			break;
2219 
2220 		/* Extract and buffer page key for data page (s390 only). */
2221 		page_key_memorize(buf + j);
2222 
2223 		if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
2224 			memory_bm_set_bit(bm, buf[j]);
2225 		else
2226 			return -EFAULT;
2227 	}
2228 
2229 	return 0;
2230 }
2231 
2232 #ifdef CONFIG_HIGHMEM
2233 /*
2234  * struct highmem_pbe is used for creating the list of highmem pages that
2235  * should be restored atomically during the resume from disk, because the page
2236  * frames they have occupied before the suspend are in use.
2237  */
2238 struct highmem_pbe {
2239 	struct page *copy_page;	/* data is here now */
2240 	struct page *orig_page;	/* data was here before the suspend */
2241 	struct highmem_pbe *next;
2242 };
2243 
2244 /*
2245  * List of highmem PBEs needed for restoring the highmem pages that were
2246  * allocated before the suspend and included in the suspend image, but have
2247  * also been allocated by the "resume" kernel, so their contents cannot be
2248  * written directly to their "original" page frames.
2249  */
2250 static struct highmem_pbe *highmem_pblist;
2251 
2252 /**
2253  * count_highmem_image_pages - Compute the number of highmem pages in the image.
2254  * @bm: Memory bitmap.
2255  *
2256  * The bits in @bm that correspond to image pages are assumed to be set.
2257  */
2258 static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
2259 {
2260 	unsigned long pfn;
2261 	unsigned int cnt = 0;
2262 
2263 	memory_bm_position_reset(bm);
2264 	pfn = memory_bm_next_pfn(bm);
2265 	while (pfn != BM_END_OF_MAP) {
2266 		if (PageHighMem(pfn_to_page(pfn)))
2267 			cnt++;
2268 
2269 		pfn = memory_bm_next_pfn(bm);
2270 	}
2271 	return cnt;
2272 }
2273 
2274 static unsigned int safe_highmem_pages;
2275 
2276 static struct memory_bitmap *safe_highmem_bm;
2277 
2278 /**
2279  * prepare_highmem_image - Allocate memory for loading highmem data from image.
2280  * @bm: Pointer to an uninitialized memory bitmap structure.
2281  * @nr_highmem_p: Pointer to the number of highmem image pages.
2282  *
2283  * Try to allocate as many highmem pages as there are highmem image pages
2284  * (@nr_highmem_p points to the variable containing the number of highmem image
2285  * pages).  The pages that are "safe" (ie. will not be overwritten when the
2286  * pages).  The pages that are "safe" (i.e. will not be overwritten when the
2287  * hibernation image is restored entirely) have the corresponding bits set in
2288  * @bm (it must be uninitialized).
2289  * NOTE: This function should not be called if there are no highmem image pages.
2290  */
2291 static int prepare_highmem_image(struct memory_bitmap *bm,
2292 				 unsigned int *nr_highmem_p)
2293 {
2294 	unsigned int to_alloc;
2295 
2296 	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
2297 		return -ENOMEM;
2298 
2299 	if (get_highmem_buffer(PG_SAFE))
2300 		return -ENOMEM;
2301 
2302 	to_alloc = count_free_highmem_pages();
2303 	if (to_alloc > *nr_highmem_p)
2304 		to_alloc = *nr_highmem_p;
2305 	else
2306 		*nr_highmem_p = to_alloc;
2307 
2308 	safe_highmem_pages = 0;
2309 	while (to_alloc-- > 0) {
2310 		struct page *page;
2311 
2312 		page = alloc_page(__GFP_HIGHMEM);
2313 		if (!swsusp_page_is_free(page)) {
2314 			/* The page is "safe", set its bit in the bitmap */
2315 			memory_bm_set_bit(bm, page_to_pfn(page));
2316 			safe_highmem_pages++;
2317 		}
2318 		/* Mark the page as allocated */
2319 		swsusp_set_page_forbidden(page);
2320 		swsusp_set_page_free(page);
2321 	}
2322 	memory_bm_position_reset(bm);
2323 	safe_highmem_bm = bm;
2324 	return 0;
2325 }
2326 
2327 static struct page *last_highmem_page;
2328 
2329 /**
2330  * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
2331  *
2332  * For a given highmem image page get a buffer that suspend_write_next() should
2333  * return to its caller to write to.
2334  *
2335  * If the page is to be saved to its "original" page frame or a copy of
2336  * the page is to be made in the highmem, @buffer is returned.  Otherwise,
2337  * the copy of the page is to be made in normal memory, so the address of
2338  * the copy is returned.
2339  *
2340  * If @buffer is returned, the caller of suspend_write_next() will write
2341  * the page's contents to @buffer, so they will have to be copied to the
2342  * right location on the next call to suspend_write_next() and it is done
2343  * with the help of copy_last_highmem_page().  For this purpose, if
2344  * @buffer is returned, @last_highmem_page is set to the page to which
2345  * the data will have to be copied from @buffer.
2346  */
2347 static void *get_highmem_page_buffer(struct page *page,
2348 				     struct chain_allocator *ca)
2349 {
2350 	struct highmem_pbe *pbe;
2351 	void *kaddr;
2352 
2353 	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
2354 		/*
2355 		 * We have allocated the "original" page frame and we can
2356 		 * use it directly to store the loaded page.
2357 		 */
2358 		last_highmem_page = page;
2359 		return buffer;
2360 	}
2361 	/*
2362 	 * The "original" page frame has not been allocated and we have to
2363 	 * use a "safe" page frame to store the loaded page.
2364 	 */
2365 	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
2366 	if (!pbe) {
2367 		swsusp_free();
2368 		return ERR_PTR(-ENOMEM);
2369 	}
2370 	pbe->orig_page = page;
2371 	if (safe_highmem_pages > 0) {
2372 		struct page *tmp;
2373 
2374 		/* Copy of the page will be stored in high memory */
2375 		kaddr = buffer;
2376 		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
2377 		safe_highmem_pages--;
2378 		last_highmem_page = tmp;
2379 		pbe->copy_page = tmp;
2380 	} else {
2381 		/* Copy of the page will be stored in normal memory */
2382 		kaddr = safe_pages_list;
2383 		safe_pages_list = safe_pages_list->next;
2384 		pbe->copy_page = virt_to_page(kaddr);
2385 	}
2386 	pbe->next = highmem_pblist;
2387 	highmem_pblist = pbe;
2388 	return kaddr;
2389 }
2390 
2391 /**
2392  * copy_last_highmem_page - Copy the most recent highmem image page.
2393  *
2394  * Copy the contents of a highmem image page from @buffer, where the caller of
2395  * snapshot_write_next() has stored them, to the right location represented by
2396  * @last_highmem_page.
2397  */
2398 static void copy_last_highmem_page(void)
2399 {
2400 	if (last_highmem_page) {
2401 		void *dst;
2402 
2403 		dst = kmap_atomic(last_highmem_page);
2404 		copy_page(dst, buffer);
2405 		kunmap_atomic(dst);
2406 		last_highmem_page = NULL;
2407 	}
2408 }
2409 
2410 static inline int last_highmem_page_copied(void)
2411 {
2412 	return !last_highmem_page;
2413 }
2414 
2415 static inline void free_highmem_data(void)
2416 {
2417 	if (safe_highmem_bm)
2418 		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
2419 
2420 	if (buffer)
2421 		free_image_page(buffer, PG_UNSAFE_CLEAR);
2422 }
2423 #else
2424 static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
2425 
2426 static inline int prepare_highmem_image(struct memory_bitmap *bm,
2427 					unsigned int *nr_highmem_p) { return 0; }
2428 
2429 static inline void *get_highmem_page_buffer(struct page *page,
2430 					    struct chain_allocator *ca)
2431 {
2432 	return ERR_PTR(-EINVAL);
2433 }
2434 
2435 static inline void copy_last_highmem_page(void) {}
2436 static inline int last_highmem_page_copied(void) { return 1; }
2437 static inline void free_highmem_data(void) {}
2438 #endif /* CONFIG_HIGHMEM */
2439 
2440 #define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
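
/*
 * Example (hypothetical 64-bit configuration): struct pbe holds three
 * pointers, so with 4 KiB pages and 8-byte pointers LINKED_PAGE_DATA_SIZE ==
 * 4096 - 8 == 4088 and PBES_PER_LINKED_PAGE == 4088 / 24 == 170.
 */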
2441 
2442 /**
2443  * prepare_image - Make room for loading the hibernation image.
2444  * @new_bm: Uninitialized memory bitmap structure.
2445  * @bm: Memory bitmap with unsafe pages marked.
2446  *
2447  * Use @bm to mark the pages that will be overwritten in the process of
2448  * restoring the system memory state from the suspend image ("unsafe" pages)
2449  * and allocate memory for the image.
2450  *
2451  * The idea is to allocate a new memory bitmap first and then allocate
2452  * as many pages as needed for image data, but without specifying what those
2453  * pages will be used for just yet.  Instead, we mark them all as allocated and
2454  * create a list of "safe" pages to be used later.  On systems with high
2455  * memory, a list of "safe" highmem pages is created as well.
2456  */
2457 static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
2458 {
2459 	unsigned int nr_pages, nr_highmem;
2460 	struct linked_page *lp;
2461 	int error;
2462 
2463 	/* If there is no highmem, the buffer will not be necessary */
2464 	free_image_page(buffer, PG_UNSAFE_CLEAR);
2465 	buffer = NULL;
2466 
2467 	nr_highmem = count_highmem_image_pages(bm);
2468 	mark_unsafe_pages(bm);
2469 
2470 	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
2471 	if (error)
2472 		goto Free;
2473 
2474 	duplicate_memory_bitmap(new_bm, bm);
2475 	memory_bm_free(bm, PG_UNSAFE_KEEP);
2476 	if (nr_highmem > 0) {
2477 		error = prepare_highmem_image(bm, &nr_highmem);
2478 		if (error)
2479 			goto Free;
2480 	}
2481 	/*
2482 	 * Reserve some safe pages for potential later use.
2483 	 *
2484 	 * NOTE: This way we make sure there will be enough safe pages for the
2485 	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
2486 	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
2487 	 *
2488 	 * nr_copy_pages cannot be less than allocated_unsafe_pages too.
2489 	 * Also note that nr_copy_pages cannot be less than allocated_unsafe_pages.
2490 	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2491 	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
2492 	while (nr_pages > 0) {
2493 		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
2494 		if (!lp) {
2495 			error = -ENOMEM;
2496 			goto Free;
2497 		}
2498 		lp->next = safe_pages_list;
2499 		safe_pages_list = lp;
2500 		nr_pages--;
2501 	}
2502 	/* Preallocate memory for the image */
2503 	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2504 	while (nr_pages > 0) {
2505 		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
2506 		if (!lp) {
2507 			error = -ENOMEM;
2508 			goto Free;
2509 		}
2510 		if (!swsusp_page_is_free(virt_to_page(lp))) {
2511 			/* The page is "safe", add it to the list */
2512 			lp->next = safe_pages_list;
2513 			safe_pages_list = lp;
2514 		}
2515 		/* Mark the page as allocated */
2516 		swsusp_set_page_forbidden(virt_to_page(lp));
2517 		swsusp_set_page_free(virt_to_page(lp));
2518 		nr_pages--;
2519 	}
2520 	return 0;
2521 
2522  Free:
2523 	swsusp_free();
2524 	return error;
2525 }
2526 
2527 /**
2528  * get_buffer - Get the address to store the next image data page.
2529  *
2530  * Get the address that snapshot_write_next() should return to its caller to
2531  * write to.
2532  */
2533 static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
2534 {
2535 	struct pbe *pbe;
2536 	struct page *page;
2537 	unsigned long pfn = memory_bm_next_pfn(bm);
2538 
2539 	if (pfn == BM_END_OF_MAP)
2540 		return ERR_PTR(-EFAULT);
2541 
2542 	page = pfn_to_page(pfn);
2543 	if (PageHighMem(page))
2544 		return get_highmem_page_buffer(page, ca);
2545 
2546 	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
2547 		/*
2548 		 * We have allocated the "original" page frame and we can
2549 		 * use it directly to store the loaded page.
2550 		 */
2551 		return page_address(page);
2552 
2553 	/*
2554 	 * The "original" page frame has not been allocated and we have to
2555 	 * use a "safe" page frame to store the loaded page.
2556 	 */
2557 	pbe = chain_alloc(ca, sizeof(struct pbe));
2558 	if (!pbe) {
2559 		swsusp_free();
2560 		return ERR_PTR(-ENOMEM);
2561 	}
2562 	pbe->orig_address = page_address(page);
2563 	pbe->address = safe_pages_list;
2564 	safe_pages_list = safe_pages_list->next;
2565 	pbe->next = restore_pblist;
2566 	restore_pblist = pbe;
2567 	return pbe->address;
2568 }
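
/*
 * The PBEs chained onto restore_pblist above record the pages whose contents
 * were loaded into "safe" frames; the final copy back to the original frames
 * is typically performed later by architecture code (e.g.
 * swsusp_arch_resume()) walking this list.
 */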
2569 
2570 /**
2571  * snapshot_write_next - Get the address to store the next image page.
2572  * @handle: Snapshot handle structure to guide the writing.
2573  *
2574  * On the first call, @handle should point to a zeroed snapshot_handle
2575  * structure.  The structure is then populated and a pointer to it should be
2576  * passed to this function on every subsequent call.
2577  *
2578  * On success, the function returns a positive number.  Then, the caller
2579  * is allowed to write up to the returned number of bytes to the memory
2580  * location computed by the data_of() macro.
2581  *
2582  * The function returns 0 to indicate the "end of file" condition.  Negative
2583  * numbers are returned on errors, in which case the structure pointed to by
2584  * @handle is not updated and should not be used any more.
2585  */
2586 int snapshot_write_next(struct snapshot_handle *handle)
2587 {
2588 	static struct chain_allocator ca;
2589 	int error = 0;
2590 
2591 	/* Check if we have already loaded the entire image */
2592 	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
2593 		return 0;
2594 
2595 	handle->sync_read = 1;
2596 
2597 	if (!handle->cur) {
2598 		if (!buffer)
2599 			/* This causes the buffer to be freed by swsusp_free() */
2600 			buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2601 
2602 		if (!buffer)
2603 			return -ENOMEM;
2604 
2605 		handle->buffer = buffer;
2606 	} else if (handle->cur == 1) {
2607 		error = load_header(buffer);
2608 		if (error)
2609 			return error;
2610 
2611 		safe_pages_list = NULL;
2612 
2613 		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
2614 		if (error)
2615 			return error;
2616 
2617 		/* Allocate buffer for page keys. */
2618 		error = page_key_alloc(nr_copy_pages);
2619 		if (error)
2620 			return error;
2621 
2622 		hibernate_restore_protection_begin();
2623 	} else if (handle->cur <= nr_meta_pages + 1) {
2624 		error = unpack_orig_pfns(buffer, &copy_bm);
2625 		if (error)
2626 			return error;
2627 
2628 		if (handle->cur == nr_meta_pages + 1) {
2629 			error = prepare_image(&orig_bm, &copy_bm);
2630 			if (error)
2631 				return error;
2632 
2633 			chain_init(&ca, GFP_ATOMIC, PG_SAFE);
2634 			memory_bm_position_reset(&orig_bm);
2635 			restore_pblist = NULL;
2636 			handle->buffer = get_buffer(&orig_bm, &ca);
2637 			handle->sync_read = 0;
2638 			if (IS_ERR(handle->buffer))
2639 				return PTR_ERR(handle->buffer);
2640 		}
2641 	} else {
2642 		copy_last_highmem_page();
2643 		/* Restore page key for data page (s390 only). */
2644 		page_key_write(handle->buffer);
2645 		hibernate_restore_protect_page(handle->buffer);
2646 		handle->buffer = get_buffer(&orig_bm, &ca);
2647 		if (IS_ERR(handle->buffer))
2648 			return PTR_ERR(handle->buffer);
2649 		if (handle->buffer != buffer)
2650 			handle->sync_read = 0;
2651 	}
2652 	handle->cur++;
2653 	return PAGE_SIZE;
2654 }
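
/*
 * Illustrative loading loop (a sketch only; read_chunk() is a hypothetical
 * input routine standing in for the swap reader or the snapshot device):
 *
 *	struct snapshot_handle handle = {};
 *	int ret;
 *
 *	while ((ret = snapshot_write_next(&handle)) > 0)
 *		read_chunk(data_of(handle), ret);
 *	snapshot_write_finalize(&handle);
 *	if (ret < 0)
 *		return ret;
 *	if (!snapshot_image_loaded(&handle))
 *		return -ENODATA;
 */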
2655 
2656 /**
2657  * snapshot_write_finalize - Complete the loading of a hibernation image.
2658  *
2659  * Must be called after the last call to snapshot_write_next() in case the last
2660  * page in the image happens to be a highmem page and its contents should be
2661  * stored in highmem.  Additionally, it recycles bitmap memory that's not
2662  * necessary any more.
2663  */
2664 void snapshot_write_finalize(struct snapshot_handle *handle)
2665 {
2666 	copy_last_highmem_page();
2667 	/* Restore page key for data page (s390 only). */
2668 	page_key_write(handle->buffer);
2669 	page_key_free();
2670 	hibernate_restore_protect_page(handle->buffer);
2671 	/* Do that only if we have loaded the image entirely */
2672 	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
2673 		memory_bm_recycle(&orig_bm);
2674 		free_highmem_data();
2675 	}
2676 }
2677 
2678 int snapshot_image_loaded(struct snapshot_handle *handle)
2679 {
2680 	return !(!nr_copy_pages || !last_highmem_page_copied() ||
2681 			handle->cur <= nr_meta_pages + nr_copy_pages);
2682 }
2683 
2684 #ifdef CONFIG_HIGHMEM
2685 /* Assumes that @buf is ready and points to a "safe" page */
2686 static inline void swap_two_pages_data(struct page *p1, struct page *p2,
2687 				       void *buf)
2688 {
2689 	void *kaddr1, *kaddr2;
2690 
2691 	kaddr1 = kmap_atomic(p1);
2692 	kaddr2 = kmap_atomic(p2);
2693 	copy_page(buf, kaddr1);
2694 	copy_page(kaddr1, kaddr2);
2695 	copy_page(kaddr2, buf);
2696 	kunmap_atomic(kaddr2);
2697 	kunmap_atomic(kaddr1);
2698 }
2699 
2700 /**
2701  * restore_highmem - Put highmem image pages into their original locations.
2702  *
2703  * For each highmem page that was in use before hibernation and is included in
2704  * the image, and also has been allocated by the "restore" kernel, swap its
2705  * current contents with the previous (i.e. "before hibernation") ones.
2706  *
2707  * If the restore eventually fails, we can call this function once again and
2708  * restore the highmem state as seen by the restore kernel.
2709  */
2710 int restore_highmem(void)
2711 {
2712 	struct highmem_pbe *pbe = highmem_pblist;
2713 	void *buf;
2714 
2715 	if (!pbe)
2716 		return 0;
2717 
2718 	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
2719 	if (!buf)
2720 		return -ENOMEM;
2721 
2722 	while (pbe) {
2723 		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
2724 		pbe = pbe->next;
2725 	}
2726 	free_image_page(buf, PG_UNSAFE_CLEAR);
2727 	return 0;
2728 }
2729 #endif /* CONFIG_HIGHMEM */
2730