xref: /openbmc/linux/kernel/power/snapshot.c (revision e1f7c9ee)
1 /*
2  * linux/kernel/power/snapshot.c
3  *
4  * This file provides system snapshot/restore functionality for swsusp.
5  *
6  * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
7  * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
8  *
9  * This file is released under the GPLv2.
10  *
11  */
12 
13 #include <linux/version.h>
14 #include <linux/module.h>
15 #include <linux/mm.h>
16 #include <linux/suspend.h>
17 #include <linux/delay.h>
18 #include <linux/bitops.h>
19 #include <linux/spinlock.h>
20 #include <linux/kernel.h>
21 #include <linux/pm.h>
22 #include <linux/device.h>
23 #include <linux/init.h>
24 #include <linux/bootmem.h>
25 #include <linux/syscalls.h>
26 #include <linux/console.h>
27 #include <linux/highmem.h>
28 #include <linux/list.h>
29 #include <linux/slab.h>
30 #include <linux/compiler.h>
31 
32 #include <asm/uaccess.h>
33 #include <asm/mmu_context.h>
34 #include <asm/pgtable.h>
35 #include <asm/tlbflush.h>
36 #include <asm/io.h>
37 
38 #include "power.h"
39 
40 static int swsusp_page_is_free(struct page *);
41 static void swsusp_set_page_forbidden(struct page *);
42 static void swsusp_unset_page_forbidden(struct page *);
43 
44 /*
45  * Number of bytes to reserve for memory allocations made by device drivers
46  * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
47  * cause image creation to fail (tunable via /sys/power/reserved_size).
48  */
49 unsigned long reserved_size;
50 
51 void __init hibernate_reserved_size_init(void)
52 {
53 	reserved_size = SPARE_PAGES * PAGE_SIZE;
54 }
55 
56 /*
57  * Preferred image size in bytes (tunable via /sys/power/image_size).
58  * When it is set to N, swsusp will do its best to ensure the image
59  * size will not exceed N bytes, but if that is impossible, it will
60  * try to create the smallest image possible.
61  */
62 unsigned long image_size;
63 
64 void __init hibernate_image_size_init(void)
65 {
66 	image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
67 }
68 
69 /* List of PBEs needed for restoring the pages that were allocated before
70  * the suspend and included in the suspend image, but have also been
71  * allocated by the "resume" kernel, so their contents cannot be written
72  * directly to their "original" page frames.
73  */
74 struct pbe *restore_pblist;
75 
76 /* Pointer to an auxiliary buffer (1 page) */
77 static void *buffer;
78 
79 /**
80  *	safe_needed - on resume, for storing the PBE list and the image,
81  *	we can only use memory pages that do not conflict with the pages
82  *	used before suspend.  The unsafe pages have PageNosaveFree set
83  *	and we count them using allocated_unsafe_pages.
84  *
85  *	Each allocated image page is marked as PageNosave and PageNosaveFree
86  *	so that swsusp_free() can release it.
87  */
88 
89 #define PG_ANY		0
90 #define PG_SAFE		1
91 #define PG_UNSAFE_CLEAR	1
92 #define PG_UNSAFE_KEEP	0
93 
94 static unsigned int allocated_unsafe_pages;
95 
96 static void *get_image_page(gfp_t gfp_mask, int safe_needed)
97 {
98 	void *res;
99 
100 	res = (void *)get_zeroed_page(gfp_mask);
101 	if (safe_needed)
102 		while (res && swsusp_page_is_free(virt_to_page(res))) {
103 			/* The page is unsafe, mark it for swsusp_free() */
104 			swsusp_set_page_forbidden(virt_to_page(res));
105 			allocated_unsafe_pages++;
106 			res = (void *)get_zeroed_page(gfp_mask);
107 		}
108 	if (res) {
109 		swsusp_set_page_forbidden(virt_to_page(res));
110 		swsusp_set_page_free(virt_to_page(res));
111 	}
112 	return res;
113 }
114 
115 unsigned long get_safe_page(gfp_t gfp_mask)
116 {
117 	return (unsigned long)get_image_page(gfp_mask, PG_SAFE);
118 }
119 
120 static struct page *alloc_image_page(gfp_t gfp_mask)
121 {
122 	struct page *page;
123 
124 	page = alloc_page(gfp_mask);
125 	if (page) {
126 		swsusp_set_page_forbidden(page);
127 		swsusp_set_page_free(page);
128 	}
129 	return page;
130 }
131 
132 /**
133  *	free_image_page - free page represented by @addr, allocated with
134  *	get_image_page (page flags set by it must be cleared)
135  */
136 
137 static inline void free_image_page(void *addr, int clear_nosave_free)
138 {
139 	struct page *page;
140 
141 	BUG_ON(!virt_addr_valid(addr));
142 
143 	page = virt_to_page(addr);
144 
145 	swsusp_unset_page_forbidden(page);
146 	if (clear_nosave_free)
147 		swsusp_unset_page_free(page);
148 
149 	__free_page(page);
150 }
151 
152 /* struct linked_page is used to build chains of pages */
153 
154 #define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))
155 
156 struct linked_page {
157 	struct linked_page *next;
158 	char data[LINKED_PAGE_DATA_SIZE];
159 } __packed;
160 
161 static inline void
162 free_list_of_pages(struct linked_page *list, int clear_page_nosave)
163 {
164 	while (list) {
165 		struct linked_page *lp = list->next;
166 
167 		free_image_page(list, clear_page_nosave);
168 		list = lp;
169 	}
170 }
171 
172 /**
173  *	struct chain_allocator is used for allocating small objects out of
174  *	a linked list of pages called 'the chain'.
175  *
176  *	The chain grows each time there is no room for a new object in
177  *	the current page.  The allocated objects cannot be freed individually.
178  *	It is only possible to free them all at once, by freeing the entire
179  *	chain.
180  *
181  *	NOTE: The chain allocator may be inefficient if the allocated objects
182  *	are not much smaller than PAGE_SIZE.
183  */
184 
185 struct chain_allocator {
186 	struct linked_page *chain;	/* the chain */
187 	unsigned int used_space;	/* total size of objects allocated out
188 					 * of the current page
189 					 */
190 	gfp_t gfp_mask;		/* mask for allocating pages */
191 	int safe_needed;	/* if set, only "safe" pages are allocated */
192 };
193 
194 static void
195 chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed)
196 {
197 	ca->chain = NULL;
198 	ca->used_space = LINKED_PAGE_DATA_SIZE;
199 	ca->gfp_mask = gfp_mask;
200 	ca->safe_needed = safe_needed;
201 }
202 
203 static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
204 {
205 	void *ret;
206 
207 	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
208 		struct linked_page *lp;
209 
210 		lp = get_image_page(ca->gfp_mask, ca->safe_needed);
211 		if (!lp)
212 			return NULL;
213 
214 		lp->next = ca->chain;
215 		ca->chain = lp;
216 		ca->used_space = 0;
217 	}
218 	ret = ca->chain->data + ca->used_space;
219 	ca->used_space += size;
220 	return ret;
221 }
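
/*
 * Usage sketch of the chain allocator (for illustration only; this is
 * roughly how the radix-tree code below drives it):
 *
 *	struct chain_allocator ca;
 *	struct rtree_node *node;
 *
 *	chain_init(&ca, GFP_KERNEL, PG_ANY);
 *	node = chain_alloc(&ca, sizeof(struct rtree_node));
 *	if (!node)
 *		return -ENOMEM;
 *	...
 *	free_list_of_pages(ca.chain, PG_UNSAFE_KEEP);
 *
 * There is no per-object free; the whole chain is released at once with
 * free_list_of_pages().
 */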
222 
223 /**
224  *	Data types related to memory bitmaps.
225  *
226  *	Memory bitmap is a structure consisting of many linked lists of
227  *	objects.  The main list's elements are of type struct
228  *	mem_zone_bm_rtree and each of them corresponds to one populated
229  *	memory zone.  For each zone there is a radix tree of objects of
230  *	type struct rtree_node that represent the blocks of the bitmap in
231  *	which information is stored.
232  *
233  *	struct memory_bitmap contains a pointer to the main list of zone
234  *	bitmap objects, a struct bm_position used for browsing the bitmap,
235  *	and a pointer to the list of pages used for allocating all of the
236  *	zone bitmap objects and bitmap block objects.
237  *
238  *	NOTE: It has to be possible to lay out the bitmap in memory
239  *	using only allocations of order 0.  Additionally, the bitmap is
240  *	designed to work with an arbitrary number of zones (this is over
241  *	the top for now, but let's avoid making unnecessary assumptions ;-).
242  *
243  *	struct mem_zone_bm_rtree contains the pfns that correspond to the
244  *	start and end of the represented zone, the root and level count of
245  *	the zone's radix tree, and the heads of its two node lists.
246  *
247  *	struct rtree_node wraps the memory page in which information is
248  *	stored (in the form of a block of the bitmap) and links that page
249  *	into one of the per-zone node lists (inner nodes or leaves,
250  *	depending on its role in the tree).
251  *
252  *	The memory bitmap is organized as a radix tree to guarantee fast random
253  *	access to the bits. There is one radix tree for each zone (as returned
254  *	from create_mem_extents).
255  *
256  *	One radix tree is represented by one struct mem_zone_bm_rtree. There are
257  *	two linked lists for the nodes of the tree, one for the inner nodes and
258  *	one for the leaf nodes.  The linked leaf nodes are used for fast linear
259  *	access of the memory bitmap.
260  *
261  *	The struct rtree_node represents one node of the radix tree.
262  */
263 
264 #define BM_END_OF_MAP	(~0UL)
265 
266 #define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
267 #define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)
268 #define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)
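
/*
 * Worked example, assuming 4 KiB pages (PAGE_SHIFT == 12):
 * BM_BITS_PER_BLOCK is 4096 * 8 = 32768, BM_BLOCK_SHIFT is 15 and
 * BM_BLOCK_MASK is 0x7fff.  A zone-relative pfn thus splits into a
 * block number and a bit offset, exactly as memory_bm_find_bit() below
 * computes them:
 *
 *	block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;
 *	bit	 = (pfn - zone->start_pfn) & BM_BLOCK_MASK;
 *
 * e.g. zone-relative pfn 40000 lands in block 1, bit 7232.
 */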
269 
270 /*
271  * struct rtree_node is a wrapper struct to link the nodes
272  * of the rtree together for easy linear iteration over
273  * bits and easy freeing
274  */
275 struct rtree_node {
276 	struct list_head list;
277 	unsigned long *data;
278 };
279 
280 /*
281  * struct mem_zone_bm_rtree represents a bitmap used for one
282  * populated memory zone.
283  */
284 struct mem_zone_bm_rtree {
285 	struct list_head list;		/* Link Zones together         */
286 	struct list_head nodes;		/* Radix Tree inner nodes      */
287 	struct list_head leaves;	/* Radix Tree leaves           */
288 	unsigned long start_pfn;	/* Zone start page frame       */
289 	unsigned long end_pfn;		/* Zone end page frame + 1     */
290 	struct rtree_node *rtree;	/* Radix Tree Root             */
291 	int levels;			/* Number of Radix Tree Levels */
292 	unsigned int blocks;		/* Number of Bitmap Blocks     */
293 };
294 
295 /* struct bm_position is used for browsing memory bitmaps */
296 
297 struct bm_position {
298 	struct mem_zone_bm_rtree *zone;
299 	struct rtree_node *node;
300 	unsigned long node_pfn;
301 	int node_bit;
302 };
303 
304 struct memory_bitmap {
305 	struct list_head zones;
306 	struct linked_page *p_list;	/* list of pages used to store zone
307 					 * bitmap objects and bitmap block
308 					 * objects
309 					 */
310 	struct bm_position cur;	/* most recently used bit position */
311 };
312 
313 /* Functions that operate on memory bitmaps */
314 
315 #define BM_ENTRIES_PER_LEVEL	(PAGE_SIZE / sizeof(unsigned long))
316 #if BITS_PER_LONG == 32
317 #define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 2)
318 #else
319 #define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 3)
320 #endif
321 #define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
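
/*
 * Worked example, assuming a 64-bit system with 4 KiB pages:
 * BM_ENTRIES_PER_LEVEL is 4096 / 8 = 512, so BM_RTREE_LEVEL_SHIFT is 9
 * and every inner node holds 512 child pointers.  The tree walks below
 * extract the per-level index from a block number like this:
 *
 *	index = block_nr >> ((level - 1) * BM_RTREE_LEVEL_SHIFT);
 *	index &= BM_RTREE_LEVEL_MASK;
 *
 * e.g. block_nr 1000 in a two-level tree gives index 1 at the top
 * level (1000 >> 9) and index 488 at the leaf level (1000 & 511).
 */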
322 
323 /*
324  *	alloc_rtree_node - Allocate a new node and add it to the radix tree.
325  *
326  *	This function is used to allocate inner nodes as well as the
327  *	leaf nodes of the radix tree.  It also adds the node to the
328  *	corresponding linked list passed in by the *list parameter.
329  */
330 static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
331 					   struct chain_allocator *ca,
332 					   struct list_head *list)
333 {
334 	struct rtree_node *node;
335 
336 	node = chain_alloc(ca, sizeof(struct rtree_node));
337 	if (!node)
338 		return NULL;
339 
340 	node->data = get_image_page(gfp_mask, safe_needed);
341 	if (!node->data)
342 		return NULL;
343 
344 	list_add_tail(&node->list, list);
345 
346 	return node;
347 }
348 
349 /*
350  *	add_rtree_block - Add a new leaf node to the radix tree
351  *
352  *	The leaf nodes need to be allocated in order to keep the list of
353  *	leaves sorted.  This is guaranteed by the zone->blocks
354  *	counter.
355  */
356 static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
357 			   int safe_needed, struct chain_allocator *ca)
358 {
359 	struct rtree_node *node, *block, **dst;
360 	unsigned int levels_needed, block_nr;
361 	int i;
362 
363 	block_nr = zone->blocks;
364 	levels_needed = 0;
365 
366 	/* How many levels do we need for this block nr? */
367 	while (block_nr) {
368 		levels_needed += 1;
369 		block_nr >>= BM_RTREE_LEVEL_SHIFT;
370 	}
371 
372 	/* Make sure the rtree has enough levels */
373 	for (i = zone->levels; i < levels_needed; i++) {
374 		node = alloc_rtree_node(gfp_mask, safe_needed, ca,
375 					&zone->nodes);
376 		if (!node)
377 			return -ENOMEM;
378 
379 		node->data[0] = (unsigned long)zone->rtree;
380 		zone->rtree = node;
381 		zone->levels += 1;
382 	}
383 
384 	/* Allocate new block */
385 	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
386 	if (!block)
387 		return -ENOMEM;
388 
389 	/* Now walk the rtree to insert the block */
390 	node = zone->rtree;
391 	dst = &zone->rtree;
392 	block_nr = zone->blocks;
393 	for (i = zone->levels; i > 0; i--) {
394 		int index;
395 
396 		if (!node) {
397 			node = alloc_rtree_node(gfp_mask, safe_needed, ca,
398 						&zone->nodes);
399 			if (!node)
400 				return -ENOMEM;
401 			*dst = node;
402 		}
403 
404 		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
405 		index &= BM_RTREE_LEVEL_MASK;
406 		dst = (struct rtree_node **)&((*dst)->data[index]);
407 		node = *dst;
408 	}
409 
410 	zone->blocks += 1;
411 	*dst = block;
412 
413 	return 0;
414 }
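
/*
 * Level arithmetic, assuming a 64-bit system with 4 KiB pages: each
 * level multiplies the number of addressable blocks by
 * BM_ENTRIES_PER_LEVEL = 512.  One inner level covers 512 blocks, i.e.
 * 512 * 32768 pages = 64 GiB of zone span, and a second level extends
 * that to 32 TiB, so the loop above rarely builds more than two levels.
 */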
415 
416 static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
417 			       int clear_nosave_free);
418 
419 /*
420  *	create_zone_bm_rtree - create a radix tree for one zone
421  *
422  *	Allocates the mem_zone_bm_rtree structure and initializes it.
423  *	This function also allocates and builds the radix tree for the
424  *	zone.
425  */
426 static struct mem_zone_bm_rtree *
427 create_zone_bm_rtree(gfp_t gfp_mask, int safe_needed,
428 		     struct chain_allocator *ca,
429 		     unsigned long start, unsigned long end)
430 {
431 	struct mem_zone_bm_rtree *zone;
432 	unsigned int i, nr_blocks;
433 	unsigned long pages;
434 
435 	pages = end - start;
436 	zone  = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
437 	if (!zone)
438 		return NULL;
439 
440 	INIT_LIST_HEAD(&zone->nodes);
441 	INIT_LIST_HEAD(&zone->leaves);
442 	zone->start_pfn = start;
443 	zone->end_pfn = end;
444 	nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
445 
446 	for (i = 0; i < nr_blocks; i++) {
447 		if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
448 			free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
449 			return NULL;
450 		}
451 	}
452 
453 	return zone;
454 }
455 
456 /*
457  *	free_zone_bm_rtree - Free the memory of the radix tree
458  *
459  *	Free all node pages of the radix tree. The mem_zone_bm_rtree
460  *	structure itself is not freed here nor are the rtree_node
461  *	structs.
462  */
463 static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
464 			       int clear_nosave_free)
465 {
466 	struct rtree_node *node;
467 
468 	list_for_each_entry(node, &zone->nodes, list)
469 		free_image_page(node->data, clear_nosave_free);
470 
471 	list_for_each_entry(node, &zone->leaves, list)
472 		free_image_page(node->data, clear_nosave_free);
473 }
474 
475 static void memory_bm_position_reset(struct memory_bitmap *bm)
476 {
477 	bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
478 				  list);
479 	bm->cur.node = list_entry(bm->cur.zone->leaves.next,
480 				  struct rtree_node, list);
481 	bm->cur.node_pfn = 0;
482 	bm->cur.node_bit = 0;
483 }
484 
485 static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
486 
487 struct mem_extent {
488 	struct list_head hook;
489 	unsigned long start;
490 	unsigned long end;
491 };
492 
493 /**
494  *	free_mem_extents - free a list of memory extents
495  *	@list - list of extents to empty
496  */
497 static void free_mem_extents(struct list_head *list)
498 {
499 	struct mem_extent *ext, *aux;
500 
501 	list_for_each_entry_safe(ext, aux, list, hook) {
502 		list_del(&ext->hook);
503 		kfree(ext);
504 	}
505 }
506 
507 /**
508  *	create_mem_extents - create a list of memory extents representing
509  *	                     contiguous ranges of PFNs
510  *	@list - list to put the extents into
511  *	@gfp_mask - mask to use for memory allocations
512  */
513 static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
514 {
515 	struct zone *zone;
516 
517 	INIT_LIST_HEAD(list);
518 
519 	for_each_populated_zone(zone) {
520 		unsigned long zone_start, zone_end;
521 		struct mem_extent *ext, *cur, *aux;
522 
523 		zone_start = zone->zone_start_pfn;
524 		zone_end = zone_end_pfn(zone);
525 
526 		list_for_each_entry(ext, list, hook)
527 			if (zone_start <= ext->end)
528 				break;
529 
530 		if (&ext->hook == list || zone_end < ext->start) {
531 			/* New extent is necessary */
532 			struct mem_extent *new_ext;
533 
534 			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
535 			if (!new_ext) {
536 				free_mem_extents(list);
537 				return -ENOMEM;
538 			}
539 			new_ext->start = zone_start;
540 			new_ext->end = zone_end;
541 			list_add_tail(&new_ext->hook, &ext->hook);
542 			continue;
543 		}
544 
545 		/* Merge this zone's range of PFNs with the existing one */
546 		if (zone_start < ext->start)
547 			ext->start = zone_start;
548 		if (zone_end > ext->end)
549 			ext->end = zone_end;
550 
551 		/* More merging may be possible */
552 		cur = ext;
553 		list_for_each_entry_safe_continue(cur, aux, list, hook) {
554 			if (zone_end < cur->start)
555 				break;
556 			if (zone_end < cur->end)
557 				ext->end = cur->end;
558 			list_del(&cur->hook);
559 			kfree(cur);
560 		}
561 	}
562 
563 	return 0;
564 }
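
/*
 * Merging example (illustrative pfn ranges): if the list already holds
 * the extents [0, 4096) and [8192, 12288) and a zone spanning
 * [2048, 9216) is processed, the first extent grows to cover the zone
 * and then absorbs the second one, leaving the single extent
 * [0, 12288).
 */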
565 
566 /**
567  *	memory_bm_create - allocate memory for a memory bitmap
568  */
569 static int
570 memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
571 {
572 	struct chain_allocator ca;
573 	struct list_head mem_extents;
574 	struct mem_extent *ext;
575 	int error;
576 
577 	chain_init(&ca, gfp_mask, safe_needed);
578 	INIT_LIST_HEAD(&bm->zones);
579 
580 	error = create_mem_extents(&mem_extents, gfp_mask);
581 	if (error)
582 		return error;
583 
584 	list_for_each_entry(ext, &mem_extents, hook) {
585 		struct mem_zone_bm_rtree *zone;
586 
587 		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
588 					    ext->start, ext->end);
589 		if (!zone) {
590 			error = -ENOMEM;
591 			goto Error;
592 		}
593 		list_add_tail(&zone->list, &bm->zones);
594 	}
595 
596 	bm->p_list = ca.chain;
597 	memory_bm_position_reset(bm);
598  Exit:
599 	free_mem_extents(&mem_extents);
600 	return error;
601 
602  Error:
603 	bm->p_list = ca.chain;
604 	memory_bm_free(bm, PG_UNSAFE_CLEAR);
605 	goto Exit;
606 }
607 
608 /**
609  *	memory_bm_free - free memory occupied by the memory bitmap @bm
610  */
611 static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
612 {
613 	struct mem_zone_bm_rtree *zone;
614 
615 	list_for_each_entry(zone, &bm->zones, list)
616 		free_zone_bm_rtree(zone, clear_nosave_free);
617 
618 	free_list_of_pages(bm->p_list, clear_nosave_free);
619 
620 	INIT_LIST_HEAD(&bm->zones);
621 }
622 
623 /**
624  *	memory_bm_find_bit - Find the bit for pfn in the memory
625  *			     bitmap
626  *
627  *	Find the bit in the bitmap @bm that corresponds to the given pfn.
628  *	The cur.zone, cur.node and cur.node_pfn members of @bm are
629  *	updated.
630  *	It walks the radix tree to find the page which contains the bit for
631  *	pfn and returns the page address in *addr and the bit number in *bit_nr.
632  */
633 static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
634 			      void **addr, unsigned int *bit_nr)
635 {
636 	struct mem_zone_bm_rtree *curr, *zone;
637 	struct rtree_node *node;
638 	int i, block_nr;
639 
640 	zone = bm->cur.zone;
641 
642 	if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
643 		goto zone_found;
644 
645 	zone = NULL;
646 
647 	/* Find the right zone */
648 	list_for_each_entry(curr, &bm->zones, list) {
649 		if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
650 			zone = curr;
651 			break;
652 		}
653 	}
654 
655 	if (!zone)
656 		return -EFAULT;
657 
658 zone_found:
659 	/*
660 	 * We have a zone. Now walk the radix tree to find the leaf
661 	 * node for our pfn.
662 	 */
663 
664 	node = bm->cur.node;
665 	if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
666 		goto node_found;
667 
668 	node      = zone->rtree;
669 	block_nr  = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;
670 
671 	for (i = zone->levels; i > 0; i--) {
672 		int index;
673 
674 		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
675 		index &= BM_RTREE_LEVEL_MASK;
676 		BUG_ON(node->data[index] == 0);
677 		node = (struct rtree_node *)node->data[index];
678 	}
679 
680 node_found:
681 	/* Update last position */
682 	bm->cur.zone = zone;
683 	bm->cur.node = node;
684 	bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;
685 
686 	/* Set return values */
687 	*addr = node->data;
688 	*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;
689 
690 	return 0;
691 }
692 
693 static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
694 {
695 	void *addr;
696 	unsigned int bit;
697 	int error;
698 
699 	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
700 	BUG_ON(error);
701 	set_bit(bit, addr);
702 }
703 
704 static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
705 {
706 	void *addr;
707 	unsigned int bit;
708 	int error;
709 
710 	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
711 	if (!error)
712 		set_bit(bit, addr);
713 
714 	return error;
715 }
716 
717 static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
718 {
719 	void *addr;
720 	unsigned int bit;
721 	int error;
722 
723 	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
724 	BUG_ON(error);
725 	clear_bit(bit, addr);
726 }
727 
728 static void memory_bm_clear_current(struct memory_bitmap *bm)
729 {
730 	int bit;
731 
732 	bit = max(bm->cur.node_bit - 1, 0);
733 	clear_bit(bit, bm->cur.node->data);
734 }
735 
736 static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
737 {
738 	void *addr;
739 	unsigned int bit;
740 	int error;
741 
742 	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
743 	BUG_ON(error);
744 	return test_bit(bit, addr);
745 }
746 
747 static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
748 {
749 	void *addr;
750 	unsigned int bit;
751 
752 	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
753 }
754 
755 /*
756  *	rtree_next_node - Jumps to the next leaf node
757  *
758  *	Sets the position to the beginning of the next node in the
759  *	memory bitmap. This is either the next node in the current
760  *	zone's radix tree or the first node in the radix tree of the
761  *	next zone.
762  *
763  *	Returns true if there is a next node, false otherwise.
764  */
765 static bool rtree_next_node(struct memory_bitmap *bm)
766 {
767 	bm->cur.node = list_entry(bm->cur.node->list.next,
768 				  struct rtree_node, list);
769 	if (&bm->cur.node->list != &bm->cur.zone->leaves) {
770 		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
771 		bm->cur.node_bit  = 0;
772 		touch_softlockup_watchdog();
773 		return true;
774 	}
775 
776 	/* No more nodes, goto next zone */
777 	bm->cur.zone = list_entry(bm->cur.zone->list.next,
778 				  struct mem_zone_bm_rtree, list);
779 	if (&bm->cur.zone->list != &bm->zones) {
780 		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
781 					  struct rtree_node, list);
782 		bm->cur.node_pfn = 0;
783 		bm->cur.node_bit = 0;
784 		return true;
785 	}
786 
787 	/* No more zones */
788 	return false;
789 }
790 
791 /**
792  *	memory_bm_next_pfn - Find the next set bit in the bitmap @bm
793  *
794  *	Starting from the last returned position this function searches
795  *	for the next set bit in the memory bitmap and returns its
796  *	number.  If no more bits are set, BM_END_OF_MAP is returned.
797  *
798  *	It is required to run memory_bm_position_reset() before the
799  *	first call to this function.
800  */
801 static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
802 {
803 	unsigned long bits, pfn, pages;
804 	int bit;
805 
806 	do {
807 		pages	  = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
808 		bits      = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
809 		bit	  = find_next_bit(bm->cur.node->data, bits,
810 					  bm->cur.node_bit);
811 		if (bit < bits) {
812 			pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
813 			bm->cur.node_bit = bit + 1;
814 			return pfn;
815 		}
816 	} while (rtree_next_node(bm));
817 
818 	return BM_END_OF_MAP;
819 }
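
/*
 * Typical iteration pattern (a sketch; this is the shape of the loops
 * in copy_data_pages() and swsusp_free() below):
 *
 *	unsigned long pfn;
 *
 *	memory_bm_position_reset(bm);
 *	for (;;) {
 *		pfn = memory_bm_next_pfn(bm);
 *		if (pfn == BM_END_OF_MAP)
 *			break;
 *		...	operate on pfn
 *	}
 */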
820 
821 /**
822  *	This structure represents a range of page frames the contents of which
823  *	should not be saved during the suspend.
824  */
825 
826 struct nosave_region {
827 	struct list_head list;
828 	unsigned long start_pfn;
829 	unsigned long end_pfn;
830 };
831 
832 static LIST_HEAD(nosave_regions);
833 
834 /**
835  *	__register_nosave_region - register a range of page frames the contents
836  *	of which should not be saved during the suspend (to be used in the early
837  *	initialization code)
838  */
839 
840 void __init
841 __register_nosave_region(unsigned long start_pfn, unsigned long end_pfn,
842 			 int use_kmalloc)
843 {
844 	struct nosave_region *region;
845 
846 	if (start_pfn >= end_pfn)
847 		return;
848 
849 	if (!list_empty(&nosave_regions)) {
850 		/* Try to extend the previous region (they should be sorted) */
851 		region = list_entry(nosave_regions.prev,
852 					struct nosave_region, list);
853 		if (region->end_pfn == start_pfn) {
854 			region->end_pfn = end_pfn;
855 			goto Report;
856 		}
857 	}
858 	if (use_kmalloc) {
859 		/* during init, this shouldn't fail */
860 		region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
861 		BUG_ON(!region);
862 	} else
863 		/* This allocation cannot fail */
864 		region = memblock_virt_alloc(sizeof(struct nosave_region), 0);
865 	region->start_pfn = start_pfn;
866 	region->end_pfn = end_pfn;
867 	list_add_tail(&region->list, &nosave_regions);
868  Report:
869 	printk(KERN_INFO "PM: Registered nosave memory: [mem %#010llx-%#010llx]\n",
870 		(unsigned long long) start_pfn << PAGE_SHIFT,
871 		((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
872 }
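
/*
 * Callers normally reach this through the wrappers in <linux/suspend.h>
 * rather than directly: register_nosave_region() passes use_kmalloc == 0
 * (memblock, for early boot code) and register_nosave_region_late()
 * passes use_kmalloc == 1 (kmalloc, once the slab allocator is up).
 */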
873 
874 /*
875  * Set bits in this map correspond to the page frames the contents of which
876  * should not be saved during the suspend.
877  */
878 static struct memory_bitmap *forbidden_pages_map;
879 
880 /* Set bits in this map correspond to free page frames. */
881 static struct memory_bitmap *free_pages_map;
882 
883 /*
884  * Each page frame allocated for creating the image is marked by setting the
885  * corresponding bits in forbidden_pages_map and free_pages_map simultaneously
886  */
887 
888 void swsusp_set_page_free(struct page *page)
889 {
890 	if (free_pages_map)
891 		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
892 }
893 
894 static int swsusp_page_is_free(struct page *page)
895 {
896 	return free_pages_map ?
897 		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
898 }
899 
900 void swsusp_unset_page_free(struct page *page)
901 {
902 	if (free_pages_map)
903 		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
904 }
905 
906 static void swsusp_set_page_forbidden(struct page *page)
907 {
908 	if (forbidden_pages_map)
909 		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
910 }
911 
912 int swsusp_page_is_forbidden(struct page *page)
913 {
914 	return forbidden_pages_map ?
915 		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
916 }
917 
918 static void swsusp_unset_page_forbidden(struct page *page)
919 {
920 	if (forbidden_pages_map)
921 		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
922 }
923 
924 /**
925  *	mark_nosave_pages - set bits corresponding to the page frames the
926  *	contents of which should not be saved in a given bitmap.
927  */
928 
929 static void mark_nosave_pages(struct memory_bitmap *bm)
930 {
931 	struct nosave_region *region;
932 
933 	if (list_empty(&nosave_regions))
934 		return;
935 
936 	list_for_each_entry(region, &nosave_regions, list) {
937 		unsigned long pfn;
938 
939 		pr_debug("PM: Marking nosave pages: [mem %#010llx-%#010llx]\n",
940 			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
941 			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
942 				- 1);
943 
944 		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
945 			if (pfn_valid(pfn)) {
946 				/*
947 				 * It is safe to ignore the result of
948 				 * mem_bm_set_bit_check() here, since we won't
949 				 * touch the PFNs for which the error is
950 				 * returned anyway.
951 				 */
952 				mem_bm_set_bit_check(bm, pfn);
953 			}
954 	}
955 }
956 
957 static bool is_nosave_page(unsigned long pfn)
958 {
959 	struct nosave_region *region;
960 
961 	list_for_each_entry(region, &nosave_regions, list) {
962 		if (pfn >= region->start_pfn && pfn < region->end_pfn) {
963 			pr_err("PM: %#010llx in e820 nosave region: "
964 			       "[mem %#010llx-%#010llx]\n",
965 			       (unsigned long long) pfn << PAGE_SHIFT,
966 			       (unsigned long long) region->start_pfn << PAGE_SHIFT,
967 			       ((unsigned long long) region->end_pfn << PAGE_SHIFT)
968 					- 1);
969 			return true;
970 		}
971 	}
972 
973 	return false;
974 }
975 
976 /**
977  *	create_basic_memory_bitmaps - create bitmaps needed for marking page
978  *	frames that should not be saved and free page frames.  The pointers
979  *	forbidden_pages_map and free_pages_map are only modified if everything
980  *	goes well, because we don't want the bits to be used before both bitmaps
981  *	are set up.
982  */
983 
984 int create_basic_memory_bitmaps(void)
985 {
986 	struct memory_bitmap *bm1, *bm2;
987 	int error = 0;
988 
989 	if (forbidden_pages_map && free_pages_map)
990 		return 0;
991 	else
992 		BUG_ON(forbidden_pages_map || free_pages_map);
993 
994 	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
995 	if (!bm1)
996 		return -ENOMEM;
997 
998 	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
999 	if (error)
1000 		goto Free_first_object;
1001 
1002 	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
1003 	if (!bm2)
1004 		goto Free_first_bitmap;
1005 
1006 	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
1007 	if (error)
1008 		goto Free_second_object;
1009 
1010 	forbidden_pages_map = bm1;
1011 	free_pages_map = bm2;
1012 	mark_nosave_pages(forbidden_pages_map);
1013 
1014 	pr_debug("PM: Basic memory bitmaps created\n");
1015 
1016 	return 0;
1017 
1018  Free_second_object:
1019 	kfree(bm2);
1020  Free_first_bitmap:
1021 	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
1022  Free_first_object:
1023 	kfree(bm1);
1024 	return -ENOMEM;
1025 }
1026 
1027 /**
1028  *	free_basic_memory_bitmaps - free memory bitmaps allocated by
1029  *	create_basic_memory_bitmaps().  The auxiliary pointers are necessary
1030  *	so that the bitmaps themselves are not referred to while they are being
1031  *	freed.
1032  */
1033 
1034 void free_basic_memory_bitmaps(void)
1035 {
1036 	struct memory_bitmap *bm1, *bm2;
1037 
1038 	if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
1039 		return;
1040 
1041 	bm1 = forbidden_pages_map;
1042 	bm2 = free_pages_map;
1043 	forbidden_pages_map = NULL;
1044 	free_pages_map = NULL;
1045 	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
1046 	kfree(bm1);
1047 	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
1048 	kfree(bm2);
1049 
1050 	pr_debug("PM: Basic memory bitmaps freed\n");
1051 }
1052 
1053 /**
1054  *	snapshot_additional_pages - estimate the number of additional pages
1055  *	needed for setting up the suspend image data structures for a given
1056  *	zone (usually the returned value is greater than the exact number)
1057  */
1058 
1059 unsigned int snapshot_additional_pages(struct zone *zone)
1060 {
1061 	unsigned int rtree, nodes;
1062 
1063 	rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
1064 	rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
1065 			      LINKED_PAGE_DATA_SIZE);
1066 	while (nodes > 1) {
1067 		nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
1068 		rtree += nodes;
1069 	}
1070 
1071 	return 2 * rtree;
1072 }
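
/*
 * Worked example, assuming a 64-bit system with 4 KiB pages: for a zone
 * spanning 1 GiB (262144 pages), DIV_ROUND_UP(262144, 32768) gives 8
 * bitmap pages.  The 8 struct rtree_node entries (24 bytes each) fit
 * into a single linked page and one inner node can point at all 8
 * leaves, so rtree becomes 8 + 1 + 1 = 10.  The result is doubled
 * because two bitmaps (orig_bm and copy_bm) have to be set up, giving
 * an estimate of 20 pages.
 */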
1073 
1074 #ifdef CONFIG_HIGHMEM
1075 /**
1076  *	count_free_highmem_pages - compute the total number of free highmem
1077  *	pages, system-wide.
1078  */
1079 
1080 static unsigned int count_free_highmem_pages(void)
1081 {
1082 	struct zone *zone;
1083 	unsigned int cnt = 0;
1084 
1085 	for_each_populated_zone(zone)
1086 		if (is_highmem(zone))
1087 			cnt += zone_page_state(zone, NR_FREE_PAGES);
1088 
1089 	return cnt;
1090 }
1091 
1092 /**
1093  *	saveable_highmem_page - Determine whether a highmem page should be
1094  *	included in the suspend image.
1095  *
1096  *	We should save the page unless it is Nosave, NosaveFree or Reserved,
1097  *	or a part of a free chunk of pages.
1098  */
1099 static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
1100 {
1101 	struct page *page;
1102 
1103 	if (!pfn_valid(pfn))
1104 		return NULL;
1105 
1106 	page = pfn_to_page(pfn);
1107 	if (page_zone(page) != zone)
1108 		return NULL;
1109 
1110 	BUG_ON(!PageHighMem(page));
1111 
1112 	if (swsusp_page_is_forbidden(page) ||  swsusp_page_is_free(page) ||
1113 	    PageReserved(page))
1114 		return NULL;
1115 
1116 	if (page_is_guard(page))
1117 		return NULL;
1118 
1119 	return page;
1120 }
1121 
1122 /**
1123  *	count_highmem_pages - compute the total number of saveable highmem
1124  *	pages.
1125  */
1126 
1127 static unsigned int count_highmem_pages(void)
1128 {
1129 	struct zone *zone;
1130 	unsigned int n = 0;
1131 
1132 	for_each_populated_zone(zone) {
1133 		unsigned long pfn, max_zone_pfn;
1134 
1135 		if (!is_highmem(zone))
1136 			continue;
1137 
1138 		mark_free_pages(zone);
1139 		max_zone_pfn = zone_end_pfn(zone);
1140 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1141 			if (saveable_highmem_page(zone, pfn))
1142 				n++;
1143 	}
1144 	return n;
1145 }
1146 #else
1147 static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
1148 {
1149 	return NULL;
1150 }
1151 #endif /* CONFIG_HIGHMEM */
1152 
1153 /**
1154  *	saveable_page - Determine whether a non-highmem page should be included
1155  *	in the suspend image.
1156  *
1157  *	We should save the page if it isn't Nosave, and is not in the range
1158  *	of pages statically defined as 'unsaveable', and it isn't a part of
1159  *	a free chunk of pages.
1160  */
1161 static struct page *saveable_page(struct zone *zone, unsigned long pfn)
1162 {
1163 	struct page *page;
1164 
1165 	if (!pfn_valid(pfn))
1166 		return NULL;
1167 
1168 	page = pfn_to_page(pfn);
1169 	if (page_zone(page) != zone)
1170 		return NULL;
1171 
1172 	BUG_ON(PageHighMem(page));
1173 
1174 	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
1175 		return NULL;
1176 
1177 	if (PageReserved(page)
1178 	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
1179 		return NULL;
1180 
1181 	if (page_is_guard(page))
1182 		return NULL;
1183 
1184 	return page;
1185 }
1186 
1187 /**
1188  *	count_data_pages - compute the total number of saveable non-highmem
1189  *	pages.
1190  */
1191 
1192 static unsigned int count_data_pages(void)
1193 {
1194 	struct zone *zone;
1195 	unsigned long pfn, max_zone_pfn;
1196 	unsigned int n = 0;
1197 
1198 	for_each_populated_zone(zone) {
1199 		if (is_highmem(zone))
1200 			continue;
1201 
1202 		mark_free_pages(zone);
1203 		max_zone_pfn = zone_end_pfn(zone);
1204 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1205 			if (saveable_page(zone, pfn))
1206 				n++;
1207 	}
1208 	return n;
1209 }
1210 
1211 /* This is needed because copy_page and memcpy are not usable for copying
1212  * task structs.
1213  */
1214 static inline void do_copy_page(long *dst, long *src)
1215 {
1216 	int n;
1217 
1218 	for (n = PAGE_SIZE / sizeof(long); n; n--)
1219 		*dst++ = *src++;
1220 }
1221 
1222 
1223 /**
1224  *	safe_copy_page - check if the page we are going to copy is marked as
1225  *		present in the kernel page tables (this always is the case if
1226  *		CONFIG_DEBUG_PAGEALLOC is not set and in that case
1227  *		kernel_page_present() always returns 'true').
1228  */
1229 static void safe_copy_page(void *dst, struct page *s_page)
1230 {
1231 	if (kernel_page_present(s_page)) {
1232 		do_copy_page(dst, page_address(s_page));
1233 	} else {
1234 		kernel_map_pages(s_page, 1, 1);
1235 		do_copy_page(dst, page_address(s_page));
1236 		kernel_map_pages(s_page, 1, 0);
1237 	}
1238 }
1239 
1240 
1241 #ifdef CONFIG_HIGHMEM
1242 static inline struct page *
1243 page_is_saveable(struct zone *zone, unsigned long pfn)
1244 {
1245 	return is_highmem(zone) ?
1246 		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
1247 }
1248 
1249 static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
1250 {
1251 	struct page *s_page, *d_page;
1252 	void *src, *dst;
1253 
1254 	s_page = pfn_to_page(src_pfn);
1255 	d_page = pfn_to_page(dst_pfn);
1256 	if (PageHighMem(s_page)) {
1257 		src = kmap_atomic(s_page);
1258 		dst = kmap_atomic(d_page);
1259 		do_copy_page(dst, src);
1260 		kunmap_atomic(dst);
1261 		kunmap_atomic(src);
1262 	} else {
1263 		if (PageHighMem(d_page)) {
1264 			/* Page pointed to by src may contain some kernel
1265 			 * data modified by kmap_atomic()
1266 			 */
1267 			safe_copy_page(buffer, s_page);
1268 			dst = kmap_atomic(d_page);
1269 			copy_page(dst, buffer);
1270 			kunmap_atomic(dst);
1271 		} else {
1272 			safe_copy_page(page_address(d_page), s_page);
1273 		}
1274 	}
1275 }
1276 #else
1277 #define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)
1278 
1279 static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
1280 {
1281 	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
1282 				pfn_to_page(src_pfn));
1283 }
1284 #endif /* CONFIG_HIGHMEM */
1285 
1286 static void
1287 copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
1288 {
1289 	struct zone *zone;
1290 	unsigned long pfn;
1291 
1292 	for_each_populated_zone(zone) {
1293 		unsigned long max_zone_pfn;
1294 
1295 		mark_free_pages(zone);
1296 		max_zone_pfn = zone_end_pfn(zone);
1297 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1298 			if (page_is_saveable(zone, pfn))
1299 				memory_bm_set_bit(orig_bm, pfn);
1300 	}
1301 	memory_bm_position_reset(orig_bm);
1302 	memory_bm_position_reset(copy_bm);
1303 	for(;;) {
1304 		pfn = memory_bm_next_pfn(orig_bm);
1305 		if (unlikely(pfn == BM_END_OF_MAP))
1306 			break;
1307 		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
1308 	}
1309 }
1310 
1311 /* Total number of image pages */
1312 static unsigned int nr_copy_pages;
1313 /* Number of pages needed for saving the original pfns of the image pages */
1314 static unsigned int nr_meta_pages;
1315 /*
1316  * Numbers of normal and highmem page frames allocated for hibernation image
1317  * before suspending devices.
1318  */
1319 unsigned int alloc_normal, alloc_highmem;
1320 /*
1321  * Memory bitmap used for marking saveable pages (during hibernation) or
1322  * hibernation image pages (during restore)
1323  */
1324 static struct memory_bitmap orig_bm;
1325 /*
1326  * Memory bitmap used during hibernation for marking allocated page frames that
1327  * will contain copies of saveable pages.  During restore it is initially used
1328  * for marking hibernation image pages, but then the set bits from it are
1329  * duplicated in @orig_bm and it is released.  On highmem systems it is next
1330  * used for marking "safe" highmem pages, but it has to be reinitialized for
1331  * this purpose.
1332  */
1333 static struct memory_bitmap copy_bm;
1334 
1335 /**
1336  *	swsusp_free - free pages allocated for the suspend.
1337  *
1338  *	Suspend pages are allocated before the atomic copy is made, so we
1339  *	need to release them after the resume.
1340  */
1341 
1342 void swsusp_free(void)
1343 {
1344 	unsigned long fb_pfn, fr_pfn;
1345 
1346 	if (!forbidden_pages_map || !free_pages_map)
1347 		goto out;
1348 
1349 	memory_bm_position_reset(forbidden_pages_map);
1350 	memory_bm_position_reset(free_pages_map);
1351 
1352 loop:
1353 	fr_pfn = memory_bm_next_pfn(free_pages_map);
1354 	fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
1355 
1356 	/*
1357 	 * Find the next bit set in both bitmaps. This is guaranteed to
1358 	 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
1359 	 */
1360 	do {
1361 		if (fb_pfn < fr_pfn)
1362 			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
1363 		if (fr_pfn < fb_pfn)
1364 			fr_pfn = memory_bm_next_pfn(free_pages_map);
1365 	} while (fb_pfn != fr_pfn);
1366 
1367 	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
1368 		struct page *page = pfn_to_page(fr_pfn);
1369 
1370 		memory_bm_clear_current(forbidden_pages_map);
1371 		memory_bm_clear_current(free_pages_map);
1372 		__free_page(page);
1373 		goto loop;
1374 	}
1375 
1376 out:
1377 	nr_copy_pages = 0;
1378 	nr_meta_pages = 0;
1379 	restore_pblist = NULL;
1380 	buffer = NULL;
1381 	alloc_normal = 0;
1382 	alloc_highmem = 0;
1383 }
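
/*
 * Intersection walk example: if the forbidden bitmap yields the pfns
 * {3, 5, 9} and the free bitmap yields {2, 5, 9}, the inner loop above
 * advances whichever side is behind until both agree, so pages 5 and 9
 * are freed and the walk stops once both bitmaps return BM_END_OF_MAP.
 */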
1384 
1385 /* Helper functions used for the shrinking of memory. */
1386 
1387 #define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)
1388 
1389 /**
1390  * preallocate_image_pages - Allocate a number of pages for hibernation image
1391  * @nr_pages: Number of page frames to allocate.
1392  * @mask: GFP flags to use for the allocation.
1393  *
1394  * Return value: Number of page frames actually allocated
1395  */
1396 static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
1397 {
1398 	unsigned long nr_alloc = 0;
1399 
1400 	while (nr_pages > 0) {
1401 		struct page *page;
1402 
1403 		page = alloc_image_page(mask);
1404 		if (!page)
1405 			break;
1406 		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
1407 		if (PageHighMem(page))
1408 			alloc_highmem++;
1409 		else
1410 			alloc_normal++;
1411 		nr_pages--;
1412 		nr_alloc++;
1413 	}
1414 
1415 	return nr_alloc;
1416 }
1417 
1418 static unsigned long preallocate_image_memory(unsigned long nr_pages,
1419 					      unsigned long avail_normal)
1420 {
1421 	unsigned long alloc;
1422 
1423 	if (avail_normal <= alloc_normal)
1424 		return 0;
1425 
1426 	alloc = avail_normal - alloc_normal;
1427 	if (nr_pages < alloc)
1428 		alloc = nr_pages;
1429 
1430 	return preallocate_image_pages(alloc, GFP_IMAGE);
1431 }
1432 
1433 #ifdef CONFIG_HIGHMEM
1434 static unsigned long preallocate_image_highmem(unsigned long nr_pages)
1435 {
1436 	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
1437 }
1438 
1439 /**
1440  *  __fraction - Compute (an approximation of) x * (multiplier / base)
1441  */
1442 static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
1443 {
1444 	x *= multiplier;
1445 	do_div(x, base);
1446 	return (unsigned long)x;
1447 }
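
/*
 * E.g. __fraction(1000, 1, 4) == 250; the result is truncated, hence
 * "an approximation".  The u64 intermediate and do_div() keep
 * x * multiplier from overflowing an unsigned long on 32-bit systems.
 */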
1448 
1449 static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1450 						unsigned long highmem,
1451 						unsigned long total)
1452 {
1453 	unsigned long alloc = __fraction(nr_pages, highmem, total);
1454 
1455 	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
1456 }
1457 #else /* CONFIG_HIGHMEM */
1458 static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
1459 {
1460 	return 0;
1461 }
1462 
1463 static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1464 						unsigned long highmem,
1465 						unsigned long total)
1466 {
1467 	return 0;
1468 }
1469 #endif /* CONFIG_HIGHMEM */
1470 
1471 /**
1472  * free_unnecessary_pages - Release preallocated pages not needed for the image
1473  */
1474 static void free_unnecessary_pages(void)
1475 {
1476 	unsigned long save, to_free_normal, to_free_highmem;
1477 
1478 	save = count_data_pages();
1479 	if (alloc_normal >= save) {
1480 		to_free_normal = alloc_normal - save;
1481 		save = 0;
1482 	} else {
1483 		to_free_normal = 0;
1484 		save -= alloc_normal;
1485 	}
1486 	save += count_highmem_pages();
1487 	if (alloc_highmem >= save) {
1488 		to_free_highmem = alloc_highmem - save;
1489 	} else {
1490 		to_free_highmem = 0;
1491 		save -= alloc_highmem;
1492 		if (to_free_normal > save)
1493 			to_free_normal -= save;
1494 		else
1495 			to_free_normal = 0;
1496 	}
1497 
1498 	memory_bm_position_reset(&copy_bm);
1499 
1500 	while (to_free_normal > 0 || to_free_highmem > 0) {
1501 		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
1502 		struct page *page = pfn_to_page(pfn);
1503 
1504 		if (PageHighMem(page)) {
1505 			if (!to_free_highmem)
1506 				continue;
1507 			to_free_highmem--;
1508 			alloc_highmem--;
1509 		} else {
1510 			if (!to_free_normal)
1511 				continue;
1512 			to_free_normal--;
1513 			alloc_normal--;
1514 		}
1515 		memory_bm_clear_bit(&copy_bm, pfn);
1516 		swsusp_unset_page_forbidden(page);
1517 		swsusp_unset_page_free(page);
1518 		__free_page(page);
1519 	}
1520 }
1521 
1522 /**
1523  * minimum_image_size - Estimate the minimum acceptable size of an image
1524  * @saveable: Number of saveable pages in the system.
1525  *
1526  * We want to avoid attempting to free too much memory too hard, so estimate the
1527  * minimum acceptable size of a hibernation image to use as the lower limit for
1528  * preallocating memory.
1529  *
1530  * We assume that the minimum image size should be proportional to
1531  *
1532  * [number of saveable pages] - [number of pages that can be freed in theory]
1533  *
1534  * where the second term is the sum of (1) reclaimable slab pages, (2) active
1535  * and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
1536  * minus mapped file pages.
1537  */
1538 static unsigned long minimum_image_size(unsigned long saveable)
1539 {
1540 	unsigned long size;
1541 
1542 	size = global_page_state(NR_SLAB_RECLAIMABLE)
1543 		+ global_page_state(NR_ACTIVE_ANON)
1544 		+ global_page_state(NR_INACTIVE_ANON)
1545 		+ global_page_state(NR_ACTIVE_FILE)
1546 		+ global_page_state(NR_INACTIVE_FILE)
1547 		- global_page_state(NR_FILE_MAPPED);
1548 
1549 	return saveable <= size ? 0 : saveable - size;
1550 }
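
/*
 * Illustrative numbers: with 500000 saveable pages and 150000 pages of
 * reclaimable slab plus anonymous and unmapped file memory combined,
 * the estimated minimum image size is 350000 pages; the preallocation
 * below will not try to shrink the image beyond that.
 */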
1551 
1552 /**
1553  * hibernate_preallocate_memory - Preallocate memory for hibernation image
1554  *
1555  * To create a hibernation image it is necessary to make a copy of every page
1556  * frame in use.  We also need a number of page frames to be free during
1557  * hibernation for allocations made while saving the image and for device
1558  * drivers, in case they need to allocate memory from their hibernation
1559  * callbacks; these two numbers are given by PAGES_FOR_IO (a rough estimate)
1560  * and by reserved_size divided by PAGE_SIZE (tunable through
1561  * /sys/power/reserved_size), respectively.  To make this happen, we compute
1562  * the total number of available page frames and allocate at least
1563  *
1564  * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
1565  *  + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
1566  *
1567  * of them, which corresponds to the maximum size of a hibernation image.
1568  *
1569  * If image_size is set below the number following from the above formula,
1570  * the preallocation of memory is continued until the total number of saveable
1571  * pages in the system is below the requested image size or the minimum
1572  * acceptable image size returned by minimum_image_size(), whichever is greater.
1573  */
1574 int hibernate_preallocate_memory(void)
1575 {
1576 	struct zone *zone;
1577 	unsigned long saveable, size, max_size, count, highmem, pages = 0;
1578 	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
1579 	struct timeval start, stop;
1580 	int error;
1581 
1582 	printk(KERN_INFO "PM: Preallocating image memory... ");
1583 	do_gettimeofday(&start);
1584 
1585 	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
1586 	if (error)
1587 		goto err_out;
1588 
1589 	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
1590 	if (error)
1591 		goto err_out;
1592 
1593 	alloc_normal = 0;
1594 	alloc_highmem = 0;
1595 
1596 	/* Count the number of saveable data pages. */
1597 	save_highmem = count_highmem_pages();
1598 	saveable = count_data_pages();
1599 
1600 	/*
1601 	 * Compute the total number of page frames we can use (count) and the
1602 	 * number of pages needed for image metadata (size).
1603 	 */
1604 	count = saveable;
1605 	saveable += save_highmem;
1606 	highmem = save_highmem;
1607 	size = 0;
1608 	for_each_populated_zone(zone) {
1609 		size += snapshot_additional_pages(zone);
1610 		if (is_highmem(zone))
1611 			highmem += zone_page_state(zone, NR_FREE_PAGES);
1612 		else
1613 			count += zone_page_state(zone, NR_FREE_PAGES);
1614 	}
1615 	avail_normal = count;
1616 	count += highmem;
1617 	count -= totalreserve_pages;
1618 
1619 	/* Add number of pages required for page keys (s390 only). */
1620 	size += page_key_additional_pages(saveable);
1621 
1622 	/* Compute the maximum number of saveable pages to leave in memory. */
1623 	max_size = (count - (size + PAGES_FOR_IO)) / 2
1624 			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
1625 	/* Compute the desired number of image pages specified by image_size. */
1626 	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
1627 	if (size > max_size)
1628 		size = max_size;
1629 	/*
1630 	 * If the desired number of image pages is at least as large as the
1631 	 * current number of saveable pages in memory, allocate page frames for
1632 	 * the image and we're done.
1633 	 */
1634 	if (size >= saveable) {
1635 		pages = preallocate_image_highmem(save_highmem);
1636 		pages += preallocate_image_memory(saveable - pages, avail_normal);
1637 		goto out;
1638 	}
1639 
1640 	/* Estimate the minimum size of the image. */
1641 	pages = minimum_image_size(saveable);
1642 	/*
1643 	 * To avoid excessive pressure on the normal zone, leave room in it to
1644 	 * accommodate an image of the minimum size (unless it's already too
1645 	 * small, in which case don't preallocate pages from it at all).
1646 	 */
1647 	if (avail_normal > pages)
1648 		avail_normal -= pages;
1649 	else
1650 		avail_normal = 0;
1651 	if (size < pages)
1652 		size = min_t(unsigned long, pages, max_size);
1653 
1654 	/*
1655 	 * Let the memory management subsystem know that we're going to need a
1656 	 * large number of page frames to allocate and make it free some memory.
1657 	 * NOTE: If this is not done, performance will be hurt badly in some
1658 	 * test cases.
1659 	 */
1660 	shrink_all_memory(saveable - size);
1661 
1662 	/*
1663 	 * The number of saveable pages in memory was too high, so apply some
1664 	 * pressure to decrease it.  First, make room for the largest possible
1665 	 * image and fail if that doesn't work.  Next, try to decrease the size
1666 	 * of the image as much as indicated by 'size' using allocations from
1667 	 * highmem and non-highmem zones separately.
1668 	 */
1669 	pages_highmem = preallocate_image_highmem(highmem / 2);
1670 	alloc = count - max_size;
1671 	if (alloc > pages_highmem)
1672 		alloc -= pages_highmem;
1673 	else
1674 		alloc = 0;
1675 	pages = preallocate_image_memory(alloc, avail_normal);
1676 	if (pages < alloc) {
1677 		/* We have exhausted non-highmem pages, try highmem. */
1678 		alloc -= pages;
1679 		pages += pages_highmem;
1680 		pages_highmem = preallocate_image_highmem(alloc);
1681 		if (pages_highmem < alloc)
1682 			goto err_out;
1683 		pages += pages_highmem;
1684 		/*
1685 		 * size is the desired number of saveable pages to leave in
1686 		 * memory, so try to preallocate (all memory - size) pages.
1687 		 */
1688 		alloc = (count - pages) - size;
1689 		pages += preallocate_image_highmem(alloc);
1690 	} else {
1691 		/*
1692 		 * There are approximately max_size saveable pages at this point
1693 		 * and we want to reduce this number down to size.
1694 		 */
1695 		alloc = max_size - size;
1696 		size = preallocate_highmem_fraction(alloc, highmem, count);
1697 		pages_highmem += size;
1698 		alloc -= size;
1699 		size = preallocate_image_memory(alloc, avail_normal);
1700 		pages_highmem += preallocate_image_highmem(alloc - size);
1701 		pages += pages_highmem + size;
1702 	}
1703 
1704 	/*
1705 	 * We only need as many page frames for the image as there are saveable
1706 	 * pages in memory, but we have allocated more.  Release the excessive
1707 	 * ones now.
1708 	 */
1709 	free_unnecessary_pages();
1710 
1711  out:
1712 	do_gettimeofday(&stop);
1713 	printk(KERN_CONT "done (allocated %lu pages)\n", pages);
1714 	swsusp_show_speed(&start, &stop, pages, "Allocated");
1715 
1716 	return 0;
1717 
1718  err_out:
1719 	printk(KERN_CONT "\n");
1720 	swsusp_free();
1721 	return -ENOMEM;
1722 }
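
/*
 * Worked example of the sizing above (illustrative figures, 4 KiB
 * pages): with count = 2000000 usable page frames, size = 2000 metadata
 * pages, PAGES_FOR_IO = 1024 and reserved_size amounting to 256 pages,
 *
 *	max_size = (2000000 - (2000 + 1024)) / 2 - 2 * 256 = 997976
 *
 * so at least count - max_size = 1002024 page frames get preallocated,
 * unless image_size asks for an even smaller image.
 */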
1723 
1724 #ifdef CONFIG_HIGHMEM
1725 /**
1726  *	count_pages_for_highmem - compute the number of non-highmem pages
1727  *	that will be necessary for creating copies of highmem pages.
1728  */
1729 
1730 static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
1731 {
1732 	unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
1733 
1734 	if (free_highmem >= nr_highmem)
1735 		nr_highmem = 0;
1736 	else
1737 		nr_highmem -= free_highmem;
1738 
1739 	return nr_highmem;
1740 }
1741 #else
1742 static unsigned int
1743 count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
1744 #endif /* CONFIG_HIGHMEM */
1745 
1746 /**
1747  *	enough_free_mem - Make sure we have enough free memory for the
1748  *	snapshot image.
1749  */
1750 
1751 static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
1752 {
1753 	struct zone *zone;
1754 	unsigned int free = alloc_normal;
1755 
1756 	for_each_populated_zone(zone)
1757 		if (!is_highmem(zone))
1758 			free += zone_page_state(zone, NR_FREE_PAGES);
1759 
1760 	nr_pages += count_pages_for_highmem(nr_highmem);
1761 	pr_debug("PM: Normal pages needed: %u + %u, available pages: %u\n",
1762 		nr_pages, PAGES_FOR_IO, free);
1763 
1764 	return free > nr_pages + PAGES_FOR_IO;
1765 }
1766 
1767 #ifdef CONFIG_HIGHMEM
1768 /**
1769  *	get_highmem_buffer - if there are some highmem pages in the suspend
1770  *	image, we may need the buffer to copy them and/or load their data.
1771  */
1772 
1773 static inline int get_highmem_buffer(int safe_needed)
1774 {
1775 	buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
1776 	return buffer ? 0 : -ENOMEM;
1777 }
1778 
1779 /**
1780  *	alloc_highmem_image_pages - allocate some highmem pages for the image.
1781  *	Try to allocate as many pages as needed, but if the number of free
1782  *	highmem pages is less than that, allocate them all.
1783  */
1784 
1785 static inline unsigned int
1786 alloc_highmem_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
1787 {
1788 	unsigned int to_alloc = count_free_highmem_pages();
1789 
1790 	if (to_alloc > nr_highmem)
1791 		to_alloc = nr_highmem;
1792 
1793 	nr_highmem -= to_alloc;
1794 	while (to_alloc-- > 0) {
1795 		struct page *page;
1796 
1797 		page = alloc_image_page(__GFP_HIGHMEM);
1798 		memory_bm_set_bit(bm, page_to_pfn(page));
1799 	}
1800 	return nr_highmem;
1801 }
1802 #else
1803 static inline int get_highmem_buffer(int safe_needed) { return 0; }
1804 
1805 static inline unsigned int
1806 alloc_highmem_pages(struct memory_bitmap *bm, unsigned int n) { return 0; }
1807 #endif /* CONFIG_HIGHMEM */
1808 
1809 /**
1810  *	swsusp_alloc - allocate memory for the suspend image
1811  *
1812  *	We first try to allocate as many highmem pages as there are
1813  *	saveable highmem pages in the system.  If that fails, we allocate
1814  *	non-highmem pages for the copies of the remaining highmem ones.
1815  *
1816  *	In this approach it is likely that the copies of highmem pages will
1817  *	also be located in the high memory, because of the way in which
1818  *	copy_data_pages() works.
1819  */
1820 
1821 static int
1822 swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
1823 		unsigned int nr_pages, unsigned int nr_highmem)
1824 {
1825 	if (nr_highmem > 0) {
1826 		if (get_highmem_buffer(PG_ANY))
1827 			goto err_out;
1828 		if (nr_highmem > alloc_highmem) {
1829 			nr_highmem -= alloc_highmem;
1830 			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
1831 		}
1832 	}
1833 	if (nr_pages > alloc_normal) {
1834 		nr_pages -= alloc_normal;
1835 		while (nr_pages-- > 0) {
1836 			struct page *page;
1837 
1838 			page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
1839 			if (!page)
1840 				goto err_out;
1841 			memory_bm_set_bit(copy_bm, page_to_pfn(page));
1842 		}
1843 	}
1844 
1845 	return 0;
1846 
1847  err_out:
1848 	swsusp_free();
1849 	return -ENOMEM;
1850 }
1851 
1852 asmlinkage __visible int swsusp_save(void)
1853 {
1854 	unsigned int nr_pages, nr_highmem;
1855 
1856 	printk(KERN_INFO "PM: Creating hibernation image:\n");
1857 
1858 	drain_local_pages(NULL);
1859 	nr_pages = count_data_pages();
1860 	nr_highmem = count_highmem_pages();
1861 	printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);
1862 
1863 	if (!enough_free_mem(nr_pages, nr_highmem)) {
1864 		printk(KERN_ERR "PM: Not enough free memory\n");
1865 		return -ENOMEM;
1866 	}
1867 
1868 	if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
1869 		printk(KERN_ERR "PM: Memory allocation failed\n");
1870 		return -ENOMEM;
1871 	}
1872 
1873 	/* During allocation of the suspend pagedir, new cold pages may
1874 	 * appear.  Drain them.
1875 	 */
1876 	drain_local_pages(NULL);
1877 	copy_data_pages(&copy_bm, &orig_bm);
1878 
1879 	/*
1880 	 * End of critical section. From now on, we can write to memory,
1881 	 * but we should not touch disk.  In particular, we must _not_ touch
1882 	 * swap space!  The only exception is writing out the image itself.
1883 	 */
1884 
1885 	nr_pages += nr_highmem;
1886 	nr_copy_pages = nr_pages;
1887 	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
1888 
1889 	printk(KERN_INFO "PM: Hibernation image created (%d pages copied)\n",
1890 		nr_pages);
1891 
1892 	return 0;
1893 }
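
/*
 * Worked example for nr_meta_pages (editorial note, not part of the
 * source): each meta page stores PAGE_SIZE / sizeof(long) pfns, i.e.
 * 512 on a 64-bit system with 4 KiB pages, so an image of 100000 copied
 * pages needs DIV_ROUND_UP(100000 * 8, 4096) = 196 meta pages.
 */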
1894 
1895 #ifndef CONFIG_ARCH_HIBERNATION_HEADER
1896 static int init_header_complete(struct swsusp_info *info)
1897 {
1898 	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
1899 	info->version_code = LINUX_VERSION_CODE;
1900 	return 0;
1901 }
1902 
1903 static char *check_image_kernel(struct swsusp_info *info)
1904 {
1905 	if (info->version_code != LINUX_VERSION_CODE)
1906 		return "kernel version";
1907 	if (strcmp(info->uts.sysname, init_utsname()->sysname))
1908 		return "system type";
1909 	if (strcmp(info->uts.release, init_utsname()->release))
1910 		return "kernel release";
1911 	if (strcmp(info->uts.version, init_utsname()->version))
1912 		return "version";
1913 	if (strcmp(info->uts.machine, init_utsname()->machine))
1914 		return "machine";
1915 	return NULL;
1916 }
1917 #endif /* CONFIG_ARCH_HIBERNATION_HEADER */
1918 
1919 unsigned long snapshot_get_image_size(void)
1920 {
1921 	return nr_copy_pages + nr_meta_pages + 1;
1922 }
1923 
1924 static int init_header(struct swsusp_info *info)
1925 {
1926 	memset(info, 0, sizeof(struct swsusp_info));
1927 	info->num_physpages = get_num_physpages();
1928 	info->image_pages = nr_copy_pages;
1929 	info->pages = snapshot_get_image_size();
1930 	info->size = info->pages;
1931 	info->size <<= PAGE_SHIFT;
1932 	return init_header_complete(info);
1933 }
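
/*
 * Worked example (editorial note, not part of the source): with
 * nr_copy_pages = 100000 and nr_meta_pages = 196 (see the example after
 * swsusp_save() above), info->pages = 100000 + 196 + 1 = 100197 and
 * info->size = 100197 << PAGE_SHIFT, i.e. about 391 MiB with 4 KiB pages.
 */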
1934 
1935 /**
1936  *	pack_pfns - store the pfns corresponding to the set bits in the
1937  *	bitmap @bm into the array @buf[] (one page of pfns at a time)
1938  */
1939 
1940 static inline void
1941 pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
1942 {
1943 	int j;
1944 
1945 	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
1946 		buf[j] = memory_bm_next_pfn(bm);
1947 		if (unlikely(buf[j] == BM_END_OF_MAP))
1948 			break;
1949 		/* Save page key for data page (s390 only). */
1950 		page_key_read(buf + j);
1951 	}
1952 }
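
/*
 * Editorial note on the meta-page layout produced above: each meta page
 * is a plain array of PAGE_SIZE / sizeof(long) pfns.  If the last meta
 * page is not completely full, the first unused slot holds BM_END_OF_MAP;
 * unpack_orig_pfns() relies on this terminator when it rebuilds the
 * bitmap on resume.
 */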
1953 
1954 /**
1955  *	snapshot_read_next - used for reading the system memory snapshot.
1956  *
1957  *	On the first call to it @handle should point to a zeroed
1958  *	snapshot_handle structure.  The structure gets updated and a pointer
1959  *	to it should be passed on each subsequent call to this function.
1960  *
1961  *	On success the function returns a positive number.  Then, the caller
1962  *	is allowed to read up to the returned number of bytes from the memory
1963  *	location computed by the data_of() macro.
1964  *
1965  *	The function returns 0 to indicate the end of data stream condition,
1966  *	and a negative number is returned on error.  In such cases the
1967  *	structure pointed to by @handle is not updated and should not be used
1968  *	any more.
1969  */
1970 
1971 int snapshot_read_next(struct snapshot_handle *handle)
1972 {
1973 	if (handle->cur > nr_meta_pages + nr_copy_pages)
1974 		return 0;
1975 
1976 	if (!buffer) {
1977 		/* This ensures the buffer will be freed by swsusp_free() */
1978 		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
1979 		if (!buffer)
1980 			return -ENOMEM;
1981 	}
1982 	if (!handle->cur) {
1983 		int error;
1984 
1985 		error = init_header((struct swsusp_info *)buffer);
1986 		if (error)
1987 			return error;
1988 		handle->buffer = buffer;
1989 		memory_bm_position_reset(&orig_bm);
1990 		memory_bm_position_reset(&copy_bm);
1991 	} else if (handle->cur <= nr_meta_pages) {
1992 		clear_page(buffer);
1993 		pack_pfns(buffer, &orig_bm);
1994 	} else {
1995 		struct page *page;
1996 
1997 		page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
1998 		if (PageHighMem(page)) {
1999 			/* Highmem pages are copied to the buffer,
2000 			 * because we can't return with a kmapped
2001 			 * highmem page (we may not be called again).
2002 			 */
2003 			void *kaddr;
2004 
2005 			kaddr = kmap_atomic(page);
2006 			copy_page(buffer, kaddr);
2007 			kunmap_atomic(kaddr);
2008 			handle->buffer = buffer;
2009 		} else {
2010 			handle->buffer = page_address(page);
2011 		}
2012 	}
2013 	handle->cur++;
2014 	return PAGE_SIZE;
2015 }
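
/*
 * Illustrative caller sketch (editorial addition, not part of the kernel
 * source): how an image writer is expected to drive snapshot_read_next()
 * under the contract documented above.  write_page_to_storage() is a
 * hypothetical placeholder for the actual output routine.
 */
#if 0
static int save_image_sketch(struct snapshot_handle *handle)
{
	int ret;

	memset(handle, 0, sizeof(*handle));	/* first call: zeroed handle */
	while ((ret = snapshot_read_next(handle)) > 0) {
		/* Up to 'ret' bytes may be read from data_of(*handle). */
		int error = write_page_to_storage(data_of(*handle));

		if (error)
			return error;
	}
	return ret;	/* 0 means end of data, negative means error */
}
#endif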
2016 
2017 /**
2018  *	mark_unsafe_pages - mark the pages that cannot be used for storing
2019  *	the image during resume, because they conflict with the pages that
2020  *	had been used before suspend
2021  */
2022 
2023 static int mark_unsafe_pages(struct memory_bitmap *bm)
2024 {
2025 	struct zone *zone;
2026 	unsigned long pfn, max_zone_pfn;
2027 
2028 	/* Clear page flags */
2029 	for_each_populated_zone(zone) {
2030 		max_zone_pfn = zone_end_pfn(zone);
2031 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
2032 			if (pfn_valid(pfn))
2033 				swsusp_unset_page_free(pfn_to_page(pfn));
2034 	}
2035 
2036 	/* Mark pages that correspond to the "original" pfns as "unsafe" */
2037 	memory_bm_position_reset(bm);
2038 	do {
2039 		pfn = memory_bm_next_pfn(bm);
2040 		if (likely(pfn != BM_END_OF_MAP)) {
2041 			if (likely(pfn_valid(pfn)) && !is_nosave_page(pfn))
2042 				swsusp_set_page_free(pfn_to_page(pfn));
2043 			else
2044 				return -EFAULT;
2045 		}
2046 	} while (pfn != BM_END_OF_MAP);
2047 
2048 	allocated_unsafe_pages = 0;
2049 
2050 	return 0;
2051 }
2052 
2053 static void
2054 duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src)
2055 {
2056 	unsigned long pfn;
2057 
2058 	memory_bm_position_reset(src);
2059 	pfn = memory_bm_next_pfn(src);
2060 	while (pfn != BM_END_OF_MAP) {
2061 		memory_bm_set_bit(dst, pfn);
2062 		pfn = memory_bm_next_pfn(src);
2063 	}
2064 }
2065 
2066 static int check_header(struct swsusp_info *info)
2067 {
2068 	char *reason;
2069 
2070 	reason = check_image_kernel(info);
2071 	if (!reason && info->num_physpages != get_num_physpages())
2072 		reason = "memory size";
2073 	if (reason) {
2074 		printk(KERN_ERR "PM: Image mismatch: %s\n", reason);
2075 		return -EPERM;
2076 	}
2077 	return 0;
2078 }
2079 
2080 /**
2081  *	load_header - check the image header and copy data from it
2082  */
2083 
2084 static int
2085 load_header(struct swsusp_info *info)
2086 {
2087 	int error;
2088 
2089 	restore_pblist = NULL;
2090 	error = check_header(info);
2091 	if (!error) {
2092 		nr_copy_pages = info->image_pages;
2093 		nr_meta_pages = info->pages - info->image_pages - 1;
2094 	}
2095 	return error;
2096 }
2097 
2098 /**
2099  *	unpack_orig_pfns - for each element of @buf[] (1 page at a time) set
2100  *	the corresponding bit in the memory bitmap @bm
2101  */
2102 static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
2103 {
2104 	int j;
2105 
2106 	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2107 		if (unlikely(buf[j] == BM_END_OF_MAP))
2108 			break;
2109 
2110 		/* Extract and buffer page key for data page (s390 only). */
2111 		page_key_memorize(buf + j);
2112 
2113 		if (memory_bm_pfn_present(bm, buf[j]))
2114 			memory_bm_set_bit(bm, buf[j]);
2115 		else
2116 			return -EFAULT;
2117 	}
2118 
2119 	return 0;
2120 }
2121 
2122 /* List of "safe" pages that may be used to store data loaded from the suspend
2123  * image
2124  */
2125 static struct linked_page *safe_pages_list;
2126 
2127 #ifdef CONFIG_HIGHMEM
2128 /* struct highmem_pbe is used for creating the list of highmem pages that
2129  * should be restored atomically during the resume from disk, because the page
2130  * frames they have occupied before the suspend are in use.
2131  */
2132 struct highmem_pbe {
2133 	struct page *copy_page;	/* data is here now */
2134 	struct page *orig_page;	/* data was here before the suspend */
2135 	struct highmem_pbe *next;
2136 };
2137 
2138 /* List of highmem PBEs needed for restoring the highmem pages that were
2139  * allocated before the suspend and included in the suspend image, but have
2140  * also been allocated by the "resume" kernel, so their contents cannot be
2141  * written directly to their "original" page frames.
2142  */
2143 static struct highmem_pbe *highmem_pblist;
2144 
2145 /**
2146  *	count_highmem_image_pages - compute the number of highmem pages in the
2147  *	suspend image.  The bits in the memory bitmap @bm that correspond to the
2148  *	image pages are assumed to be set.
2149  */
2150 
2151 static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
2152 {
2153 	unsigned long pfn;
2154 	unsigned int cnt = 0;
2155 
2156 	memory_bm_position_reset(bm);
2157 	pfn = memory_bm_next_pfn(bm);
2158 	while (pfn != BM_END_OF_MAP) {
2159 		if (PageHighMem(pfn_to_page(pfn)))
2160 			cnt++;
2161 
2162 		pfn = memory_bm_next_pfn(bm);
2163 	}
2164 	return cnt;
2165 }
2166 
2167 /**
2168  *	prepare_highmem_image - try to allocate as many highmem pages as
2169  *	there are highmem image pages (@nr_highmem_p points to the variable
2170  *	containing the number of highmem image pages).  The pages that are
2171  *	"safe" (ie. will not be overwritten when the suspend image is
2172  *	restored) have the corresponding bits set in @bm (it must be
2173  *	uninitialized).
2174  *
2175  *	NOTE: This function should not be called if there are no highmem
2176  *	image pages.
2177  */
2178 
2179 static unsigned int safe_highmem_pages;
2180 
2181 static struct memory_bitmap *safe_highmem_bm;
2182 
2183 static int
2184 prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
2185 {
2186 	unsigned int to_alloc;
2187 
2188 	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
2189 		return -ENOMEM;
2190 
2191 	if (get_highmem_buffer(PG_SAFE))
2192 		return -ENOMEM;
2193 
2194 	to_alloc = count_free_highmem_pages();
2195 	if (to_alloc > *nr_highmem_p)
2196 		to_alloc = *nr_highmem_p;
2197 	else
2198 		*nr_highmem_p = to_alloc;
2199 
2200 	safe_highmem_pages = 0;
2201 	while (to_alloc-- > 0) {
2202 		struct page *page;
2203 
2204 		page = alloc_page(__GFP_HIGHMEM);
2205 		if (!swsusp_page_is_free(page)) {
2206 			/* The page is "safe", set its bit the bitmap */
2207 			memory_bm_set_bit(bm, page_to_pfn(page));
2208 			safe_highmem_pages++;
2209 		}
2210 		/* Mark the page as allocated */
2211 		swsusp_set_page_forbidden(page);
2212 		swsusp_set_page_free(page);
2213 	}
2214 	memory_bm_position_reset(bm);
2215 	safe_highmem_bm = bm;
2216 	return 0;
2217 }
2218 
2219 /**
2220  *	get_highmem_page_buffer - for given highmem image page find the buffer
2221  *	that suspend_write_next() should set for its caller to write to.
2222  *
2223  *	If the page is to be saved to its "original" page frame or a copy of
2224  *	the page is to be made in the highmem, @buffer is returned.  Otherwise,
2225  *	the copy of the page is to be made in normal memory, so the address of
2226  *	the copy is returned.
2227  *
2228  *	If @buffer is returned, the caller of suspend_write_next() will write
2229  *	the page's contents to @buffer, so they will have to be copied to the
2230  *	right location on the next call to suspend_write_next() and it is done
2231  *	with the help of copy_last_highmem_page().  For this purpose, if
2232  *	@buffer is returned, @last_highmem_page is set to the page to which
2233  *	the data will have to be copied from @buffer.
2234  */
2235 
2236 static struct page *last_highmem_page;
2237 
2238 static void *
2239 get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
2240 {
2241 	struct highmem_pbe *pbe;
2242 	void *kaddr;
2243 
2244 	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
2245 		/* We have allocated the "original" page frame and we can
2246 		 * use it directly to store the loaded page.
2247 		 */
2248 		last_highmem_page = page;
2249 		return buffer;
2250 	}
2251 	/* The "original" page frame has not been allocated and we have to
2252 	 * use a "safe" page frame to store the loaded page.
2253 	 */
2254 	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
2255 	if (!pbe) {
2256 		swsusp_free();
2257 		return ERR_PTR(-ENOMEM);
2258 	}
2259 	pbe->orig_page = page;
2260 	if (safe_highmem_pages > 0) {
2261 		struct page *tmp;
2262 
2263 		/* Copy of the page will be stored in high memory */
2264 		kaddr = buffer;
2265 		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
2266 		safe_highmem_pages--;
2267 		last_highmem_page = tmp;
2268 		pbe->copy_page = tmp;
2269 	} else {
2270 		/* Copy of the page will be stored in normal memory */
2271 		kaddr = safe_pages_list;
2272 		safe_pages_list = safe_pages_list->next;
2273 		pbe->copy_page = virt_to_page(kaddr);
2274 	}
2275 	pbe->next = highmem_pblist;
2276 	highmem_pblist = pbe;
2277 	return kaddr;
2278 }
2279 
2280 /**
2281  *	copy_last_highmem_page - copy the contents of a highmem image page
2282  *	from @buffer, where the caller of snapshot_write_next() has placed
2283  *	them, to the right location represented by @last_highmem_page.
2284  */
2285 
2286 static void copy_last_highmem_page(void)
2287 {
2288 	if (last_highmem_page) {
2289 		void *dst;
2290 
2291 		dst = kmap_atomic(last_highmem_page);
2292 		copy_page(dst, buffer);
2293 		kunmap_atomic(dst);
2294 		last_highmem_page = NULL;
2295 	}
2296 }
2297 
2298 static inline int last_highmem_page_copied(void)
2299 {
2300 	return !last_highmem_page;
2301 }
2302 
2303 static inline void free_highmem_data(void)
2304 {
2305 	if (safe_highmem_bm)
2306 		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
2307 
2308 	if (buffer)
2309 		free_image_page(buffer, PG_UNSAFE_CLEAR);
2310 }
2311 #else
2312 static inline int get_safe_write_buffer(void) { return 0; }
2313 
2314 static unsigned int
2315 count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
2316 
2317 static inline int
2318 prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
2319 {
2320 	return 0;
2321 }
2322 
2323 static inline void *
2324 get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
2325 {
2326 	return ERR_PTR(-EINVAL);
2327 }
2328 
2329 static inline void copy_last_highmem_page(void) {}
2330 static inline int last_highmem_page_copied(void) { return 1; }
2331 static inline void free_highmem_data(void) {}
2332 #endif /* CONFIG_HIGHMEM */
2333 
2334 /**
2335  *	prepare_image - use the memory bitmap @bm to mark the pages that will
2336  *	be overwritten in the process of restoring the system memory state
2337  *	from the suspend image ("unsafe" pages) and allocate memory for the
2338  *	image.
2339  *
2340  *	The idea is to allocate a new memory bitmap first and then allocate
2341  *	as many pages as needed for the image data, but not to assign these
2342  *	pages to specific tasks initially.  Instead, we just mark them as
2343  *	allocated and create a list of "safe" pages that will be used
2344  *	later.  On systems with high memory a list of "safe" highmem pages is
2345  *	also created.
2346  */
2347 
2348 #define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
2349 
2350 static int
2351 prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
2352 {
2353 	unsigned int nr_pages, nr_highmem;
2354 	struct linked_page *sp_list, *lp;
2355 	int error;
2356 
2357 	/* If there is no highmem, the buffer will not be necessary */
2358 	free_image_page(buffer, PG_UNSAFE_CLEAR);
2359 	buffer = NULL;
2360 
2361 	nr_highmem = count_highmem_image_pages(bm);
2362 	error = mark_unsafe_pages(bm);
2363 	if (error)
2364 		goto Free;
2365 
2366 	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
2367 	if (error)
2368 		goto Free;
2369 
2370 	duplicate_memory_bitmap(new_bm, bm);
2371 	memory_bm_free(bm, PG_UNSAFE_KEEP);
2372 	if (nr_highmem > 0) {
2373 		error = prepare_highmem_image(bm, &nr_highmem);
2374 		if (error)
2375 			goto Free;
2376 	}
2377 	/* Reserve some safe pages for potential later use.
2378 	 *
2379 	 * NOTE: This way we make sure there will be enough safe pages for the
2380 	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
2381 	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
2382 	 */
2383 	sp_list = NULL;
2384 	/* nr_copy_pages cannot be less than allocated_unsafe_pages */
2385 	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2386 	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
2387 	while (nr_pages > 0) {
2388 		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
2389 		if (!lp) {
2390 			error = -ENOMEM;
2391 			goto Free;
2392 		}
2393 		lp->next = sp_list;
2394 		sp_list = lp;
2395 		nr_pages--;
2396 	}
2397 	/* Preallocate memory for the image */
2398 	safe_pages_list = NULL;
2399 	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2400 	while (nr_pages > 0) {
2401 		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
2402 		if (!lp) {
2403 			error = -ENOMEM;
2404 			goto Free;
2405 		}
2406 		if (!swsusp_page_is_free(virt_to_page(lp))) {
2407 			/* The page is "safe", add it to the list */
2408 			lp->next = safe_pages_list;
2409 			safe_pages_list = lp;
2410 		}
2411 		/* Mark the page as allocated */
2412 		swsusp_set_page_forbidden(virt_to_page(lp));
2413 		swsusp_set_page_free(virt_to_page(lp));
2414 		nr_pages--;
2415 	}
2416 	/* Free the reserved safe pages so that chain_alloc() can use them */
2417 	while (sp_list) {
2418 		lp = sp_list->next;
2419 		free_image_page(sp_list, PG_UNSAFE_CLEAR);
2420 		sp_list = lp;
2421 	}
2422 	return 0;
2423 
2424  Free:
2425 	swsusp_free();
2426 	return error;
2427 }
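
/*
 * Editorial note on the reservation above (sizes assume a 64-bit system
 * with 4 KiB pages): a struct pbe is three pointers, so one linked page
 * holds (4096 - 8) / 24 = 170 PBEs, and at most
 * DIV_ROUND_UP(nr_copy_pages - nr_highmem - allocated_unsafe_pages, 170)
 * safe pages are parked on sp_list.  This guarantees that chain_alloc()
 * in get_buffer() cannot run out of safe memory while building
 * restore_pblist.
 */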
2428 
2429 /**
2430  *	get_buffer - compute the address that snapshot_write_next() should
2431  *	set for its caller to write to.
2432  */
2433 
2434 static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
2435 {
2436 	struct pbe *pbe;
2437 	struct page *page;
2438 	unsigned long pfn = memory_bm_next_pfn(bm);
2439 
2440 	if (pfn == BM_END_OF_MAP)
2441 		return ERR_PTR(-EFAULT);
2442 
2443 	page = pfn_to_page(pfn);
2444 	if (PageHighMem(page))
2445 		return get_highmem_page_buffer(page, ca);
2446 
2447 	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
2448 		/* We have allocated the "original" page frame and we can
2449 		 * use it directly to store the loaded page.
2450 		 */
2451 		return page_address(page);
2452 
2453 	/* The "original" page frame has not been allocated and we have to
2454 	 * use a "safe" page frame to store the loaded page.
2455 	 */
2456 	pbe = chain_alloc(ca, sizeof(struct pbe));
2457 	if (!pbe) {
2458 		swsusp_free();
2459 		return ERR_PTR(-ENOMEM);
2460 	}
2461 	pbe->orig_address = page_address(page);
2462 	pbe->address = safe_pages_list;
2463 	safe_pages_list = safe_pages_list->next;
2464 	pbe->next = restore_pblist;
2465 	restore_pblist = pbe;
2466 	return pbe->address;
2467 }
2468 
2469 /**
2470  *	snapshot_write_next - used for writing the system memory snapshot.
2471  *
2472  *	On the first call to it @handle should point to a zeroed
2473  *	snapshot_handle structure.  The structure gets updated and a pointer
2474  *	to it should be passed on each subsequent call to this function.
2475  *
2476  *	On success the function returns a positive number.  Then, the caller
2477  *	is allowed to write up to the returned number of bytes to the memory
2478  *	location computed by the data_of() macro.
2479  *
2480  *	The function returns 0 to indicate the "end of file" condition,
2481  *	and a negative number is returned on error.  In such cases the
2482  *	structure pointed to by @handle is not updated and should not be used
2483  *	any more.
2484  */
2485 
2486 int snapshot_write_next(struct snapshot_handle *handle)
2487 {
2488 	static struct chain_allocator ca;
2489 	int error = 0;
2490 
2491 	/* Check if we have already loaded the entire image */
2492 	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
2493 		return 0;
2494 
2495 	handle->sync_read = 1;
2496 
2497 	if (!handle->cur) {
2498 		if (!buffer)
2499 			/* This ensures the buffer will be freed by swsusp_free() */
2500 			buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2501 
2502 		if (!buffer)
2503 			return -ENOMEM;
2504 
2505 		handle->buffer = buffer;
2506 	} else if (handle->cur == 1) {
2507 		error = load_header(buffer);
2508 		if (error)
2509 			return error;
2510 
2511 		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
2512 		if (error)
2513 			return error;
2514 
2515 		/* Allocate buffer for page keys. */
2516 		error = page_key_alloc(nr_copy_pages);
2517 		if (error)
2518 			return error;
2519 
2520 	} else if (handle->cur <= nr_meta_pages + 1) {
2521 		error = unpack_orig_pfns(buffer, &copy_bm);
2522 		if (error)
2523 			return error;
2524 
2525 		if (handle->cur == nr_meta_pages + 1) {
2526 			error = prepare_image(&orig_bm, &copy_bm);
2527 			if (error)
2528 				return error;
2529 
2530 			chain_init(&ca, GFP_ATOMIC, PG_SAFE);
2531 			memory_bm_position_reset(&orig_bm);
2532 			restore_pblist = NULL;
2533 			handle->buffer = get_buffer(&orig_bm, &ca);
2534 			handle->sync_read = 0;
2535 			if (IS_ERR(handle->buffer))
2536 				return PTR_ERR(handle->buffer);
2537 		}
2538 	} else {
2539 		copy_last_highmem_page();
2540 		/* Restore page key for data page (s390 only). */
2541 		page_key_write(handle->buffer);
2542 		handle->buffer = get_buffer(&orig_bm, &ca);
2543 		if (IS_ERR(handle->buffer))
2544 			return PTR_ERR(handle->buffer);
2545 		if (handle->buffer != buffer)
2546 			handle->sync_read = 0;
2547 	}
2548 	handle->cur++;
2549 	return PAGE_SIZE;
2550 }
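
/*
 * Illustrative caller sketch (editorial addition, not part of the kernel
 * source): the image-loading path drives snapshot_write_next() as a
 * mirror image of the read loop shown after snapshot_read_next().
 * read_page_from_storage() is a hypothetical placeholder for the actual
 * input routine.
 */
#if 0
static int load_image_sketch(struct snapshot_handle *handle)
{
	int ret;

	memset(handle, 0, sizeof(*handle));	/* first call: zeroed handle */
	while ((ret = snapshot_write_next(handle)) > 0) {
		/* Up to 'ret' bytes may be written to data_of(*handle). */
		ret = read_page_from_storage(data_of(*handle));
		if (ret)
			break;
	}
	if (!ret) {
		snapshot_write_finalize(handle);
		if (!snapshot_image_loaded(handle))
			ret = -ENODATA;
	}
	return ret;
}
#endif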
2551 
2552 /**
2553  *	snapshot_write_finalize - must be called after the last call to
2554  *	snapshot_write_next() in case the last page in the image happens
2555  *	to be a highmem page and its contents should be stored in the
2556  *	highmem.  Additionally, it releases the memory that will not be
2557  *	used any more.
2558  */
2559 
2560 void snapshot_write_finalize(struct snapshot_handle *handle)
2561 {
2562 	copy_last_highmem_page();
2563 	/* Restore page key for data page (s390 only). */
2564 	page_key_write(handle->buffer);
2565 	page_key_free();
2566 	/* Free only if we have loaded the image entirely */
2567 	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
2568 		memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
2569 		free_highmem_data();
2570 	}
2571 }
2572 
2573 int snapshot_image_loaded(struct snapshot_handle *handle)
2574 {
2575 	return !(!nr_copy_pages || !last_highmem_page_copied() ||
2576 			handle->cur <= nr_meta_pages + nr_copy_pages);
2577 }
2578 
2579 #ifdef CONFIG_HIGHMEM
2580 /* Assumes that @buf is ready and points to a "safe" page */
2581 static inline void
2582 swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
2583 {
2584 	void *kaddr1, *kaddr2;
2585 
2586 	kaddr1 = kmap_atomic(p1);
2587 	kaddr2 = kmap_atomic(p2);
2588 	copy_page(buf, kaddr1);
2589 	copy_page(kaddr1, kaddr2);
2590 	copy_page(kaddr2, buf);
2591 	kunmap_atomic(kaddr2);
2592 	kunmap_atomic(kaddr1);
2593 }
2594 
2595 /**
2596  *	restore_highmem - for each highmem page that was allocated before
2597  *	the suspend and included in the suspend image, and also has been
2598  *	allocated by the "resume" kernel swap its current (ie. "before
2599  *	resume") contents with the previous (ie. "before suspend") one.
2600  *
2601  *	If the resume eventually fails, we can call this function once
2602  *	again and restore the "before resume" highmem state.
2603  */
2604 
2605 int restore_highmem(void)
2606 {
2607 	struct highmem_pbe *pbe = highmem_pblist;
2608 	void *buf;
2609 
2610 	if (!pbe)
2611 		return 0;
2612 
2613 	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
2614 	if (!buf)
2615 		return -ENOMEM;
2616 
2617 	while (pbe) {
2618 		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
2619 		pbe = pbe->next;
2620 	}
2621 	free_image_page(buf, PG_UNSAFE_CLEAR);
2622 	return 0;
2623 }
2624 #endif /* CONFIG_HIGHMEM */
2625