xref: /openbmc/linux/kernel/power/snapshot.c (revision 722a9f92)
1 /*
2  * linux/kernel/power/snapshot.c
3  *
4  * This file provides system snapshot/restore functionality for swsusp.
5  *
6  * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
7  * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
8  *
9  * This file is released under the GPLv2.
10  *
11  */
12 
13 #include <linux/version.h>
14 #include <linux/module.h>
15 #include <linux/mm.h>
16 #include <linux/suspend.h>
17 #include <linux/delay.h>
18 #include <linux/bitops.h>
19 #include <linux/spinlock.h>
20 #include <linux/kernel.h>
21 #include <linux/pm.h>
22 #include <linux/device.h>
23 #include <linux/init.h>
24 #include <linux/bootmem.h>
25 #include <linux/syscalls.h>
26 #include <linux/console.h>
27 #include <linux/highmem.h>
28 #include <linux/list.h>
29 #include <linux/slab.h>
30 #include <linux/compiler.h>
31 
32 #include <asm/uaccess.h>
33 #include <asm/mmu_context.h>
34 #include <asm/pgtable.h>
35 #include <asm/tlbflush.h>
36 #include <asm/io.h>
37 
38 #include "power.h"
39 
40 static int swsusp_page_is_free(struct page *);
41 static void swsusp_set_page_forbidden(struct page *);
42 static void swsusp_unset_page_forbidden(struct page *);
43 
44 /*
45  * Number of bytes to reserve for memory allocations made by device drivers
46  * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
47  * cause image creation to fail (tunable via /sys/power/reserved_size).
48  */
49 unsigned long reserved_size;
50 
51 void __init hibernate_reserved_size_init(void)
52 {
53 	reserved_size = SPARE_PAGES * PAGE_SIZE;
54 }
55 
56 /*
57  * Preferred image size in bytes (tunable via /sys/power/image_size).
58  * When it is set to N, swsusp will do its best to ensure the image
59  * size will not exceed N bytes, but if that is impossible, it will
60  * try to create the smallest image possible.
61  */
62 unsigned long image_size;
63 
64 void __init hibernate_image_size_init(void)
65 {
66 	image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
67 }
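
/*
 * Illustrative arithmetic (assuming 4 KiB pages; not part of the code):
 * on a machine with 2 GiB of RAM, totalram_pages is 524288, so the
 * default image_size works out to ((524288 * 2) / 5) * 4096 bytes,
 * i.e. roughly 800 MiB, or two fifths of RAM.
 */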
68 
69 /* List of PBEs needed for restoring the pages that were allocated before
70  * the suspend and included in the suspend image, but have also been
71  * allocated by the "resume" kernel, so their contents cannot be written
72  * directly to their "original" page frames.
73  */
74 struct pbe *restore_pblist;
75 
76 /* Pointer to an auxiliary buffer (1 page) */
77 static void *buffer;
78 
79 /**
80  *	@safe_needed - on resume, for storing the PBE list and the image,
81  *	we can only use memory pages that do not conflict with the pages
82  *	used before suspend.  The unsafe pages have PageNosaveFree set
83  *	and we count them using allocated_unsafe_pages.
84  *
85  *	Each allocated image page is marked as PageNosave and PageNosaveFree
86  *	so that swsusp_free() can release it.
87  */
88 
89 #define PG_ANY		0
90 #define PG_SAFE		1
91 #define PG_UNSAFE_CLEAR	1
92 #define PG_UNSAFE_KEEP	0
93 
94 static unsigned int allocated_unsafe_pages;
95 
96 static void *get_image_page(gfp_t gfp_mask, int safe_needed)
97 {
98 	void *res;
99 
100 	res = (void *)get_zeroed_page(gfp_mask);
101 	if (safe_needed)
102 		while (res && swsusp_page_is_free(virt_to_page(res))) {
103 			/* The page is unsafe, mark it for swsusp_free() */
104 			swsusp_set_page_forbidden(virt_to_page(res));
105 			allocated_unsafe_pages++;
106 			res = (void *)get_zeroed_page(gfp_mask);
107 		}
108 	if (res) {
109 		swsusp_set_page_forbidden(virt_to_page(res));
110 		swsusp_set_page_free(virt_to_page(res));
111 	}
112 	return res;
113 }
114 
115 unsigned long get_safe_page(gfp_t gfp_mask)
116 {
117 	return (unsigned long)get_image_page(gfp_mask, PG_SAFE);
118 }
119 
120 static struct page *alloc_image_page(gfp_t gfp_mask)
121 {
122 	struct page *page;
123 
124 	page = alloc_page(gfp_mask);
125 	if (page) {
126 		swsusp_set_page_forbidden(page);
127 		swsusp_set_page_free(page);
128 	}
129 	return page;
130 }
131 
132 /**
133  *	free_image_page - free the page represented by @addr, allocated with
134  *	get_image_page (page flags set by it must be cleared)
135  */
136 
137 static inline void free_image_page(void *addr, int clear_nosave_free)
138 {
139 	struct page *page;
140 
141 	BUG_ON(!virt_addr_valid(addr));
142 
143 	page = virt_to_page(addr);
144 
145 	swsusp_unset_page_forbidden(page);
146 	if (clear_nosave_free)
147 		swsusp_unset_page_free(page);
148 
149 	__free_page(page);
150 }
151 
152 /* struct linked_page is used to build chains of pages */
153 
154 #define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))
155 
156 struct linked_page {
157 	struct linked_page *next;
158 	char data[LINKED_PAGE_DATA_SIZE];
159 } __packed;
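
/*
 * With 4 KiB pages on a 64-bit machine, LINKED_PAGE_DATA_SIZE is
 * 4096 - 8 == 4088 bytes, so each struct linked_page, including its
 * next pointer, fills exactly one page.
 */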
160 
161 static inline void
162 free_list_of_pages(struct linked_page *list, int clear_page_nosave)
163 {
164 	while (list) {
165 		struct linked_page *lp = list->next;
166 
167 		free_image_page(list, clear_page_nosave);
168 		list = lp;
169 	}
170 }
171 
172 /**
173   *	struct chain_allocator is used for allocating small objects out of
174   *	a linked list of pages called 'the chain'.
175   *
176   *	The chain grows each time there is no room for a new object in
177   *	the current page.  The allocated objects cannot be freed individually.
178   *	It is only possible to free them all at once, by freeing the entire
179   *	chain.
180   *
181   *	NOTE: The chain allocator may be inefficient if the allocated objects
182   *	are not much smaller than PAGE_SIZE.
183   */
184 
185 struct chain_allocator {
186 	struct linked_page *chain;	/* the chain */
187 	unsigned int used_space;	/* total size of objects allocated out
188 					 * of the current page
189 					 */
190 	gfp_t gfp_mask;		/* mask for allocating pages */
191 	int safe_needed;	/* if set, only "safe" pages are allocated */
192 };
193 
194 static void
195 chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed)
196 {
197 	ca->chain = NULL;
198 	ca->used_space = LINKED_PAGE_DATA_SIZE;
199 	ca->gfp_mask = gfp_mask;
200 	ca->safe_needed = safe_needed;
201 }
202 
203 static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
204 {
205 	void *ret;
206 
207 	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
208 		struct linked_page *lp;
209 
210 		lp = get_image_page(ca->gfp_mask, ca->safe_needed);
211 		if (!lp)
212 			return NULL;
213 
214 		lp->next = ca->chain;
215 		ca->chain = lp;
216 		ca->used_space = 0;
217 	}
218 	ret = ca->chain->data + ca->used_space;
219 	ca->used_space += size;
220 	return ret;
221 }
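
/*
 * Minimal usage sketch (illustrative only; see memory_bm_create() and
 * memory_bm_free() below for real callers):
 *
 *	struct chain_allocator ca;
 *	struct bm_block *bb;
 *
 *	chain_init(&ca, GFP_KERNEL, PG_ANY);
 *	bb = chain_alloc(&ca, sizeof(struct bm_block));
 *	...
 *	free_list_of_pages(ca.chain, PG_UNSAFE_KEEP);
 */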
222 
223 /**
224  *	Data types related to memory bitmaps.
225  *
226  *	Memory bitmap is a structure consisting of a linked list of
227  *	objects of type struct bm_block.  Each of them represents one
228  *	block of the bitmap, i.e. it covers a contiguous range of page
229  *	frames and points to one memory page in which the corresponding
230  *	bits of information are stored.
231  *
232  *	struct memory_bitmap contains the head of the list of bitmap
233  *	block objects, a struct bm_position used for browsing the bitmap,
234  *	and a pointer to the list of pages used for allocating all of the
235  *	bitmap block objects.
236  *
237  *	NOTE: It has to be possible to lay out the bitmap in memory
238  *	using only allocations of order 0.  Additionally, the bitmap is
239  *	designed to work with an arbitrary number of zones (this is over the
240  *	top for now, but let's avoid making unnecessary assumptions ;-).
241  *
242  *	struct bm_position stores the bitmap block, and the bit within
243  *	it, that have been used most recently, so that operations on
244  *	consecutive pfns do not have to search the whole list of blocks
245  *	every time.
246  *
247  *	struct bm_block contains a pointer to the memory page in which
248  *	information is stored (in the form of a block of the bitmap).
249  *	It also contains the pfns that correspond to the start and end of
250  *	the represented memory area.
251  */
252 
253 #define BM_END_OF_MAP	(~0UL)
254 
255 #define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
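
/*
 * For example, with 4 KiB pages BM_BITS_PER_BLOCK is 4096 * 8 == 32768,
 * so a single bm_block covers 32768 page frames, i.e. 128 MiB of
 * physical memory.
 */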
256 
257 struct bm_block {
258 	struct list_head hook;	/* hook into a list of bitmap blocks */
259 	unsigned long start_pfn;	/* pfn represented by the first bit */
260 	unsigned long end_pfn;	/* pfn represented by the last bit plus 1 */
261 	unsigned long *data;	/* bitmap representing pages */
262 };
263 
264 static inline unsigned long bm_block_bits(struct bm_block *bb)
265 {
266 	return bb->end_pfn - bb->start_pfn;
267 }
268 
269 /* struct bm_position is used for browsing memory bitmaps */
270 
271 struct bm_position {
272 	struct bm_block *block;
273 	int bit;
274 };
275 
276 struct memory_bitmap {
277 	struct list_head blocks;	/* list of bitmap blocks */
278 	struct linked_page *p_list;	/* list of pages used to store zone
279 					 * bitmap objects and bitmap block
280 					 * objects
281 					 */
282 	struct bm_position cur;	/* most recently used bit position */
283 };
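
/*
 * Sketch of the pfn <-> bit mapping (see memory_bm_find_bit() below):
 * a pfn belongs to the bm_block for which start_pfn <= pfn < end_pfn,
 * and within that block it corresponds to bit (pfn - start_pfn) of the
 * bitmap page pointed to by bb->data.
 */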
284 
285 /* Functions that operate on memory bitmaps */
286 
287 static void memory_bm_position_reset(struct memory_bitmap *bm)
288 {
289 	bm->cur.block = list_entry(bm->blocks.next, struct bm_block, hook);
290 	bm->cur.bit = 0;
291 }
292 
293 static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
294 
295 /**
296  *	create_bm_block_list - create a list of bitmap block objects
297  *	@pages - number of pages to track
298  *	@list - list to put the allocated blocks into
299  *	@ca - chain allocator to be used for allocating memory
300  */
301 static int create_bm_block_list(unsigned long pages,
302 				struct list_head *list,
303 				struct chain_allocator *ca)
304 {
305 	unsigned int nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
306 
307 	while (nr_blocks-- > 0) {
308 		struct bm_block *bb;
309 
310 		bb = chain_alloc(ca, sizeof(struct bm_block));
311 		if (!bb)
312 			return -ENOMEM;
313 		list_add(&bb->hook, list);
314 	}
315 
316 	return 0;
317 }
318 
319 struct mem_extent {
320 	struct list_head hook;
321 	unsigned long start;
322 	unsigned long end;
323 };
324 
325 /**
326  *	free_mem_extents - free a list of memory extents
327  *	@list - list of extents to empty
328  */
329 static void free_mem_extents(struct list_head *list)
330 {
331 	struct mem_extent *ext, *aux;
332 
333 	list_for_each_entry_safe(ext, aux, list, hook) {
334 		list_del(&ext->hook);
335 		kfree(ext);
336 	}
337 }
338 
339 /**
340  *	create_mem_extents - create a list of memory extents representing
341  *	                     contiguous ranges of PFNs
342  *	@list - list to put the extents into
343  *	@gfp_mask - mask to use for memory allocations
344  */
345 static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
346 {
347 	struct zone *zone;
348 
349 	INIT_LIST_HEAD(list);
350 
351 	for_each_populated_zone(zone) {
352 		unsigned long zone_start, zone_end;
353 		struct mem_extent *ext, *cur, *aux;
354 
355 		zone_start = zone->zone_start_pfn;
356 		zone_end = zone_end_pfn(zone);
357 
358 		list_for_each_entry(ext, list, hook)
359 			if (zone_start <= ext->end)
360 				break;
361 
362 		if (&ext->hook == list || zone_end < ext->start) {
363 			/* New extent is necessary */
364 			struct mem_extent *new_ext;
365 
366 			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
367 			if (!new_ext) {
368 				free_mem_extents(list);
369 				return -ENOMEM;
370 			}
371 			new_ext->start = zone_start;
372 			new_ext->end = zone_end;
373 			list_add_tail(&new_ext->hook, &ext->hook);
374 			continue;
375 		}
376 
377 		/* Merge this zone's range of PFNs with the existing one */
378 		if (zone_start < ext->start)
379 			ext->start = zone_start;
380 		if (zone_end > ext->end)
381 			ext->end = zone_end;
382 
383 		/* More merging may be possible */
384 		cur = ext;
385 		list_for_each_entry_safe_continue(cur, aux, list, hook) {
386 			if (zone_end < cur->start)
387 				break;
388 			if (zone_end < cur->end)
389 				ext->end = cur->end;
390 			list_del(&cur->hook);
391 			kfree(cur);
392 		}
393 	}
394 
395 	return 0;
396 }
397 
398 /**
399   *	memory_bm_create - allocate memory for a memory bitmap
400   */
401 static int
402 memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
403 {
404 	struct chain_allocator ca;
405 	struct list_head mem_extents;
406 	struct mem_extent *ext;
407 	int error;
408 
409 	chain_init(&ca, gfp_mask, safe_needed);
410 	INIT_LIST_HEAD(&bm->blocks);
411 
412 	error = create_mem_extents(&mem_extents, gfp_mask);
413 	if (error)
414 		return error;
415 
416 	list_for_each_entry(ext, &mem_extents, hook) {
417 		struct bm_block *bb;
418 		unsigned long pfn = ext->start;
419 		unsigned long pages = ext->end - ext->start;
420 
421 		bb = list_entry(bm->blocks.prev, struct bm_block, hook);
422 
423 		error = create_bm_block_list(pages, bm->blocks.prev, &ca);
424 		if (error)
425 			goto Error;
426 
427 		list_for_each_entry_continue(bb, &bm->blocks, hook) {
428 			bb->data = get_image_page(gfp_mask, safe_needed);
429 			if (!bb->data) {
430 				error = -ENOMEM;
431 				goto Error;
432 			}
433 
434 			bb->start_pfn = pfn;
435 			if (pages >= BM_BITS_PER_BLOCK) {
436 				pfn += BM_BITS_PER_BLOCK;
437 				pages -= BM_BITS_PER_BLOCK;
438 			} else {
439 				/* This is executed only once in the loop */
440 				pfn += pages;
441 			}
442 			bb->end_pfn = pfn;
443 		}
444 	}
445 
446 	bm->p_list = ca.chain;
447 	memory_bm_position_reset(bm);
448  Exit:
449 	free_mem_extents(&mem_extents);
450 	return error;
451 
452  Error:
453 	bm->p_list = ca.chain;
454 	memory_bm_free(bm, PG_UNSAFE_CLEAR);
455 	goto Exit;
456 }
457 
458 /**
459   *	memory_bm_free - free memory occupied by the memory bitmap @bm
460   */
461 static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
462 {
463 	struct bm_block *bb;
464 
465 	list_for_each_entry(bb, &bm->blocks, hook)
466 		if (bb->data)
467 			free_image_page(bb->data, clear_nosave_free);
468 
469 	free_list_of_pages(bm->p_list, clear_nosave_free);
470 
471 	INIT_LIST_HEAD(&bm->blocks);
472 }
473 
474 /**
475  *	memory_bm_find_bit - find the bit in the bitmap @bm that corresponds
476  *	to the given pfn.  The cur member of @bm is updated so that the
477  *	next search can start from the block that has just been found.
478  */
479 static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
480 				void **addr, unsigned int *bit_nr)
481 {
482 	struct bm_block *bb;
483 
484 	/*
485 	 * Check if the pfn corresponds to the current bitmap block and find
486 	 * the block where it fits if this is not the case.
487 	 */
488 	bb = bm->cur.block;
489 	if (pfn < bb->start_pfn)
490 		list_for_each_entry_continue_reverse(bb, &bm->blocks, hook)
491 			if (pfn >= bb->start_pfn)
492 				break;
493 
494 	if (pfn >= bb->end_pfn)
495 		list_for_each_entry_continue(bb, &bm->blocks, hook)
496 			if (pfn >= bb->start_pfn && pfn < bb->end_pfn)
497 				break;
498 
499 	if (&bb->hook == &bm->blocks)
500 		return -EFAULT;
501 
502 	/* The block has been found */
503 	bm->cur.block = bb;
504 	pfn -= bb->start_pfn;
505 	bm->cur.bit = pfn + 1;
506 	*bit_nr = pfn;
507 	*addr = bb->data;
508 	return 0;
509 }
510 
511 static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
512 {
513 	void *addr;
514 	unsigned int bit;
515 	int error;
516 
517 	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
518 	BUG_ON(error);
519 	set_bit(bit, addr);
520 }
521 
522 static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
523 {
524 	void *addr;
525 	unsigned int bit;
526 	int error;
527 
528 	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
529 	if (!error)
530 		set_bit(bit, addr);
531 	return error;
532 }
533 
534 static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
535 {
536 	void *addr;
537 	unsigned int bit;
538 	int error;
539 
540 	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
541 	BUG_ON(error);
542 	clear_bit(bit, addr);
543 }
544 
545 static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
546 {
547 	void *addr;
548 	unsigned int bit;
549 	int error;
550 
551 	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
552 	BUG_ON(error);
553 	return test_bit(bit, addr);
554 }
555 
556 static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
557 {
558 	void *addr;
559 	unsigned int bit;
560 
561 	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
562 }
563 
564 /**
565  *	memory_bm_next_pfn - find the pfn that corresponds to the next set bit
566  *	in the bitmap @bm.  If the pfn cannot be found, BM_END_OF_MAP is
567  *	returned.
568  *
569  *	It is required to run memory_bm_position_reset() before the first call to
570  *	this function.
571  */
572 
573 static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
574 {
575 	struct bm_block *bb;
576 	int bit;
577 
578 	bb = bm->cur.block;
579 	do {
580 		bit = bm->cur.bit;
581 		bit = find_next_bit(bb->data, bm_block_bits(bb), bit);
582 		if (bit < bm_block_bits(bb))
583 			goto Return_pfn;
584 
585 		bb = list_entry(bb->hook.next, struct bm_block, hook);
586 		bm->cur.block = bb;
587 		bm->cur.bit = 0;
588 	} while (&bb->hook != &bm->blocks);
589 
590 	memory_bm_position_reset(bm);
591 	return BM_END_OF_MAP;
592 
593  Return_pfn:
594 	bm->cur.bit = bit + 1;
595 	return bb->start_pfn + bit;
596 }
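
/*
 * Typical pattern for visiting every set bit in a bitmap (cf.
 * duplicate_memory_bitmap() below; handle() is just a placeholder):
 *
 *	unsigned long pfn;
 *
 *	memory_bm_position_reset(bm);
 *	for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
 *	     pfn = memory_bm_next_pfn(bm))
 *		handle(pfn);
 */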
597 
598 /**
599  *	This structure represents a range of page frames the contents of which
600  *	should not be saved during the suspend.
601  */
602 
603 struct nosave_region {
604 	struct list_head list;
605 	unsigned long start_pfn;
606 	unsigned long end_pfn;
607 };
608 
609 static LIST_HEAD(nosave_regions);
610 
611 /**
612  *	__register_nosave_region - register a range of page frames the contents
613  *	of which should not be saved during the suspend (to be used in the early
614  *	initialization code)
615  */
616 
617 void __init
618 __register_nosave_region(unsigned long start_pfn, unsigned long end_pfn,
619 			 int use_kmalloc)
620 {
621 	struct nosave_region *region;
622 
623 	if (start_pfn >= end_pfn)
624 		return;
625 
626 	if (!list_empty(&nosave_regions)) {
627 		/* Try to extend the previous region (they should be sorted) */
628 		region = list_entry(nosave_regions.prev,
629 					struct nosave_region, list);
630 		if (region->end_pfn == start_pfn) {
631 			region->end_pfn = end_pfn;
632 			goto Report;
633 		}
634 	}
635 	if (use_kmalloc) {
636 		/* during init, this shouldn't fail */
637 		region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
638 		BUG_ON(!region);
639 	} else
640 		/* This allocation cannot fail */
641 		region = memblock_virt_alloc(sizeof(struct nosave_region), 0);
642 	region->start_pfn = start_pfn;
643 	region->end_pfn = end_pfn;
644 	list_add_tail(&region->list, &nosave_regions);
645  Report:
646 	printk(KERN_INFO "PM: Registered nosave memory: [mem %#010llx-%#010llx]\n",
647 		(unsigned long long) start_pfn << PAGE_SHIFT,
648 		((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
649 }
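
/*
 * Callers normally use the register_nosave_region() and
 * register_nosave_region_late() wrappers from <linux/suspend.h>, which
 * pass 0 and 1 for @use_kmalloc respectively.
 */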
650 
651 /*
652  * Set bits in this map correspond to the page frames the contents of which
653  * should not be saved during the suspend.
654  */
655 static struct memory_bitmap *forbidden_pages_map;
656 
657 /* Set bits in this map correspond to free page frames. */
658 static struct memory_bitmap *free_pages_map;
659 
660 /*
661  * Each page frame allocated for creating the image is marked by setting the
662  * corresponding bits in forbidden_pages_map and free_pages_map simultaneously
663  */
664 
665 void swsusp_set_page_free(struct page *page)
666 {
667 	if (free_pages_map)
668 		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
669 }
670 
671 static int swsusp_page_is_free(struct page *page)
672 {
673 	return free_pages_map ?
674 		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
675 }
676 
677 void swsusp_unset_page_free(struct page *page)
678 {
679 	if (free_pages_map)
680 		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
681 }
682 
683 static void swsusp_set_page_forbidden(struct page *page)
684 {
685 	if (forbidden_pages_map)
686 		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
687 }
688 
689 int swsusp_page_is_forbidden(struct page *page)
690 {
691 	return forbidden_pages_map ?
692 		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
693 }
694 
695 static void swsusp_unset_page_forbidden(struct page *page)
696 {
697 	if (forbidden_pages_map)
698 		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
699 }
700 
701 /**
702  *	mark_nosave_pages - in the given bitmap, set the bits corresponding
703  *	to the page frames the contents of which should not be saved.
704  */
705 
706 static void mark_nosave_pages(struct memory_bitmap *bm)
707 {
708 	struct nosave_region *region;
709 
710 	if (list_empty(&nosave_regions))
711 		return;
712 
713 	list_for_each_entry(region, &nosave_regions, list) {
714 		unsigned long pfn;
715 
716 		pr_debug("PM: Marking nosave pages: [mem %#010llx-%#010llx]\n",
717 			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
718 			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
719 				- 1);
720 
721 		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
722 			if (pfn_valid(pfn)) {
723 				/*
724 				 * It is safe to ignore the result of
725 				 * mem_bm_set_bit_check() here, since we won't
726 				 * touch the PFNs for which the error is
727 				 * returned anyway.
728 				 */
729 				mem_bm_set_bit_check(bm, pfn);
730 			}
731 	}
732 }
733 
734 /**
735  *	create_basic_memory_bitmaps - create bitmaps needed for marking page
736  *	frames that should not be saved and free page frames.  The pointers
737  *	forbidden_pages_map and free_pages_map are only modified if everything
738  *	goes well, because we don't want the bits to be used before both bitmaps
739  *	are set up.
740  */
741 
742 int create_basic_memory_bitmaps(void)
743 {
744 	struct memory_bitmap *bm1, *bm2;
745 	int error = 0;
746 
747 	if (forbidden_pages_map && free_pages_map)
748 		return 0;
749 	else
750 		BUG_ON(forbidden_pages_map || free_pages_map);
751 
752 	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
753 	if (!bm1)
754 		return -ENOMEM;
755 
756 	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
757 	if (error)
758 		goto Free_first_object;
759 
760 	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
761 	if (!bm2)
762 		goto Free_first_bitmap;
763 
764 	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
765 	if (error)
766 		goto Free_second_object;
767 
768 	forbidden_pages_map = bm1;
769 	free_pages_map = bm2;
770 	mark_nosave_pages(forbidden_pages_map);
771 
772 	pr_debug("PM: Basic memory bitmaps created\n");
773 
774 	return 0;
775 
776  Free_second_object:
777 	kfree(bm2);
778  Free_first_bitmap:
779 	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
780  Free_first_object:
781 	kfree(bm1);
782 	return -ENOMEM;
783 }
784 
785 /**
786  *	free_basic_memory_bitmaps - free memory bitmaps allocated by
787  *	create_basic_memory_bitmaps().  The auxiliary pointers are necessary
788  *	so that the bitmaps themselves are not referred to while they are being
789  *	freed.
790  */
791 
792 void free_basic_memory_bitmaps(void)
793 {
794 	struct memory_bitmap *bm1, *bm2;
795 
796 	if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
797 		return;
798 
799 	bm1 = forbidden_pages_map;
800 	bm2 = free_pages_map;
801 	forbidden_pages_map = NULL;
802 	free_pages_map = NULL;
803 	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
804 	kfree(bm1);
805 	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
806 	kfree(bm2);
807 
808 	pr_debug("PM: Basic memory bitmaps freed\n");
809 }
810 
811 /**
812  *	snapshot_additional_pages - estimate the number of additional pages
813  *	needed for setting up the suspend image data structures for the given
814  *	zone (usually the returned value is greater than the exact number)
815  */
816 
817 unsigned int snapshot_additional_pages(struct zone *zone)
818 {
819 	unsigned int res;
820 
821 	res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
822 	res += DIV_ROUND_UP(res * sizeof(struct bm_block),
823 			    LINKED_PAGE_DATA_SIZE);
824 	return 2 * res;
825 }
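
/*
 * The factor of two above accounts for the two memory bitmaps that the
 * hibernation code sets up for the image (cf. orig_bm and copy_bm below).
 */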
826 
827 #ifdef CONFIG_HIGHMEM
828 /**
829  *	count_free_highmem_pages - compute the total number of free highmem
830  *	pages, system-wide.
831  */
832 
833 static unsigned int count_free_highmem_pages(void)
834 {
835 	struct zone *zone;
836 	unsigned int cnt = 0;
837 
838 	for_each_populated_zone(zone)
839 		if (is_highmem(zone))
840 			cnt += zone_page_state(zone, NR_FREE_PAGES);
841 
842 	return cnt;
843 }
844 
845 /**
846  *	saveable_highmem_page - Determine whether a highmem page should be
847  *	included in the suspend image.
848  *
849  *	We should save the page if it isn't Nosave, NosaveFree or Reserved,
850  *	and if it isn't a part of a free chunk of pages.
851  */
852 static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
853 {
854 	struct page *page;
855 
856 	if (!pfn_valid(pfn))
857 		return NULL;
858 
859 	page = pfn_to_page(pfn);
860 	if (page_zone(page) != zone)
861 		return NULL;
862 
863 	BUG_ON(!PageHighMem(page));
864 
865 	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
866 	    PageReserved(page))
867 		return NULL;
868 
869 	if (page_is_guard(page))
870 		return NULL;
871 
872 	return page;
873 }
874 
875 /**
876  *	count_highmem_pages - compute the total number of saveable highmem
877  *	pages.
878  */
879 
880 static unsigned int count_highmem_pages(void)
881 {
882 	struct zone *zone;
883 	unsigned int n = 0;
884 
885 	for_each_populated_zone(zone) {
886 		unsigned long pfn, max_zone_pfn;
887 
888 		if (!is_highmem(zone))
889 			continue;
890 
891 		mark_free_pages(zone);
892 		max_zone_pfn = zone_end_pfn(zone);
893 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
894 			if (saveable_highmem_page(zone, pfn))
895 				n++;
896 	}
897 	return n;
898 }
899 #else
900 static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
901 {
902 	return NULL;
903 }
904 #endif /* CONFIG_HIGHMEM */
905 
906 /**
907  *	saveable_page - Determine whether a non-highmem page should be included
908  *	in the suspend image.
909  *
910  *	We should save the page if it isn't Nosave, is not in the range
911  *	of pages statically defined as 'unsaveable', and isn't a part of
912  *	a free chunk of pages.
913  */
914 static struct page *saveable_page(struct zone *zone, unsigned long pfn)
915 {
916 	struct page *page;
917 
918 	if (!pfn_valid(pfn))
919 		return NULL;
920 
921 	page = pfn_to_page(pfn);
922 	if (page_zone(page) != zone)
923 		return NULL;
924 
925 	BUG_ON(PageHighMem(page));
926 
927 	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
928 		return NULL;
929 
930 	if (PageReserved(page)
931 	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
932 		return NULL;
933 
934 	if (page_is_guard(page))
935 		return NULL;
936 
937 	return page;
938 }
939 
940 /**
941  *	count_data_pages - compute the total number of saveable non-highmem
942  *	pages.
943  */
944 
945 static unsigned int count_data_pages(void)
946 {
947 	struct zone *zone;
948 	unsigned long pfn, max_zone_pfn;
949 	unsigned int n = 0;
950 
951 	for_each_populated_zone(zone) {
952 		if (is_highmem(zone))
953 			continue;
954 
955 		mark_free_pages(zone);
956 		max_zone_pfn = zone_end_pfn(zone);
957 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
958 			if (saveable_page(zone, pfn))
959 				n++;
960 	}
961 	return n;
962 }
963 
964 /* This is needed because copy_page and memcpy are not usable for copying
965  * task structs.
966  */
967 static inline void do_copy_page(long *dst, long *src)
968 {
969 	int n;
970 
971 	for (n = PAGE_SIZE / sizeof(long); n; n--)
972 		*dst++ = *src++;
973 }
974 
975 
976 /**
977  *	safe_copy_page - check if the page we are going to copy is marked as
978  *		present in the kernel page tables (this is always the case
979  *		if CONFIG_DEBUG_PAGEALLOC is not set, in which case
980  *		kernel_page_present() always returns 'true').
981  */
982 static void safe_copy_page(void *dst, struct page *s_page)
983 {
984 	if (kernel_page_present(s_page)) {
985 		do_copy_page(dst, page_address(s_page));
986 	} else {
987 		kernel_map_pages(s_page, 1, 1);
988 		do_copy_page(dst, page_address(s_page));
989 		kernel_map_pages(s_page, 1, 0);
990 	}
991 }
992 
993 
994 #ifdef CONFIG_HIGHMEM
995 static inline struct page *
996 page_is_saveable(struct zone *zone, unsigned long pfn)
997 {
998 	return is_highmem(zone) ?
999 		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
1000 }
1001 
1002 static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
1003 {
1004 	struct page *s_page, *d_page;
1005 	void *src, *dst;
1006 
1007 	s_page = pfn_to_page(src_pfn);
1008 	d_page = pfn_to_page(dst_pfn);
1009 	if (PageHighMem(s_page)) {
1010 		src = kmap_atomic(s_page);
1011 		dst = kmap_atomic(d_page);
1012 		do_copy_page(dst, src);
1013 		kunmap_atomic(dst);
1014 		kunmap_atomic(src);
1015 	} else {
1016 		if (PageHighMem(d_page)) {
1017 			/* Page pointed to by src may contain some kernel
1018 			 * data modified by kmap_atomic()
1019 			 */
1020 			safe_copy_page(buffer, s_page);
1021 			dst = kmap_atomic(d_page);
1022 			copy_page(dst, buffer);
1023 			kunmap_atomic(dst);
1024 		} else {
1025 			safe_copy_page(page_address(d_page), s_page);
1026 		}
1027 	}
1028 }
1029 #else
1030 #define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)
1031 
1032 static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
1033 {
1034 	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
1035 				pfn_to_page(src_pfn));
1036 }
1037 #endif /* CONFIG_HIGHMEM */
1038 
1039 static void
1040 copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
1041 {
1042 	struct zone *zone;
1043 	unsigned long pfn;
1044 
1045 	for_each_populated_zone(zone) {
1046 		unsigned long max_zone_pfn;
1047 
1048 		mark_free_pages(zone);
1049 		max_zone_pfn = zone_end_pfn(zone);
1050 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1051 			if (page_is_saveable(zone, pfn))
1052 				memory_bm_set_bit(orig_bm, pfn);
1053 	}
1054 	memory_bm_position_reset(orig_bm);
1055 	memory_bm_position_reset(copy_bm);
1056 	for(;;) {
1057 		pfn = memory_bm_next_pfn(orig_bm);
1058 		if (unlikely(pfn == BM_END_OF_MAP))
1059 			break;
1060 		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
1061 	}
1062 }
1063 
1064 /* Total number of image pages */
1065 static unsigned int nr_copy_pages;
1066 /* Number of pages needed for saving the original pfns of the image pages */
1067 static unsigned int nr_meta_pages;
1068 /*
1069  * Numbers of normal and highmem page frames allocated for hibernation image
1070  * before suspending devices.
1071  */
1072 unsigned int alloc_normal, alloc_highmem;
1073 /*
1074  * Memory bitmap used for marking saveable pages (during hibernation) or
1075  * hibernation image pages (during restore)
1076  */
1077 static struct memory_bitmap orig_bm;
1078 /*
1079  * Memory bitmap used during hibernation for marking allocated page frames that
1080  * will contain copies of saveable pages.  During restore it is initially used
1081  * for marking hibernation image pages, but then the set bits from it are
1082  * duplicated in @orig_bm and it is released.  On highmem systems it is next
1083  * used for marking "safe" highmem pages, but it has to be reinitialized for
1084  * this purpose.
1085  */
1086 static struct memory_bitmap copy_bm;
1087 
1088 /**
1089  *	swsusp_free - free pages allocated for the suspend.
1090  *
1091  *	Suspend pages are allocated before the atomic copy is made, so we
1092  *	need to release them after the resume.
1093  */
1094 
1095 void swsusp_free(void)
1096 {
1097 	struct zone *zone;
1098 	unsigned long pfn, max_zone_pfn;
1099 
1100 	for_each_populated_zone(zone) {
1101 		max_zone_pfn = zone_end_pfn(zone);
1102 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1103 			if (pfn_valid(pfn)) {
1104 				struct page *page = pfn_to_page(pfn);
1105 
1106 				if (swsusp_page_is_forbidden(page) &&
1107 				    swsusp_page_is_free(page)) {
1108 					swsusp_unset_page_forbidden(page);
1109 					swsusp_unset_page_free(page);
1110 					__free_page(page);
1111 				}
1112 			}
1113 	}
1114 	nr_copy_pages = 0;
1115 	nr_meta_pages = 0;
1116 	restore_pblist = NULL;
1117 	buffer = NULL;
1118 	alloc_normal = 0;
1119 	alloc_highmem = 0;
1120 }
1121 
1122 /* Helper functions used for the shrinking of memory. */
1123 
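/*
 * GFP_KERNEL plus __GFP_NOWARN: failing allocations are expected (and
 * handled) while probing how much memory can be grabbed for the image,
 * so the page allocator is told not to warn about them.
 */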
1124 #define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)
1125 
1126 /**
1127  * preallocate_image_pages - Allocate a number of pages for hibernation image
1128  * @nr_pages: Number of page frames to allocate.
1129  * @mask: GFP flags to use for the allocation.
1130  *
1131  * Return value: Number of page frames actually allocated
1132  */
1133 static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
1134 {
1135 	unsigned long nr_alloc = 0;
1136 
1137 	while (nr_pages > 0) {
1138 		struct page *page;
1139 
1140 		page = alloc_image_page(mask);
1141 		if (!page)
1142 			break;
1143 		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
1144 		if (PageHighMem(page))
1145 			alloc_highmem++;
1146 		else
1147 			alloc_normal++;
1148 		nr_pages--;
1149 		nr_alloc++;
1150 	}
1151 
1152 	return nr_alloc;
1153 }
1154 
1155 static unsigned long preallocate_image_memory(unsigned long nr_pages,
1156 					      unsigned long avail_normal)
1157 {
1158 	unsigned long alloc;
1159 
1160 	if (avail_normal <= alloc_normal)
1161 		return 0;
1162 
1163 	alloc = avail_normal - alloc_normal;
1164 	if (nr_pages < alloc)
1165 		alloc = nr_pages;
1166 
1167 	return preallocate_image_pages(alloc, GFP_IMAGE);
1168 }
1169 
1170 #ifdef CONFIG_HIGHMEM
1171 static unsigned long preallocate_image_highmem(unsigned long nr_pages)
1172 {
1173 	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
1174 }
1175 
1176 /**
1177  *  __fraction - Compute (an approximation of) x * (multiplier / base)
1178  */
1179 static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
1180 {
1181 	x *= multiplier;
1182 	do_div(x, base);
1183 	return (unsigned long)x;
1184 }
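
/* For example, __fraction(1000, 3, 8) == 375, i.e. 1000 * 3 / 8. */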
1185 
1186 static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1187 						unsigned long highmem,
1188 						unsigned long total)
1189 {
1190 	unsigned long alloc = __fraction(nr_pages, highmem, total);
1191 
1192 	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
1193 }
1194 #else /* CONFIG_HIGHMEM */
1195 static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
1196 {
1197 	return 0;
1198 }
1199 
1200 static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1201 						unsigned long highmem,
1202 						unsigned long total)
1203 {
1204 	return 0;
1205 }
1206 #endif /* CONFIG_HIGHMEM */
1207 
1208 /**
1209  * free_unnecessary_pages - Release preallocated pages not needed for the image
1210  */
1211 static void free_unnecessary_pages(void)
1212 {
1213 	unsigned long save, to_free_normal, to_free_highmem;
1214 
1215 	save = count_data_pages();
1216 	if (alloc_normal >= save) {
1217 		to_free_normal = alloc_normal - save;
1218 		save = 0;
1219 	} else {
1220 		to_free_normal = 0;
1221 		save -= alloc_normal;
1222 	}
1223 	save += count_highmem_pages();
1224 	if (alloc_highmem >= save) {
1225 		to_free_highmem = alloc_highmem - save;
1226 	} else {
1227 		to_free_highmem = 0;
1228 		save -= alloc_highmem;
1229 		if (to_free_normal > save)
1230 			to_free_normal -= save;
1231 		else
1232 			to_free_normal = 0;
1233 	}
1234 
1235 	memory_bm_position_reset(&copy_bm);
1236 
1237 	while (to_free_normal > 0 || to_free_highmem > 0) {
1238 		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
1239 		struct page *page = pfn_to_page(pfn);
1240 
1241 		if (PageHighMem(page)) {
1242 			if (!to_free_highmem)
1243 				continue;
1244 			to_free_highmem--;
1245 			alloc_highmem--;
1246 		} else {
1247 			if (!to_free_normal)
1248 				continue;
1249 			to_free_normal--;
1250 			alloc_normal--;
1251 		}
1252 		memory_bm_clear_bit(&copy_bm, pfn);
1253 		swsusp_unset_page_forbidden(page);
1254 		swsusp_unset_page_free(page);
1255 		__free_page(page);
1256 	}
1257 }
1258 
1259 /**
1260  * minimum_image_size - Estimate the minimum acceptable size of an image
1261  * @saveable: Number of saveable pages in the system.
1262  *
1263  * We want to avoid trying too hard to free too much memory, so estimate the
1264  * minimum acceptable size of a hibernation image to use as the lower limit for
1265  * preallocating memory.
1266  *
1267  * We assume that the minimum image size should be proportional to
1268  *
1269  * [number of saveable pages] - [number of pages that can be freed in theory]
1270  *
1271  * where the second term is the sum of (1) reclaimable slab pages, (2) active
1272  * and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
1273  * minus mapped file pages.
1274  */
1275 static unsigned long minimum_image_size(unsigned long saveable)
1276 {
1277 	unsigned long size;
1278 
1279 	size = global_page_state(NR_SLAB_RECLAIMABLE)
1280 		+ global_page_state(NR_ACTIVE_ANON)
1281 		+ global_page_state(NR_INACTIVE_ANON)
1282 		+ global_page_state(NR_ACTIVE_FILE)
1283 		+ global_page_state(NR_INACTIVE_FILE)
1284 		- global_page_state(NR_FILE_MAPPED);
1285 
1286 	return saveable <= size ? 0 : saveable - size;
1287 }
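
/*
 * Illustrative numbers: with 300000 saveable pages of which the above
 * estimate says 120000 could be freed, the image is not expected to be
 * squeezed below 300000 - 120000 == 180000 pages.
 */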
1288 
1289 /**
1290  * hibernate_preallocate_memory - Preallocate memory for hibernation image
1291  *
1292  * To create a hibernation image it is necessary to make a copy of every page
1293  * frame in use.  We also need a number of page frames to be free during
1294  * hibernation for allocations made while saving the image and for device
1295  * drivers, in case they need to allocate memory from their hibernation
1296  * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough
1297  * estimate) and reserved_size divided by PAGE_SIZE (which is tunable through
1298  * /sys/power/reserved_size, respectively).  To make this happen, we compute the
1299  * total number of available page frames and allocate at least
1300  *
1301  * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
1302  *  + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
1303  *
1304  * of them, which corresponds to the maximum size of a hibernation image.
1305  *
1306  * If image_size is set below the number that follows from the above formula,
1307  * the preallocation of memory is continued until the total number of saveable
1308  * pages in the system is below the requested image size or the minimum
1309  * acceptable image size returned by minimum_image_size(), whichever is greater.
1310  */
1311 int hibernate_preallocate_memory(void)
1312 {
1313 	struct zone *zone;
1314 	unsigned long saveable, size, max_size, count, highmem, pages = 0;
1315 	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
1316 	struct timeval start, stop;
1317 	int error;
1318 
1319 	printk(KERN_INFO "PM: Preallocating image memory... ");
1320 	do_gettimeofday(&start);
1321 
1322 	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
1323 	if (error)
1324 		goto err_out;
1325 
1326 	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
1327 	if (error)
1328 		goto err_out;
1329 
1330 	alloc_normal = 0;
1331 	alloc_highmem = 0;
1332 
1333 	/* Count the number of saveable data pages. */
1334 	save_highmem = count_highmem_pages();
1335 	saveable = count_data_pages();
1336 
1337 	/*
1338 	 * Compute the total number of page frames we can use (count) and the
1339 	 * number of pages needed for image metadata (size).
1340 	 */
1341 	count = saveable;
1342 	saveable += save_highmem;
1343 	highmem = save_highmem;
1344 	size = 0;
1345 	for_each_populated_zone(zone) {
1346 		size += snapshot_additional_pages(zone);
1347 		if (is_highmem(zone))
1348 			highmem += zone_page_state(zone, NR_FREE_PAGES);
1349 		else
1350 			count += zone_page_state(zone, NR_FREE_PAGES);
1351 	}
1352 	avail_normal = count;
1353 	count += highmem;
1354 	count -= totalreserve_pages;
1355 
1356 	/* Add number of pages required for page keys (s390 only). */
1357 	size += page_key_additional_pages(saveable);
1358 
1359 	/* Compute the maximum number of saveable pages to leave in memory. */
1360 	max_size = (count - (size + PAGES_FOR_IO)) / 2
1361 			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
1362 	/* Compute the desired number of image pages specified by image_size. */
1363 	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
1364 	if (size > max_size)
1365 		size = max_size;
1366 	/*
1367 	 * If the desired number of image pages is at least as large as the
1368 	 * current number of saveable pages in memory, allocate page frames for
1369 	 * the image and we're done.
1370 	 */
1371 	if (size >= saveable) {
1372 		pages = preallocate_image_highmem(save_highmem);
1373 		pages += preallocate_image_memory(saveable - pages, avail_normal);
1374 		goto out;
1375 	}
1376 
1377 	/* Estimate the minimum size of the image. */
1378 	pages = minimum_image_size(saveable);
1379 	/*
1380 	 * To avoid excessive pressure on the normal zone, leave room in it to
1381 	 * accommodate an image of the minimum size (unless it's already too
1382 	 * small, in which case don't preallocate pages from it at all).
1383 	 */
1384 	if (avail_normal > pages)
1385 		avail_normal -= pages;
1386 	else
1387 		avail_normal = 0;
1388 	if (size < pages)
1389 		size = min_t(unsigned long, pages, max_size);
1390 
1391 	/*
1392 	 * Let the memory management subsystem know that we're going to need a
1393 	 * large number of page frames to allocate and make it free some memory.
1394 	 * NOTE: If this is not done, performance will be hurt badly in some
1395 	 * test cases.
1396 	 */
1397 	shrink_all_memory(saveable - size);
1398 
1399 	/*
1400 	 * The number of saveable pages in memory was too high, so apply some
1401 	 * pressure to decrease it.  First, make room for the largest possible
1402 	 * image and fail if that doesn't work.  Next, try to decrease the size
1403 	 * of the image as much as indicated by 'size' using allocations from
1404 	 * highmem and non-highmem zones separately.
1405 	 */
1406 	pages_highmem = preallocate_image_highmem(highmem / 2);
1407 	alloc = count - max_size;
1408 	if (alloc > pages_highmem)
1409 		alloc -= pages_highmem;
1410 	else
1411 		alloc = 0;
1412 	pages = preallocate_image_memory(alloc, avail_normal);
1413 	if (pages < alloc) {
1414 		/* We have exhausted non-highmem pages, try highmem. */
1415 		alloc -= pages;
1416 		pages += pages_highmem;
1417 		pages_highmem = preallocate_image_highmem(alloc);
1418 		if (pages_highmem < alloc)
1419 			goto err_out;
1420 		pages += pages_highmem;
1421 		/*
1422 		 * size is the desired number of saveable pages to leave in
1423 		 * memory, so try to preallocate (all memory - size) pages.
1424 		 */
1425 		alloc = (count - pages) - size;
1426 		pages += preallocate_image_highmem(alloc);
1427 	} else {
1428 		/*
1429 		 * There are approximately max_size saveable pages at this point
1430 		 * and we want to reduce this number down to size.
1431 		 */
1432 		alloc = max_size - size;
1433 		size = preallocate_highmem_fraction(alloc, highmem, count);
1434 		pages_highmem += size;
1435 		alloc -= size;
1436 		size = preallocate_image_memory(alloc, avail_normal);
1437 		pages_highmem += preallocate_image_highmem(alloc - size);
1438 		pages += pages_highmem + size;
1439 	}
1440 
1441 	/*
1442 	 * We only need as many page frames for the image as there are saveable
1443  * pages in memory, but we have allocated more.  Release the excess
1444 	 * ones now.
1445 	 */
1446 	free_unnecessary_pages();
1447 
1448  out:
1449 	do_gettimeofday(&stop);
1450 	printk(KERN_CONT "done (allocated %lu pages)\n", pages);
1451 	swsusp_show_speed(&start, &stop, pages, "Allocated");
1452 
1453 	return 0;
1454 
1455  err_out:
1456 	printk(KERN_CONT "\n");
1457 	swsusp_free();
1458 	return -ENOMEM;
1459 }
1460 
1461 #ifdef CONFIG_HIGHMEM
1462 /**
1463   *	count_pages_for_highmem - compute the number of non-highmem pages
1464   *	that will be necessary for creating copies of highmem pages.
1465   */
1466 
1467 static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
1468 {
1469 	unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
1470 
1471 	if (free_highmem >= nr_highmem)
1472 		nr_highmem = 0;
1473 	else
1474 		nr_highmem -= free_highmem;
1475 
1476 	return nr_highmem;
1477 }
1478 #else
1479 static unsigned int
1480 count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
1481 #endif /* CONFIG_HIGHMEM */
1482 
1483 /**
1484  *	enough_free_mem - Make sure we have enough free memory for the
1485  *	snapshot image.
1486  */
1487 
1488 static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
1489 {
1490 	struct zone *zone;
1491 	unsigned int free = alloc_normal;
1492 
1493 	for_each_populated_zone(zone)
1494 		if (!is_highmem(zone))
1495 			free += zone_page_state(zone, NR_FREE_PAGES);
1496 
1497 	nr_pages += count_pages_for_highmem(nr_highmem);
1498 	pr_debug("PM: Normal pages needed: %u + %u, available pages: %u\n",
1499 		nr_pages, PAGES_FOR_IO, free);
1500 
1501 	return free > nr_pages + PAGES_FOR_IO;
1502 }
1503 
1504 #ifdef CONFIG_HIGHMEM
1505 /**
1506  *	get_highmem_buffer - if there are some highmem pages in the suspend
1507  *	image, we may need the buffer to copy them and/or load their data.
1508  */
1509 
1510 static inline int get_highmem_buffer(int safe_needed)
1511 {
1512 	buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
1513 	return buffer ? 0 : -ENOMEM;
1514 }
1515 
1516 /**
1517  *	alloc_highmem_pages - allocate some highmem pages for the image.
1518  *	Try to allocate as many pages as needed, but if the number of free
1519  *	highmem pages is smaller than that, allocate them all.
1520  */
1521 
1522 static inline unsigned int
1523 alloc_highmem_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
1524 {
1525 	unsigned int to_alloc = count_free_highmem_pages();
1526 
1527 	if (to_alloc > nr_highmem)
1528 		to_alloc = nr_highmem;
1529 
1530 	nr_highmem -= to_alloc;
1531 	while (to_alloc-- > 0) {
1532 		struct page *page;
1533 
1534 		page = alloc_image_page(__GFP_HIGHMEM);
1535 		memory_bm_set_bit(bm, page_to_pfn(page));
1536 	}
1537 	return nr_highmem;
1538 }
1539 #else
1540 static inline int get_highmem_buffer(int safe_needed) { return 0; }
1541 
1542 static inline unsigned int
1543 alloc_highmem_pages(struct memory_bitmap *bm, unsigned int n) { return 0; }
1544 #endif /* CONFIG_HIGHMEM */
1545 
1546 /**
1547  *	swsusp_alloc - allocate memory for the suspend image
1548  *
1549  *	We first try to allocate as many highmem pages as there are
1550  *	saveable highmem pages in the system.  If that fails, we allocate
1551  *	non-highmem pages for the copies of the remaining highmem ones.
1552  *
1553  *	In this approach it is likely that the copies of highmem pages will
1554  *	also be located in the high memory, because of the way in which
1555  *	copy_data_pages() works.
1556  */
1557 
1558 static int
1559 swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
1560 		unsigned int nr_pages, unsigned int nr_highmem)
1561 {
1562 	if (nr_highmem > 0) {
1563 		if (get_highmem_buffer(PG_ANY))
1564 			goto err_out;
1565 		if (nr_highmem > alloc_highmem) {
1566 			nr_highmem -= alloc_highmem;
1567 			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
1568 		}
1569 	}
1570 	if (nr_pages > alloc_normal) {
1571 		nr_pages -= alloc_normal;
1572 		while (nr_pages-- > 0) {
1573 			struct page *page;
1574 
1575 			page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
1576 			if (!page)
1577 				goto err_out;
1578 			memory_bm_set_bit(copy_bm, page_to_pfn(page));
1579 		}
1580 	}
1581 
1582 	return 0;
1583 
1584  err_out:
1585 	swsusp_free();
1586 	return -ENOMEM;
1587 }
1588 
1589 asmlinkage __visible int swsusp_save(void)
1590 {
1591 	unsigned int nr_pages, nr_highmem;
1592 
1593 	printk(KERN_INFO "PM: Creating hibernation image:\n");
1594 
1595 	drain_local_pages(NULL);
1596 	nr_pages = count_data_pages();
1597 	nr_highmem = count_highmem_pages();
1598 	printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);
1599 
1600 	if (!enough_free_mem(nr_pages, nr_highmem)) {
1601 		printk(KERN_ERR "PM: Not enough free memory\n");
1602 		return -ENOMEM;
1603 	}
1604 
1605 	if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
1606 		printk(KERN_ERR "PM: Memory allocation failed\n");
1607 		return -ENOMEM;
1608 	}
1609 
1610 	/* During the allocation of the suspend pagedir, new cold pages may
1611 	 * have appeared.  Kill them.
1612 	 */
1613 	drain_local_pages(NULL);
1614 	copy_data_pages(&copy_bm, &orig_bm);
1615 
1616 	/*
1617 	 * End of critical section. From now on, we can write to memory,
1618 	 * but we should not touch disk. This specially means we must _not_
1619 	 * touch swap space! Except we must write out our image of course.
1620 	 */
1621 
1622 	nr_pages += nr_highmem;
1623 	nr_copy_pages = nr_pages;
1624 	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
1625 
1626 	printk(KERN_INFO "PM: Hibernation image created (%d pages copied)\n",
1627 		nr_pages);
1628 
1629 	return 0;
1630 }
1631 
1632 #ifndef CONFIG_ARCH_HIBERNATION_HEADER
1633 static int init_header_complete(struct swsusp_info *info)
1634 {
1635 	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
1636 	info->version_code = LINUX_VERSION_CODE;
1637 	return 0;
1638 }
1639 
1640 static char *check_image_kernel(struct swsusp_info *info)
1641 {
1642 	if (info->version_code != LINUX_VERSION_CODE)
1643 		return "kernel version";
1644 	if (strcmp(info->uts.sysname,init_utsname()->sysname))
1645 		return "system type";
1646 	if (strcmp(info->uts.release,init_utsname()->release))
1647 		return "kernel release";
1648 	if (strcmp(info->uts.version,init_utsname()->version))
1649 		return "version";
1650 	if (strcmp(info->uts.machine,init_utsname()->machine))
1651 		return "machine";
1652 	return NULL;
1653 }
1654 #endif /* CONFIG_ARCH_HIBERNATION_HEADER */
1655 
1656 unsigned long snapshot_get_image_size(void)
1657 {
1658 	return nr_copy_pages + nr_meta_pages + 1;
1659 }
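
/*
 * The extra page counted above holds the image header (struct
 * swsusp_info); cf. init_header() and snapshot_read_next() below.
 */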
1660 
1661 static int init_header(struct swsusp_info *info)
1662 {
1663 	memset(info, 0, sizeof(struct swsusp_info));
1664 	info->num_physpages = get_num_physpages();
1665 	info->image_pages = nr_copy_pages;
1666 	info->pages = snapshot_get_image_size();
1667 	info->size = info->pages;
1668 	info->size <<= PAGE_SHIFT;
1669 	return init_header_complete(info);
1670 }
1671 
1672 /**
1673  *	pack_pfns - pfns corresponding to the set bits found in the bitmap @bm
1674  *	are stored in the array @buf[] (1 page at a time)
1675  */
1676 
1677 static inline void
1678 pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
1679 {
1680 	int j;
1681 
1682 	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
1683 		buf[j] = memory_bm_next_pfn(bm);
1684 		if (unlikely(buf[j] == BM_END_OF_MAP))
1685 			break;
1686 		/* Save page key for data page (s390 only). */
1687 		page_key_read(buf + j);
1688 	}
1689 }
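
/*
 * With 4 KiB pages on a 64-bit machine (sizeof(long) == 8), each page
 * of metadata packed this way holds 4096 / 8 == 512 pfns; this is the
 * layout behind the nr_meta_pages computation in swsusp_save().
 */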
1690 
1691 /**
1692  *	snapshot_read_next - used for reading the system memory snapshot.
1693  *
1694  *	On the first call to it @handle should point to a zeroed
1695  *	snapshot_handle structure.  The structure gets updated and a pointer
1696  *	to it should be passed to this function on every subsequent call.
1697  *
1698  *	On success the function returns a positive number.  Then, the caller
1699  *	is allowed to read up to the returned number of bytes from the memory
1700  *	location computed by the data_of() macro.
1701  *
1702  *	The function returns 0 to indicate the end of data stream condition,
1703  *	and a negative number is returned on error.  In such cases the
1704  *	structure pointed to by @handle is not updated and should not be used
1705  *	any more.
1706  */
1707 
1708 int snapshot_read_next(struct snapshot_handle *handle)
1709 {
1710 	if (handle->cur > nr_meta_pages + nr_copy_pages)
1711 		return 0;
1712 
1713 	if (!buffer) {
1714 		/* This makes the buffer be freed by swsusp_free() */
1715 		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
1716 		if (!buffer)
1717 			return -ENOMEM;
1718 	}
1719 	if (!handle->cur) {
1720 		int error;
1721 
1722 		error = init_header((struct swsusp_info *)buffer);
1723 		if (error)
1724 			return error;
1725 		handle->buffer = buffer;
1726 		memory_bm_position_reset(&orig_bm);
1727 		memory_bm_position_reset(&copy_bm);
1728 	} else if (handle->cur <= nr_meta_pages) {
1729 		clear_page(buffer);
1730 		pack_pfns(buffer, &orig_bm);
1731 	} else {
1732 		struct page *page;
1733 
1734 		page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
1735 		if (PageHighMem(page)) {
1736 			/* Highmem pages are copied to the buffer,
1737 			 * because we can't return with a kmapped
1738 			 * highmem page (we may not be called again).
1739 			 */
1740 			void *kaddr;
1741 
1742 			kaddr = kmap_atomic(page);
1743 			copy_page(buffer, kaddr);
1744 			kunmap_atomic(kaddr);
1745 			handle->buffer = buffer;
1746 		} else {
1747 			handle->buffer = page_address(page);
1748 		}
1749 	}
1750 	handle->cur++;
1751 	return PAGE_SIZE;
1752 }
1753 
1754 /**
1755  *	mark_unsafe_pages - mark the pages that cannot be used for storing
1756  *	the image during resume, because they conflict with the pages that
1757  *	had been used before suspend
1758  */
1759 
1760 static int mark_unsafe_pages(struct memory_bitmap *bm)
1761 {
1762 	struct zone *zone;
1763 	unsigned long pfn, max_zone_pfn;
1764 
1765 	/* Clear page flags */
1766 	for_each_populated_zone(zone) {
1767 		max_zone_pfn = zone_end_pfn(zone);
1768 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1769 			if (pfn_valid(pfn))
1770 				swsusp_unset_page_free(pfn_to_page(pfn));
1771 	}
1772 
1773 	/* Mark pages that correspond to the "original" pfns as "unsafe" */
1774 	memory_bm_position_reset(bm);
1775 	do {
1776 		pfn = memory_bm_next_pfn(bm);
1777 		if (likely(pfn != BM_END_OF_MAP)) {
1778 			if (likely(pfn_valid(pfn)))
1779 				swsusp_set_page_free(pfn_to_page(pfn));
1780 			else
1781 				return -EFAULT;
1782 		}
1783 	} while (pfn != BM_END_OF_MAP);
1784 
1785 	allocated_unsafe_pages = 0;
1786 
1787 	return 0;
1788 }
1789 
1790 static void
1791 duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src)
1792 {
1793 	unsigned long pfn;
1794 
1795 	memory_bm_position_reset(src);
1796 	pfn = memory_bm_next_pfn(src);
1797 	while (pfn != BM_END_OF_MAP) {
1798 		memory_bm_set_bit(dst, pfn);
1799 		pfn = memory_bm_next_pfn(src);
1800 	}
1801 }
1802 
1803 static int check_header(struct swsusp_info *info)
1804 {
1805 	char *reason;
1806 
1807 	reason = check_image_kernel(info);
1808 	if (!reason && info->num_physpages != get_num_physpages())
1809 		reason = "memory size";
1810 	if (reason) {
1811 		printk(KERN_ERR "PM: Image mismatch: %s\n", reason);
1812 		return -EPERM;
1813 	}
1814 	return 0;
1815 }
1816 
1817 /**
1818  *	load_header - check the image header and copy the data from it
1819  */
1820 
1821 static int
1822 load_header(struct swsusp_info *info)
1823 {
1824 	int error;
1825 
1826 	restore_pblist = NULL;
1827 	error = check_header(info);
1828 	if (!error) {
1829 		nr_copy_pages = info->image_pages;
1830 		nr_meta_pages = info->pages - info->image_pages - 1;
1831 	}
1832 	return error;
1833 }
1834 
1835 /**
1836  *	unpack_orig_pfns - for each element of @buf[] (1 page at a time) set
1837  *	the corresponding bit in the memory bitmap @bm
1838  */
1839 static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
1840 {
1841 	int j;
1842 
1843 	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
1844 		if (unlikely(buf[j] == BM_END_OF_MAP))
1845 			break;
1846 
1847 		/* Extract and buffer page key for data page (s390 only). */
1848 		page_key_memorize(buf + j);
1849 
1850 		if (memory_bm_pfn_present(bm, buf[j]))
1851 			memory_bm_set_bit(bm, buf[j]);
1852 		else
1853 			return -EFAULT;
1854 	}
1855 
1856 	return 0;
1857 }
1858 
1859 /* List of "safe" pages that may be used to store data loaded from the suspend
1860  * image
1861  */
1862 static struct linked_page *safe_pages_list;
1863 
1864 #ifdef CONFIG_HIGHMEM
1865 /* struct highmem_pbe is used for creating the list of highmem pages that
1866  * should be restored atomically during the resume from disk, because the page
1867  * frames they have occupied before the suspend are in use.
1868  */
1869 struct highmem_pbe {
1870 	struct page *copy_page;	/* data is here now */
1871 	struct page *orig_page;	/* data was here before the suspend */
1872 	struct highmem_pbe *next;
1873 };
1874 
1875 /* List of highmem PBEs needed for restoring the highmem pages that were
1876  * allocated before the suspend and included in the suspend image, but have
1877  * also been allocated by the "resume" kernel, so their contents cannot be
1878  * written directly to their "original" page frames.
1879  */
1880 static struct highmem_pbe *highmem_pblist;
1881 
1882 /**
1883  *	count_highmem_image_pages - compute the number of highmem pages in the
1884  *	suspend image.  The bits in the memory bitmap @bm that correspond to the
1885  *	image pages are assumed to be set.
1886  */
1887 
1888 static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
1889 {
1890 	unsigned long pfn;
1891 	unsigned int cnt = 0;
1892 
1893 	memory_bm_position_reset(bm);
1894 	pfn = memory_bm_next_pfn(bm);
1895 	while (pfn != BM_END_OF_MAP) {
1896 		if (PageHighMem(pfn_to_page(pfn)))
1897 			cnt++;
1898 
1899 		pfn = memory_bm_next_pfn(bm);
1900 	}
1901 	return cnt;
1902 }
1903 
1904 /**
1905  *	prepare_highmem_image - try to allocate as many highmem pages as
1906  *	there are highmem image pages (@nr_highmem_p points to the variable
1907  *	containing the number of highmem image pages).  The pages that are
1908  *	"safe" (ie. will not be overwritten when the suspend image is
1909  *	restored) have the corresponding bits set in @bm (it must be
1910  *	uninitialized).
1911  *
1912  *	NOTE: This function should not be called if there are no highmem
1913  *	image pages.
1914  */
1915 
1916 static unsigned int safe_highmem_pages;
1917 
1918 static struct memory_bitmap *safe_highmem_bm;
1919 
1920 static int
1921 prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
1922 {
1923 	unsigned int to_alloc;
1924 
1925 	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
1926 		return -ENOMEM;
1927 
1928 	if (get_highmem_buffer(PG_SAFE))
1929 		return -ENOMEM;
1930 
1931 	to_alloc = count_free_highmem_pages();
1932 	if (to_alloc > *nr_highmem_p)
1933 		to_alloc = *nr_highmem_p;
1934 	else
1935 		*nr_highmem_p = to_alloc;
1936 
1937 	safe_highmem_pages = 0;
1938 	while (to_alloc-- > 0) {
1939 		struct page *page;
1940 
1941 		page = alloc_page(__GFP_HIGHMEM);
1942 		if (!swsusp_page_is_free(page)) {
1943 			/* The page is "safe", set its bit in the bitmap */
1944 			memory_bm_set_bit(bm, page_to_pfn(page));
1945 			safe_highmem_pages++;
1946 		}
1947 		/* Mark the page as allocated */
1948 		swsusp_set_page_forbidden(page);
1949 		swsusp_set_page_free(page);
1950 	}
1951 	memory_bm_position_reset(bm);
1952 	safe_highmem_bm = bm;
1953 	return 0;
1954 }
1955 
1956 /**
1957  *	get_highmem_page_buffer - for given highmem image page find the buffer
1958  *	that suspend_write_next() should set for its caller to write to.
1959  *
1960  *	If the page is to be saved to its "original" page frame or a copy of
1961  *	the page is to be made in the highmem, @buffer is returned.  Otherwise,
1962  *	the copy of the page is to be made in normal memory, so the address of
1963  *	the copy is returned.
1964  *
1965  *	If @buffer is returned, the caller of suspend_write_next() will write
1966  *	the page's contents to @buffer, so they will have to be copied to the
1967  *	right location on the next call to suspend_write_next() and it is done
1968  *	with the help of copy_last_highmem_page().  For this purpose, if
1969  *	@buffer is returned, @last_highmem_page is set to the page to which
1970  *	the data will have to be copied from @buffer.
1971  */
1972 
1973 static struct page *last_highmem_page;
1974 
1975 static void *
1976 get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
1977 {
1978 	struct highmem_pbe *pbe;
1979 	void *kaddr;
1980 
1981 	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
1982 		/* We have allocated the "original" page frame and we can
1983 		 * use it directly to store the loaded page.
1984 		 */
1985 		last_highmem_page = page;
1986 		return buffer;
1987 	}
1988 	/* The "original" page frame has not been allocated and we have to
1989 	 * use a "safe" page frame to store the loaded page.
1990 	 */
1991 	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
1992 	if (!pbe) {
1993 		swsusp_free();
1994 		return ERR_PTR(-ENOMEM);
1995 	}
1996 	pbe->orig_page = page;
1997 	if (safe_highmem_pages > 0) {
1998 		struct page *tmp;
1999 
2000 		/* Copy of the page will be stored in high memory */
2001 		kaddr = buffer;
2002 		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
2003 		safe_highmem_pages--;
2004 		last_highmem_page = tmp;
2005 		pbe->copy_page = tmp;
2006 	} else {
2007 		/* Copy of the page will be stored in normal memory */
2008 		kaddr = safe_pages_list;
2009 		safe_pages_list = safe_pages_list->next;
2010 		pbe->copy_page = virt_to_page(kaddr);
2011 	}
2012 	pbe->next = highmem_pblist;
2013 	highmem_pblist = pbe;
2014 	return kaddr;
2015 }
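
/*
 * Summary of the cases handled above, for illustration:
 *
 *	"original" frame allocated by us  ->  return @buffer; data copied
 *					      to that frame later
 *	safe highmem page still available ->  return @buffer; data copied
 *					      to that highmem page later
 *	only safe lowmem pages left       ->  return the lowmem page itself
 */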
2016 
2017 /**
2018  *	copy_last_highmem_page - copy the contents of a highmem image page
2019  *	from @buffer, where the caller of snapshot_write_next() has placed
2020  *	them, to the right location represented by @last_highmem_page.
2021  */
2022 
2023 static void copy_last_highmem_page(void)
2024 {
2025 	if (last_highmem_page) {
2026 		void *dst;
2027 
2028 		dst = kmap_atomic(last_highmem_page);
2029 		copy_page(dst, buffer);
2030 		kunmap_atomic(dst);
2031 		last_highmem_page = NULL;
2032 	}
2033 }
2034 
2035 static inline int last_highmem_page_copied(void)
2036 {
2037 	return !last_highmem_page;
2038 }
2039 
2040 static inline void free_highmem_data(void)
2041 {
2042 	if (safe_highmem_bm)
2043 		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
2044 
2045 	if (buffer)
2046 		free_image_page(buffer, PG_UNSAFE_CLEAR);
2047 }
2048 #else
2049 static inline int get_safe_write_buffer(void) { return 0; }
2050 
2051 static unsigned int
2052 count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
2053 
2054 static inline int
2055 prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
2056 {
2057 	return 0;
2058 }
2059 
2060 static inline void *
2061 get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
2062 {
2063 	return ERR_PTR(-EINVAL);
2064 }
2065 
2066 static inline void copy_last_highmem_page(void) {}
2067 static inline int last_highmem_page_copied(void) { return 1; }
2068 static inline void free_highmem_data(void) {}
2069 #endif /* CONFIG_HIGHMEM */
2070 
2071 /**
2072  *	prepare_image - use the memory bitmap @bm to mark the pages that will
2073  *	be overwritten in the process of restoring the system memory state
2074  *	from the suspend image ("unsafe" pages) and allocate memory for the
2075  *	image.
2076  *
2077  *	The idea is to allocate a new memory bitmap first and then allocate
2078  *	as many pages as needed for the image data, but not to assign these
2079  *	pages to specific tasks initially.  Instead, we just mark them as
2080  *	allocated and create a list of "safe" pages that will be used
2081  *	later.  On systems with high memory a list of "safe" highmem pages is
2082  *	also created.
2083  */
2084 
2085 #define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
2086 
2087 static int
2088 prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
2089 {
2090 	unsigned int nr_pages, nr_highmem;
2091 	struct linked_page *sp_list, *lp;
2092 	int error;
2093 
2094 	/* If there is no highmem, the buffer will not be necessary */
2095 	free_image_page(buffer, PG_UNSAFE_CLEAR);
2096 	buffer = NULL;
2097 
2098 	nr_highmem = count_highmem_image_pages(bm);
2099 	error = mark_unsafe_pages(bm);
2100 	if (error)
2101 		goto Free;
2102 
2103 	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
2104 	if (error)
2105 		goto Free;
2106 
2107 	duplicate_memory_bitmap(new_bm, bm);
2108 	memory_bm_free(bm, PG_UNSAFE_KEEP);
2109 	if (nr_highmem > 0) {
2110 		error = prepare_highmem_image(bm, &nr_highmem);
2111 		if (error)
2112 			goto Free;
2113 	}
2114 	/* Reserve some safe pages for potential later use.
2115 	 *
2116 	 * NOTE: This way we make sure there will be enough safe pages for the
2117 	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
2118 	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
2119 	 */
2120 	sp_list = NULL;
2121 	/* nr_copy_pages cannot be less than allocated_unsafe_pages */
2122 	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2123 	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
2124 	while (nr_pages > 0) {
2125 		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
2126 		if (!lp) {
2127 			error = -ENOMEM;
2128 			goto Free;
2129 		}
2130 		lp->next = sp_list;
2131 		sp_list = lp;
2132 		nr_pages--;
2133 	}
2134 	/* Preallocate memory for the image */
2135 	safe_pages_list = NULL;
2136 	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2137 	while (nr_pages > 0) {
2138 		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
2139 		if (!lp) {
2140 			error = -ENOMEM;
2141 			goto Free;
2142 		}
2143 		if (!swsusp_page_is_free(virt_to_page(lp))) {
2144 			/* The page is "safe", add it to the list */
2145 			lp->next = safe_pages_list;
2146 			safe_pages_list = lp;
2147 		}
2148 		/* Mark the page as allocated */
2149 		swsusp_set_page_forbidden(virt_to_page(lp));
2150 		swsusp_set_page_free(virt_to_page(lp));
2151 		nr_pages--;
2152 	}
2153 	/* Free the reserved safe pages so that chain_alloc() can use them */
2154 	while (sp_list) {
2155 		lp = sp_list->next;
2156 		free_image_page(sp_list, PG_UNSAFE_CLEAR);
2157 		sp_list = lp;
2158 	}
2159 	return 0;
2160 
2161  Free:
2162 	swsusp_free();
2163 	return error;
2164 }
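
/*
 * Worked example for the reservation above (the sizes are assumptions
 * for illustration): with 4 KiB pages and 8-byte pointers,
 * LINKED_PAGE_DATA_SIZE is 4088 bytes and sizeof(struct pbe) is 24, so
 * PBES_PER_LINKED_PAGE is 170.  An image with 10000 non-highmem data
 * pages still to place would reserve DIV_ROUND_UP(10000, 170) == 59
 * safe pages for the chain_alloc() calls made later by get_buffer().
 */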
2165 
2166 /**
2167  *	get_buffer - compute the address that snapshot_write_next() should
2168  *	set for its caller to write to.
2169  */
2170 
2171 static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
2172 {
2173 	struct pbe *pbe;
2174 	struct page *page;
2175 	unsigned long pfn = memory_bm_next_pfn(bm);
2176 
2177 	if (pfn == BM_END_OF_MAP)
2178 		return ERR_PTR(-EFAULT);
2179 
2180 	page = pfn_to_page(pfn);
2181 	if (PageHighMem(page))
2182 		return get_highmem_page_buffer(page, ca);
2183 
2184 	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
2185 		/* We have allocated the "original" page frame and we can
2186 		 * use it directly to store the loaded page.
2187 		 */
2188 		return page_address(page);
2189 
2190 	/* The "original" page frame has not been allocated and we have to
2191 	 * use a "safe" page frame to store the loaded page.
2192 	 */
2193 	pbe = chain_alloc(ca, sizeof(struct pbe));
2194 	if (!pbe) {
2195 		swsusp_free();
2196 		return ERR_PTR(-ENOMEM);
2197 	}
2198 	pbe->orig_address = page_address(page);
2199 	pbe->address = safe_pages_list;
2200 	safe_pages_list = safe_pages_list->next;
2201 	pbe->next = restore_pblist;
2202 	restore_pblist = pbe;
2203 	return pbe->address;
2204 }
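
/*
 * Each pbe chained onto restore_pblist above pairs a "safe" scratch
 * page (pbe->address) with the conflicting destination frame
 * (pbe->orig_address).  The architecture-specific atomic restore code
 * later walks the list doing, in effect (sketch only):
 *
 *	for (pbe = restore_pblist; pbe; pbe = pbe->next)
 *		copy_page(pbe->orig_address, pbe->address);
 */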
2205 
2206 /**
2207  *	snapshot_write_next - used for writing the system memory snapshot.
2208  *
2209  *	On the first call, @handle should point to a zeroed
2210  *	snapshot_handle structure.  The structure gets updated and a pointer
2211  *	to it should be passed to this function on every subsequent call.
2212  *
2213  *	On success the function returns a positive number.  Then, the caller
2214  *	is allowed to write up to the returned number of bytes to the memory
2215  *	location computed by the data_of() macro.
2216  *
2217  *	The function returns 0 to indicate the "end of file" condition,
2218  *	and a negative number on error.  In such cases the structure
2219  *	pointed to by @handle is not updated and should not be used
2220  *	any more.
2221  */
2222 
2223 int snapshot_write_next(struct snapshot_handle *handle)
2224 {
2225 	static struct chain_allocator ca;
2226 	int error = 0;
2227 
2228 	/* Check if we have already loaded the entire image */
2229 	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
2230 		return 0;
2231 
2232 	handle->sync_read = 1;
2233 
2234 	if (!handle->cur) {
2235 		if (!buffer)
2236 			/* This ensures the buffer will be freed by swsusp_free() */
2237 			buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2238 
2239 		if (!buffer)
2240 			return -ENOMEM;
2241 
2242 		handle->buffer = buffer;
2243 	} else if (handle->cur == 1) {
2244 		error = load_header(buffer);
2245 		if (error)
2246 			return error;
2247 
2248 		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
2249 		if (error)
2250 			return error;
2251 
2252 		/* Allocate buffer for page keys. */
2253 		error = page_key_alloc(nr_copy_pages);
2254 		if (error)
2255 			return error;
2256 
2257 	} else if (handle->cur <= nr_meta_pages + 1) {
2258 		error = unpack_orig_pfns(buffer, &copy_bm);
2259 		if (error)
2260 			return error;
2261 
2262 		if (handle->cur == nr_meta_pages + 1) {
2263 			error = prepare_image(&orig_bm, &copy_bm);
2264 			if (error)
2265 				return error;
2266 
2267 			chain_init(&ca, GFP_ATOMIC, PG_SAFE);
2268 			memory_bm_position_reset(&orig_bm);
2269 			restore_pblist = NULL;
2270 			handle->buffer = get_buffer(&orig_bm, &ca);
2271 			handle->sync_read = 0;
2272 			if (IS_ERR(handle->buffer))
2273 				return PTR_ERR(handle->buffer);
2274 		}
2275 	} else {
2276 		copy_last_highmem_page();
2277 		/* Restore page key for data page (s390 only). */
2278 		page_key_write(handle->buffer);
2279 		handle->buffer = get_buffer(&orig_bm, &ca);
2280 		if (IS_ERR(handle->buffer))
2281 			return PTR_ERR(handle->buffer);
2282 		if (handle->buffer != buffer)
2283 			handle->sync_read = 0;
2284 	}
2285 	handle->cur++;
2286 	return PAGE_SIZE;
2287 }
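
/*
 * Sketch of the calling convention documented above; the helper
 * read_next_image_page() is hypothetical and stands in for whatever
 * fills the destination with the next chunk of image data:
 *
 *	int n, error = 0;
 *
 *	while ((n = snapshot_write_next(handle)) > 0) {
 *		error = read_next_image_page(data_of(*handle), n);
 *		if (error)
 *			break;
 *	}
 */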
2288 
2289 /**
2290  *	snapshot_write_finalize - must be called after the last call to
2291  *	snapshot_write_next() in case the last page in the image happens
2292  *	to be a highmem page and its contents should be stored in
2293  *	highmem.  Additionally, it releases the memory that will not be
2294  *	used any more.
2295  */
2296 
2297 void snapshot_write_finalize(struct snapshot_handle *handle)
2298 {
2299 	copy_last_highmem_page();
2300 	/* Restore page key for data page (s390 only). */
2301 	page_key_write(handle->buffer);
2302 	page_key_free();
2303 	/* Free only if we have loaded the image entirely */
2304 	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
2305 		memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
2306 		free_highmem_data();
2307 	}
2308 }
2309 
2310 int snapshot_image_loaded(struct snapshot_handle *handle)
2311 {
2312 	return !(!nr_copy_pages || !last_highmem_page_copied() ||
2313 			handle->cur <= nr_meta_pages + nr_copy_pages);
2314 }
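
/*
 * By De Morgan's laws the check above is equivalent to
 *
 *	return nr_copy_pages && last_highmem_page_copied() &&
 *		handle->cur > nr_meta_pages + nr_copy_pages;
 *
 * ie. the image counts as loaded only if it contained data pages at all,
 * the last highmem page has been copied and the caller has advanced past
 * the final data page.
 */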
2315 
2316 #ifdef CONFIG_HIGHMEM
2317 /* Assumes that @buf is ready and points to a "safe" page */
2318 static inline void
2319 swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
2320 {
2321 	void *kaddr1, *kaddr2;
2322 
2323 	kaddr1 = kmap_atomic(p1);
2324 	kaddr2 = kmap_atomic(p2);
2325 	copy_page(buf, kaddr1);
2326 	copy_page(kaddr1, kaddr2);
2327 	copy_page(kaddr2, buf);
2328 	kunmap_atomic(kaddr2);
2329 	kunmap_atomic(kaddr1);
2330 }
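
/*
 * This is the classic three-move swap through a bounce buffer
 * (buf <- p1, p1 <- p2, p2 <- buf); @buf is a "safe" lowmem page, so
 * kmap_atomic() slots are only needed for @p1 and @p2.
 */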
2331 
2332 /**
2333  *	restore_highmem - for each highmem page that was allocated before
2334  *	the suspend and included in the suspend image, and also has been
2335  *	allocated by the "resume" kernel, swap its current (ie. "before
2336  *	resume") contents with the previous (ie. "before suspend") ones.
2337  *
2338  *	If the resume eventually fails, we can call this function once
2339  *	again and restore the "before resume" highmem state.
2340  */
2341 
2342 int restore_highmem(void)
2343 {
2344 	struct highmem_pbe *pbe = highmem_pblist;
2345 	void *buf;
2346 
2347 	if (!pbe)
2348 		return 0;
2349 
2350 	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
2351 	if (!buf)
2352 		return -ENOMEM;
2353 
2354 	while (pbe) {
2355 		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
2356 		pbe = pbe->next;
2357 	}
2358 	free_image_page(buf, PG_UNSAFE_CLEAR);
2359 	return 0;
2360 }
2361 #endif /* CONFIG_HIGHMEM */
2362