1 /*
2  * linux/kernel/power/snapshot.c
3  *
4  * This file provides system snapshot/restore functionality for swsusp.
5  *
6  * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
7  * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
8  *
9  * This file is released under the GPLv2.
10  *
11  */
12 
13 #include <linux/version.h>
14 #include <linux/module.h>
15 #include <linux/mm.h>
16 #include <linux/suspend.h>
17 #include <linux/delay.h>
18 #include <linux/bitops.h>
19 #include <linux/spinlock.h>
20 #include <linux/kernel.h>
21 #include <linux/pm.h>
22 #include <linux/device.h>
23 #include <linux/init.h>
24 #include <linux/bootmem.h>
25 #include <linux/syscalls.h>
26 #include <linux/console.h>
27 #include <linux/highmem.h>
28 #include <linux/list.h>
29 #include <linux/slab.h>
30 
31 #include <asm/uaccess.h>
32 #include <asm/mmu_context.h>
33 #include <asm/pgtable.h>
34 #include <asm/tlbflush.h>
35 #include <asm/io.h>
36 
37 #include "power.h"
38 
39 static int swsusp_page_is_free(struct page *);
40 static void swsusp_set_page_forbidden(struct page *);
41 static void swsusp_unset_page_forbidden(struct page *);
42 
43 /*
44  * Preferred image size in bytes (tunable via /sys/power/image_size).
45  * When it is set to N, swsusp will do its best to ensure the image
46  * size will not exceed N bytes, but if that is impossible, it will
47  * try to create the smallest image possible.
48  */
49 unsigned long image_size;
50 
51 void __init hibernate_image_size_init(void)
52 {
53 	image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
54 }
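
/*
 * Worked example (illustrative, not part of the original source): with
 * 1 GiB of RAM and 4 KiB pages, totalram_pages == 262144, so the default
 * image_size is ((262144 * 2) / 5) * 4096 bytes, i.e. 104857 pages or
 * roughly 409 MiB, two fifths of RAM.
 */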
55 
56 /* List of PBEs needed for restoring the pages that were allocated before
57  * the suspend and included in the suspend image, but have also been
58  * allocated by the "resume" kernel, so their contents cannot be written
59  * directly to their "original" page frames.
60  */
61 struct pbe *restore_pblist;
62 
63 /* Pointer to an auxiliary buffer (1 page) */
64 static void *buffer;
65 
66 /**
67  *	@safe_needed - on resume, for storing the PBE list and the image,
68  *	we can only use memory pages that do not conflict with the pages
69  *	used before suspend.  The unsafe pages have PageNosaveFree set
70  *	and we count them using allocated_unsafe_pages.
71  *
72  *	Each allocated image page is marked as PageNosave and PageNosaveFree
73  *	so that swsusp_free() can release it.
74  */
75 
76 #define PG_ANY		0
77 #define PG_SAFE		1
78 #define PG_UNSAFE_CLEAR	1
79 #define PG_UNSAFE_KEEP	0
80 
81 static unsigned int allocated_unsafe_pages;
82 
83 static void *get_image_page(gfp_t gfp_mask, int safe_needed)
84 {
85 	void *res;
86 
87 	res = (void *)get_zeroed_page(gfp_mask);
88 	if (safe_needed)
89 		while (res && swsusp_page_is_free(virt_to_page(res))) {
90 			/* The page is unsafe, mark it for swsusp_free() */
91 			swsusp_set_page_forbidden(virt_to_page(res));
92 			allocated_unsafe_pages++;
93 			res = (void *)get_zeroed_page(gfp_mask);
94 		}
95 	if (res) {
96 		swsusp_set_page_forbidden(virt_to_page(res));
97 		swsusp_set_page_free(virt_to_page(res));
98 	}
99 	return res;
100 }
101 
102 unsigned long get_safe_page(gfp_t gfp_mask)
103 {
104 	return (unsigned long)get_image_page(gfp_mask, PG_SAFE);
105 }
106 
107 static struct page *alloc_image_page(gfp_t gfp_mask)
108 {
109 	struct page *page;
110 
111 	page = alloc_page(gfp_mask);
112 	if (page) {
113 		swsusp_set_page_forbidden(page);
114 		swsusp_set_page_free(page);
115 	}
116 	return page;
117 }
118 
119 /**
120  *	free_image_page - free page represented by @addr, allocated with
121  *	get_image_page (page flags set by it must be cleared)
122  */
123 
124 static inline void free_image_page(void *addr, int clear_nosave_free)
125 {
126 	struct page *page;
127 
128 	BUG_ON(!virt_addr_valid(addr));
129 
130 	page = virt_to_page(addr);
131 
132 	swsusp_unset_page_forbidden(page);
133 	if (clear_nosave_free)
134 		swsusp_unset_page_free(page);
135 
136 	__free_page(page);
137 }
138 
139 /* struct linked_page is used to build chains of pages */
140 
141 #define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))
142 
143 struct linked_page {
144 	struct linked_page *next;
145 	char data[LINKED_PAGE_DATA_SIZE];
146 } __attribute__((packed));
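
/*
 * For example, assuming 4 KiB pages and 64-bit pointers, each
 * struct linked_page carries LINKED_PAGE_DATA_SIZE = 4096 - 8 = 4088
 * bytes of payload; the remaining 8 bytes hold the 'next' pointer.
 */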
147 
148 static inline void
149 free_list_of_pages(struct linked_page *list, int clear_page_nosave)
150 {
151 	while (list) {
152 		struct linked_page *lp = list->next;
153 
154 		free_image_page(list, clear_page_nosave);
155 		list = lp;
156 	}
157 }
158 
159 /**
160   *	struct chain_allocator is used for allocating small objects out of
161   *	a linked list of pages called 'the chain'.
162   *
163   *	The chain grows each time there is no room for a new object in
164   *	the current page.  The allocated objects cannot be freed individually.
165   *	It is only possible to free them all at once, by freeing the entire
166   *	chain.
167   *
168   *	NOTE: The chain allocator may be inefficient if the allocated objects
169   *	are not much smaller than PAGE_SIZE.
170   */
171 
172 struct chain_allocator {
173 	struct linked_page *chain;	/* the chain */
174 	unsigned int used_space;	/* total size of objects allocated out
175 					 * of the current page
176 					 */
177 	gfp_t gfp_mask;		/* mask for allocating pages */
178 	int safe_needed;	/* if set, only "safe" pages are allocated */
179 };
180 
181 static void
182 chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed)
183 {
184 	ca->chain = NULL;
185 	ca->used_space = LINKED_PAGE_DATA_SIZE;
186 	ca->gfp_mask = gfp_mask;
187 	ca->safe_needed = safe_needed;
188 }
189 
190 static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
191 {
192 	void *ret;
193 
194 	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
195 		struct linked_page *lp;
196 
197 		lp = get_image_page(ca->gfp_mask, ca->safe_needed);
198 		if (!lp)
199 			return NULL;
200 
201 		lp->next = ca->chain;
202 		ca->chain = lp;
203 		ca->used_space = 0;
204 	}
205 	ret = ca->chain->data + ca->used_space;
206 	ca->used_space += size;
207 	return ret;
208 }
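
/*
 * Minimal usage sketch (illustrative only; any object allocated this way
 * must fit into LINKED_PAGE_DATA_SIZE):
 *
 *	struct chain_allocator ca;
 *	struct bm_block *bb;
 *
 *	chain_init(&ca, GFP_KERNEL, PG_ANY);
 *	bb = chain_alloc(&ca, sizeof(struct bm_block));
 *	if (!bb)
 *		return -ENOMEM;
 *	...
 *	free_list_of_pages(ca.chain, PG_UNSAFE_KEEP);
 *
 * All objects are released at once by freeing the chain; there is no
 * per-object free.
 */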
209 
210 /**
211  *	Data types related to memory bitmaps.
212  *
213  *	Memory bitmap is a structure consisting of a linked list of
214  *	objects of type struct bm_block, each of which represents one
215  *	block of the bitmap.  For each block there is a separate memory
216  *	page that stores the actual bits, so the whole bitmap can be
217  *	laid out using order 0 allocations only.
218  *
219  *	struct memory_bitmap contains the list of bitmap block objects,
220  *	a struct bm_position used for browsing the bitmap, and a pointer
221  *	to the chain of pages used for allocating the bitmap block
222  *	objects themselves (the pages that hold the bits are allocated
223  *	separately, one for each block).
224  *
225  *	NOTE: It has to be possible to lay out the bitmap in memory
226  *	using only allocations of order 0.  Additionally, the bitmap is
227  *	designed to work with an arbitrary number of zones (this is over
228  *	the top for now, but let's avoid making unnecessary assumptions ;-).
229  *
230  *	The bitmap blocks are kept sorted by the pfn ranges they represent,
231  *	and struct bm_position caches the most recently used block so that
232  *	consecutive operations on nearby pfns are cheap.
233  *
234  *	struct bm_block contains a pointer to the memory page in which
235  *	information is stored (in the form of a block of the bitmap).
236  *	It also contains the pfns that correspond to the start and end of
237  *	the represented memory area, so a block covers end_pfn - start_pfn bits.
238  */
239 
240 #define BM_END_OF_MAP	(~0UL)
241 
242 #define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
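/*
 * With 4 KiB pages this makes BM_BITS_PER_BLOCK 4096 * 8 = 32768, i.e.
 * a single bitmap block tracks 32768 page frames (128 MiB of memory).
 */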
243 
244 struct bm_block {
245 	struct list_head hook;	/* hook into a list of bitmap blocks */
246 	unsigned long start_pfn;	/* pfn represented by the first bit */
247 	unsigned long end_pfn;	/* pfn represented by the last bit plus 1 */
248 	unsigned long *data;	/* bitmap representing pages */
249 };
250 
251 static inline unsigned long bm_block_bits(struct bm_block *bb)
252 {
253 	return bb->end_pfn - bb->start_pfn;
254 }
255 
256 /* struct bm_position is used for browsing memory bitmaps */
257 
258 struct bm_position {
259 	struct bm_block *block;
260 	int bit;
261 };
262 
263 struct memory_bitmap {
264 	struct list_head blocks;	/* list of bitmap blocks */
265 	struct linked_page *p_list;	/* list of pages used to store zone
266 					 * bitmap objects and bitmap block
267 					 * objects
268 					 */
269 	struct bm_position cur;	/* most recently used bit position */
270 };
271 
272 /* Functions that operate on memory bitmaps */
273 
274 static void memory_bm_position_reset(struct memory_bitmap *bm)
275 {
276 	bm->cur.block = list_entry(bm->blocks.next, struct bm_block, hook);
277 	bm->cur.bit = 0;
278 }
279 
280 static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
281 
282 /**
283  *	create_bm_block_list - create a list of block bitmap objects
284  *	@pages - number of pages to track
285  *	@list - list to put the allocated blocks into
286  *	@ca - chain allocator to be used for allocating memory
287  */
288 static int create_bm_block_list(unsigned long pages,
289 				struct list_head *list,
290 				struct chain_allocator *ca)
291 {
292 	unsigned int nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
293 
294 	while (nr_blocks-- > 0) {
295 		struct bm_block *bb;
296 
297 		bb = chain_alloc(ca, sizeof(struct bm_block));
298 		if (!bb)
299 			return -ENOMEM;
300 		list_add(&bb->hook, list);
301 	}
302 
303 	return 0;
304 }
305 
306 struct mem_extent {
307 	struct list_head hook;
308 	unsigned long start;
309 	unsigned long end;
310 };
311 
312 /**
313  *	free_mem_extents - free a list of memory extents
314  *	@list - list of extents to empty
315  */
316 static void free_mem_extents(struct list_head *list)
317 {
318 	struct mem_extent *ext, *aux;
319 
320 	list_for_each_entry_safe(ext, aux, list, hook) {
321 		list_del(&ext->hook);
322 		kfree(ext);
323 	}
324 }
325 
326 /**
327  *	create_mem_extents - create a list of memory extents representing
328  *	                     contiguous ranges of PFNs
329  *	@list - list to put the extents into
330  *	@gfp_mask - mask to use for memory allocations
331  */
332 static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
333 {
334 	struct zone *zone;
335 
336 	INIT_LIST_HEAD(list);
337 
338 	for_each_populated_zone(zone) {
339 		unsigned long zone_start, zone_end;
340 		struct mem_extent *ext, *cur, *aux;
341 
342 		zone_start = zone->zone_start_pfn;
343 		zone_end = zone->zone_start_pfn + zone->spanned_pages;
344 
345 		list_for_each_entry(ext, list, hook)
346 			if (zone_start <= ext->end)
347 				break;
348 
349 		if (&ext->hook == list || zone_end < ext->start) {
350 			/* New extent is necessary */
351 			struct mem_extent *new_ext;
352 
353 			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
354 			if (!new_ext) {
355 				free_mem_extents(list);
356 				return -ENOMEM;
357 			}
358 			new_ext->start = zone_start;
359 			new_ext->end = zone_end;
360 			list_add_tail(&new_ext->hook, &ext->hook);
361 			continue;
362 		}
363 
364 		/* Merge this zone's range of PFNs with the existing one */
365 		if (zone_start < ext->start)
366 			ext->start = zone_start;
367 		if (zone_end > ext->end)
368 			ext->end = zone_end;
369 
370 		/* More merging may be possible */
371 		cur = ext;
372 		list_for_each_entry_safe_continue(cur, aux, list, hook) {
373 			if (zone_end < cur->start)
374 				break;
375 			if (zone_end < cur->end)
376 				ext->end = cur->end;
377 			list_del(&cur->hook);
378 			kfree(cur);
379 		}
380 	}
381 
382 	return 0;
383 }
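
/*
 * Example (illustrative pfn values): zones spanning pfns [0, 4096) and
 * [2048, 8192) produce a single extent [0, 8192), because the second
 * zone overlaps the extent created for the first one and is merged into
 * it.  A third zone at [16384, 20480) would get an extent of its own.
 */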
384 
385 /**
386   *	memory_bm_create - allocate memory for a memory bitmap
387   */
388 static int
389 memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
390 {
391 	struct chain_allocator ca;
392 	struct list_head mem_extents;
393 	struct mem_extent *ext;
394 	int error;
395 
396 	chain_init(&ca, gfp_mask, safe_needed);
397 	INIT_LIST_HEAD(&bm->blocks);
398 
399 	error = create_mem_extents(&mem_extents, gfp_mask);
400 	if (error)
401 		return error;
402 
403 	list_for_each_entry(ext, &mem_extents, hook) {
404 		struct bm_block *bb;
405 		unsigned long pfn = ext->start;
406 		unsigned long pages = ext->end - ext->start;
407 
408 		bb = list_entry(bm->blocks.prev, struct bm_block, hook);
409 
410 		error = create_bm_block_list(pages, bm->blocks.prev, &ca);
411 		if (error)
412 			goto Error;
413 
414 		list_for_each_entry_continue(bb, &bm->blocks, hook) {
415 			bb->data = get_image_page(gfp_mask, safe_needed);
416 			if (!bb->data) {
417 				error = -ENOMEM;
418 				goto Error;
419 			}
420 
421 			bb->start_pfn = pfn;
422 			if (pages >= BM_BITS_PER_BLOCK) {
423 				pfn += BM_BITS_PER_BLOCK;
424 				pages -= BM_BITS_PER_BLOCK;
425 			} else {
426 				/* This is executed only once in the loop */
427 				pfn += pages;
428 			}
429 			bb->end_pfn = pfn;
430 		}
431 	}
432 
433 	bm->p_list = ca.chain;
434 	memory_bm_position_reset(bm);
435  Exit:
436 	free_mem_extents(&mem_extents);
437 	return error;
438 
439  Error:
440 	bm->p_list = ca.chain;
441 	memory_bm_free(bm, PG_UNSAFE_CLEAR);
442 	goto Exit;
443 }
444 
445 /**
446   *	memory_bm_free - free memory occupied by the memory bitmap @bm
447   */
448 static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
449 {
450 	struct bm_block *bb;
451 
452 	list_for_each_entry(bb, &bm->blocks, hook)
453 		if (bb->data)
454 			free_image_page(bb->data, clear_nosave_free);
455 
456 	free_list_of_pages(bm->p_list, clear_nosave_free);
457 
458 	INIT_LIST_HEAD(&bm->blocks);
459 }
460 
461 /**
462  *	memory_bm_find_bit - find the bit in the bitmap @bm that corresponds
463  *	to the given pfn.  The cur.block and cur.bit members of @bm are
464  *	updated so that subsequent lookups of nearby pfns are fast.
465  */
466 static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
467 				void **addr, unsigned int *bit_nr)
468 {
469 	struct bm_block *bb;
470 
471 	/*
472 	 * Check if the pfn corresponds to the current bitmap block and find
473 	 * the block where it fits if this is not the case.
474 	 */
475 	bb = bm->cur.block;
476 	if (pfn < bb->start_pfn)
477 		list_for_each_entry_continue_reverse(bb, &bm->blocks, hook)
478 			if (pfn >= bb->start_pfn)
479 				break;
480 
481 	if (pfn >= bb->end_pfn)
482 		list_for_each_entry_continue(bb, &bm->blocks, hook)
483 			if (pfn >= bb->start_pfn && pfn < bb->end_pfn)
484 				break;
485 
486 	if (&bb->hook == &bm->blocks)
487 		return -EFAULT;
488 
489 	/* The block has been found */
490 	bm->cur.block = bb;
491 	pfn -= bb->start_pfn;
492 	bm->cur.bit = pfn + 1;
493 	*bit_nr = pfn;
494 	*addr = bb->data;
495 	return 0;
496 }
497 
498 static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
499 {
500 	void *addr;
501 	unsigned int bit;
502 	int error;
503 
504 	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
505 	BUG_ON(error);
506 	set_bit(bit, addr);
507 }
508 
509 static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
510 {
511 	void *addr;
512 	unsigned int bit;
513 	int error;
514 
515 	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
516 	if (!error)
517 		set_bit(bit, addr);
518 	return error;
519 }
520 
521 static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
522 {
523 	void *addr;
524 	unsigned int bit;
525 	int error;
526 
527 	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
528 	BUG_ON(error);
529 	clear_bit(bit, addr);
530 }
531 
532 static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
533 {
534 	void *addr;
535 	unsigned int bit;
536 	int error;
537 
538 	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
539 	BUG_ON(error);
540 	return test_bit(bit, addr);
541 }
542 
543 static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
544 {
545 	void *addr;
546 	unsigned int bit;
547 
548 	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
549 }
550 
551 /**
552  *	memory_bm_next_pfn - find the pfn that corresponds to the next set bit
553  *	in the bitmap @bm.  If the pfn cannot be found, BM_END_OF_MAP is
554  *	returned.
555  *
556  *	It is required to run memory_bm_position_reset() before the first call to
557  *	this function.
558  */
559 
560 static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
561 {
562 	struct bm_block *bb;
563 	int bit;
564 
565 	bb = bm->cur.block;
566 	do {
567 		bit = bm->cur.bit;
568 		bit = find_next_bit(bb->data, bm_block_bits(bb), bit);
569 		if (bit < bm_block_bits(bb))
570 			goto Return_pfn;
571 
572 		bb = list_entry(bb->hook.next, struct bm_block, hook);
573 		bm->cur.block = bb;
574 		bm->cur.bit = 0;
575 	} while (&bb->hook != &bm->blocks);
576 
577 	memory_bm_position_reset(bm);
578 	return BM_END_OF_MAP;
579 
580  Return_pfn:
581 	bm->cur.bit = bit + 1;
582 	return bb->start_pfn + bit;
583 }
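
/*
 * Typical iteration idiom, as used by copy_data_pages() and
 * duplicate_memory_bitmap() below:
 *
 *	memory_bm_position_reset(bm);
 *	for (;;) {
 *		pfn = memory_bm_next_pfn(bm);
 *		if (pfn == BM_END_OF_MAP)
 *			break;
 *		... process pfn ...
 *	}
 */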
584 
585 /**
586  *	This structure represents a range of page frames the contents of which
587  *	should not be saved during the suspend.
588  */
589 
590 struct nosave_region {
591 	struct list_head list;
592 	unsigned long start_pfn;
593 	unsigned long end_pfn;
594 };
595 
596 static LIST_HEAD(nosave_regions);
597 
598 /**
599  *	register_nosave_region - register a range of page frames the contents
600  *	of which should not be saved during the suspend (to be used in the early
601  *	initialization code)
602  */
603 
604 void __init
605 __register_nosave_region(unsigned long start_pfn, unsigned long end_pfn,
606 			 int use_kmalloc)
607 {
608 	struct nosave_region *region;
609 
610 	if (start_pfn >= end_pfn)
611 		return;
612 
613 	if (!list_empty(&nosave_regions)) {
614 		/* Try to extend the previous region (they should be sorted) */
615 		region = list_entry(nosave_regions.prev,
616 					struct nosave_region, list);
617 		if (region->end_pfn == start_pfn) {
618 			region->end_pfn = end_pfn;
619 			goto Report;
620 		}
621 	}
622 	if (use_kmalloc) {
623 		/* during init, this shouldn't fail */
624 		region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
625 		BUG_ON(!region);
626 	} else
627 		/* This allocation cannot fail */
628 		region = alloc_bootmem(sizeof(struct nosave_region));
629 	region->start_pfn = start_pfn;
630 	region->end_pfn = end_pfn;
631 	list_add_tail(&region->list, &nosave_regions);
632  Report:
633 	printk(KERN_INFO "PM: Registered nosave memory: %016lx - %016lx\n",
634 		start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
635 }
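
/*
 * Illustrative call via the register_nosave_region() wrapper declared in
 * <linux/suspend.h> (the pfn values here are made up):
 *
 *	register_nosave_region(0x000a0, 0x00100);
 *
 * Because a region whose start_pfn equals the previous region's end_pfn
 * simply extends that region, registering ranges in sorted order keeps
 * the nosave_regions list short.
 */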
636 
637 /*
638  * Set bits in this map correspond to the page frames the contents of which
639  * should not be saved during the suspend.
640  */
641 static struct memory_bitmap *forbidden_pages_map;
642 
643 /* Set bits in this map correspond to free page frames. */
644 static struct memory_bitmap *free_pages_map;
645 
646 /*
647  * Each page frame allocated for creating the image is marked by setting the
648  * corresponding bits in forbidden_pages_map and free_pages_map simultaneously
649  */
650 
651 void swsusp_set_page_free(struct page *page)
652 {
653 	if (free_pages_map)
654 		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
655 }
656 
657 static int swsusp_page_is_free(struct page *page)
658 {
659 	return free_pages_map ?
660 		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
661 }
662 
663 void swsusp_unset_page_free(struct page *page)
664 {
665 	if (free_pages_map)
666 		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
667 }
668 
669 static void swsusp_set_page_forbidden(struct page *page)
670 {
671 	if (forbidden_pages_map)
672 		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
673 }
674 
675 int swsusp_page_is_forbidden(struct page *page)
676 {
677 	return forbidden_pages_map ?
678 		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
679 }
680 
681 static void swsusp_unset_page_forbidden(struct page *page)
682 {
683 	if (forbidden_pages_map)
684 		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
685 }
686 
687 /**
688  *	mark_nosave_pages - set the bits in the given bitmap that correspond
689  *	to the page frames the contents of which should not be saved
690  */
691 
692 static void mark_nosave_pages(struct memory_bitmap *bm)
693 {
694 	struct nosave_region *region;
695 
696 	if (list_empty(&nosave_regions))
697 		return;
698 
699 	list_for_each_entry(region, &nosave_regions, list) {
700 		unsigned long pfn;
701 
702 		pr_debug("PM: Marking nosave pages: %016lx - %016lx\n",
703 				region->start_pfn << PAGE_SHIFT,
704 				region->end_pfn << PAGE_SHIFT);
705 
706 		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
707 			if (pfn_valid(pfn)) {
708 				/*
709 				 * It is safe to ignore the result of
710 				 * mem_bm_set_bit_check() here, since we won't
711 				 * touch the PFNs for which the error is
712 				 * returned anyway.
713 				 */
714 				mem_bm_set_bit_check(bm, pfn);
715 			}
716 	}
717 }
718 
719 /**
720  *	create_basic_memory_bitmaps - create bitmaps needed for marking page
721  *	frames that should not be saved and free page frames.  The pointers
722  *	forbidden_pages_map and free_pages_map are only modified if everything
723  *	goes well, because we don't want the bits to be used before both bitmaps
724  *	are set up.
725  */
726 
727 int create_basic_memory_bitmaps(void)
728 {
729 	struct memory_bitmap *bm1, *bm2;
730 	int error = 0;
731 
732 	BUG_ON(forbidden_pages_map || free_pages_map);
733 
734 	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
735 	if (!bm1)
736 		return -ENOMEM;
737 
738 	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
739 	if (error)
740 		goto Free_first_object;
741 
742 	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
743 	if (!bm2)
744 		goto Free_first_bitmap;
745 
746 	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
747 	if (error)
748 		goto Free_second_object;
749 
750 	forbidden_pages_map = bm1;
751 	free_pages_map = bm2;
752 	mark_nosave_pages(forbidden_pages_map);
753 
754 	pr_debug("PM: Basic memory bitmaps created\n");
755 
756 	return 0;
757 
758  Free_second_object:
759 	kfree(bm2);
760  Free_first_bitmap:
761 	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
762  Free_first_object:
763 	kfree(bm1);
764 	return -ENOMEM;
765 }
766 
767 /**
768  *	free_basic_memory_bitmaps - free memory bitmaps allocated by
769  *	create_basic_memory_bitmaps().  The auxiliary pointers are necessary
770  *	so that the bitmaps themselves are not referred to while they are being
771  *	freed.
772  */
773 
774 void free_basic_memory_bitmaps(void)
775 {
776 	struct memory_bitmap *bm1, *bm2;
777 
778 	BUG_ON(!(forbidden_pages_map && free_pages_map));
779 
780 	bm1 = forbidden_pages_map;
781 	bm2 = free_pages_map;
782 	forbidden_pages_map = NULL;
783 	free_pages_map = NULL;
784 	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
785 	kfree(bm1);
786 	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
787 	kfree(bm2);
788 
789 	pr_debug("PM: Basic memory bitmaps freed\n");
790 }
791 
792 /**
793  *	snapshot_additional_pages - estimate the number of additional pages
794  *	that will be needed to set up the suspend image data structures for the
795  *	given zone (usually the returned value is greater than the exact number)
796  */
797 
798 unsigned int snapshot_additional_pages(struct zone *zone)
799 {
800 	unsigned int res;
801 
802 	res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
803 	res += DIV_ROUND_UP(res * sizeof(struct bm_block), PAGE_SIZE);
804 	return 2 * res;
805 }
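
/*
 * Worked example (illustrative; assumes 4 KiB pages and a 64-bit kernel,
 * where sizeof(struct bm_block) == 40): a zone spanning 1048576 pages
 * needs DIV_ROUND_UP(1048576, 32768) = 32 bitmap blocks plus
 * DIV_ROUND_UP(32 * 40, 4096) = 1 page for the bm_block objects, so the
 * function returns 2 * 33 = 66 pages, the factor of 2 covering the two
 * bitmaps (orig_bm and copy_bm) created later on.
 */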
806 
807 #ifdef CONFIG_HIGHMEM
808 /**
809  *	count_free_highmem_pages - compute the total number of free highmem
810  *	pages, system-wide.
811  */
812 
813 static unsigned int count_free_highmem_pages(void)
814 {
815 	struct zone *zone;
816 	unsigned int cnt = 0;
817 
818 	for_each_populated_zone(zone)
819 		if (is_highmem(zone))
820 			cnt += zone_page_state(zone, NR_FREE_PAGES);
821 
822 	return cnt;
823 }
824 
825 /**
826  *	saveable_highmem_page - Determine whether a highmem page should be
827  *	included in the suspend image.
828  *
829  *	We should save the page if it isn't Nosave, NosaveFree or Reserved,
830  *	and it isn't a part of a free chunk of pages.
831  */
832 static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
833 {
834 	struct page *page;
835 
836 	if (!pfn_valid(pfn))
837 		return NULL;
838 
839 	page = pfn_to_page(pfn);
840 	if (page_zone(page) != zone)
841 		return NULL;
842 
843 	BUG_ON(!PageHighMem(page));
844 
845 	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
846 	    PageReserved(page))
847 		return NULL;
848 
849 	return page;
850 }
851 
852 /**
853  *	count_highmem_pages - compute the total number of saveable highmem
854  *	pages.
855  */
856 
857 static unsigned int count_highmem_pages(void)
858 {
859 	struct zone *zone;
860 	unsigned int n = 0;
861 
862 	for_each_populated_zone(zone) {
863 		unsigned long pfn, max_zone_pfn;
864 
865 		if (!is_highmem(zone))
866 			continue;
867 
868 		mark_free_pages(zone);
869 		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
870 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
871 			if (saveable_highmem_page(zone, pfn))
872 				n++;
873 	}
874 	return n;
875 }
876 #else
877 static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
878 {
879 	return NULL;
880 }
881 #endif /* CONFIG_HIGHMEM */
882 
883 /**
884  *	saveable_page - Determine whether a non-highmem page should be included
885  *	in the suspend image.
886  *
887  *	We should save the page if it isn't Nosave, and is not in the range
888  *	of pages statically defined as 'unsaveable', and it isn't a part of
889  *	a free chunk of pages.
890  */
891 static struct page *saveable_page(struct zone *zone, unsigned long pfn)
892 {
893 	struct page *page;
894 
895 	if (!pfn_valid(pfn))
896 		return NULL;
897 
898 	page = pfn_to_page(pfn);
899 	if (page_zone(page) != zone)
900 		return NULL;
901 
902 	BUG_ON(PageHighMem(page));
903 
904 	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
905 		return NULL;
906 
907 	if (PageReserved(page)
908 	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
909 		return NULL;
910 
911 	return page;
912 }
913 
914 /**
915  *	count_data_pages - compute the total number of saveable non-highmem
916  *	pages.
917  */
918 
919 static unsigned int count_data_pages(void)
920 {
921 	struct zone *zone;
922 	unsigned long pfn, max_zone_pfn;
923 	unsigned int n = 0;
924 
925 	for_each_populated_zone(zone) {
926 		if (is_highmem(zone))
927 			continue;
928 
929 		mark_free_pages(zone);
930 		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
931 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
932 			if (saveable_page(zone, pfn))
933 				n++;
934 	}
935 	return n;
936 }
937 
938 /* This is needed because copy_page and memcpy are not usable for copying
939  * task structs.
940  */
941 static inline void do_copy_page(long *dst, long *src)
942 {
943 	int n;
944 
945 	for (n = PAGE_SIZE / sizeof(long); n; n--)
946 		*dst++ = *src++;
947 }
948 
949 
950 /**
951  *	safe_copy_page - check if the page we are going to copy is marked as
952  *		present in the kernel page tables (this always is the case if
953  *		CONFIG_DEBUG_PAGEALLOC is not set and in that case
954  *		kernel_page_present() always returns 'true').
955  */
956 static void safe_copy_page(void *dst, struct page *s_page)
957 {
958 	if (kernel_page_present(s_page)) {
959 		do_copy_page(dst, page_address(s_page));
960 	} else {
961 		kernel_map_pages(s_page, 1, 1);
962 		do_copy_page(dst, page_address(s_page));
963 		kernel_map_pages(s_page, 1, 0);
964 	}
965 }
966 
967 
968 #ifdef CONFIG_HIGHMEM
969 static inline struct page *
970 page_is_saveable(struct zone *zone, unsigned long pfn)
971 {
972 	return is_highmem(zone) ?
973 		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
974 }
975 
976 static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
977 {
978 	struct page *s_page, *d_page;
979 	void *src, *dst;
980 
981 	s_page = pfn_to_page(src_pfn);
982 	d_page = pfn_to_page(dst_pfn);
983 	if (PageHighMem(s_page)) {
984 		src = kmap_atomic(s_page, KM_USER0);
985 		dst = kmap_atomic(d_page, KM_USER1);
986 		do_copy_page(dst, src);
987 		kunmap_atomic(dst, KM_USER1);
988 		kunmap_atomic(src, KM_USER0);
989 	} else {
990 		if (PageHighMem(d_page)) {
991 			/* Page pointed to by src may contain some kernel
992 			 * data modified by kmap_atomic()
993 			 */
994 			safe_copy_page(buffer, s_page);
995 			dst = kmap_atomic(d_page, KM_USER0);
996 			copy_page(dst, buffer);
997 			kunmap_atomic(dst, KM_USER0);
998 		} else {
999 			safe_copy_page(page_address(d_page), s_page);
1000 		}
1001 	}
1002 }
1003 #else
1004 #define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)
1005 
1006 static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
1007 {
1008 	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
1009 				pfn_to_page(src_pfn));
1010 }
1011 #endif /* CONFIG_HIGHMEM */
1012 
1013 static void
1014 copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
1015 {
1016 	struct zone *zone;
1017 	unsigned long pfn;
1018 
1019 	for_each_populated_zone(zone) {
1020 		unsigned long max_zone_pfn;
1021 
1022 		mark_free_pages(zone);
1023 		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
1024 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1025 			if (page_is_saveable(zone, pfn))
1026 				memory_bm_set_bit(orig_bm, pfn);
1027 	}
1028 	memory_bm_position_reset(orig_bm);
1029 	memory_bm_position_reset(copy_bm);
1030 	for (;;) {
1031 		pfn = memory_bm_next_pfn(orig_bm);
1032 		if (unlikely(pfn == BM_END_OF_MAP))
1033 			break;
1034 		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
1035 	}
1036 }
1037 
1038 /* Total number of image pages */
1039 static unsigned int nr_copy_pages;
1040 /* Number of pages needed for saving the original pfns of the image pages */
1041 static unsigned int nr_meta_pages;
1042 /*
1043  * Numbers of normal and highmem page frames allocated for hibernation image
1044  * before suspending devices.
1045  */
1046 unsigned int alloc_normal, alloc_highmem;
1047 /*
1048  * Memory bitmap used for marking saveable pages (during hibernation) or
1049  * hibernation image pages (during restore)
1050  */
1051 static struct memory_bitmap orig_bm;
1052 /*
1053  * Memory bitmap used during hibernation for marking allocated page frames that
1054  * will contain copies of saveable pages.  During restore it is initially used
1055  * for marking hibernation image pages, but then the set bits from it are
1056  * duplicated in @orig_bm and it is released.  On highmem systems it is next
1057  * used for marking "safe" highmem pages, but it has to be reinitialized for
1058  * this purpose.
1059  */
1060 static struct memory_bitmap copy_bm;
1061 
1062 /**
1063  *	swsusp_free - free pages allocated for the suspend.
1064  *
1065  *	Suspend pages are allocated before the atomic copy is made, so we
1066  *	need to release them after the resume.
1067  */
1068 
1069 void swsusp_free(void)
1070 {
1071 	struct zone *zone;
1072 	unsigned long pfn, max_zone_pfn;
1073 
1074 	for_each_populated_zone(zone) {
1075 		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
1076 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1077 			if (pfn_valid(pfn)) {
1078 				struct page *page = pfn_to_page(pfn);
1079 
1080 				if (swsusp_page_is_forbidden(page) &&
1081 				    swsusp_page_is_free(page)) {
1082 					swsusp_unset_page_forbidden(page);
1083 					swsusp_unset_page_free(page);
1084 					__free_page(page);
1085 				}
1086 			}
1087 	}
1088 	nr_copy_pages = 0;
1089 	nr_meta_pages = 0;
1090 	restore_pblist = NULL;
1091 	buffer = NULL;
1092 	alloc_normal = 0;
1093 	alloc_highmem = 0;
1094 }
1095 
1096 /* Helper functions used for the shrinking of memory. */
1097 
1098 #define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)
1099 
1100 /**
1101  * preallocate_image_pages - Allocate a number of pages for hibernation image
1102  * @nr_pages: Number of page frames to allocate.
1103  * @mask: GFP flags to use for the allocation.
1104  *
1105  * Return value: Number of page frames actually allocated
1106  */
1107 static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
1108 {
1109 	unsigned long nr_alloc = 0;
1110 
1111 	while (nr_pages > 0) {
1112 		struct page *page;
1113 
1114 		page = alloc_image_page(mask);
1115 		if (!page)
1116 			break;
1117 		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
1118 		if (PageHighMem(page))
1119 			alloc_highmem++;
1120 		else
1121 			alloc_normal++;
1122 		nr_pages--;
1123 		nr_alloc++;
1124 	}
1125 
1126 	return nr_alloc;
1127 }
1128 
1129 static unsigned long preallocate_image_memory(unsigned long nr_pages,
1130 					      unsigned long avail_normal)
1131 {
1132 	unsigned long alloc;
1133 
1134 	if (avail_normal <= alloc_normal)
1135 		return 0;
1136 
1137 	alloc = avail_normal - alloc_normal;
1138 	if (nr_pages < alloc)
1139 		alloc = nr_pages;
1140 
1141 	return preallocate_image_pages(alloc, GFP_IMAGE);
1142 }
1143 
1144 #ifdef CONFIG_HIGHMEM
1145 static unsigned long preallocate_image_highmem(unsigned long nr_pages)
1146 {
1147 	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
1148 }
1149 
1150 /**
1151  *  __fraction - Compute (an approximation of) x * (multiplier / base)
1152  */
1153 static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
1154 {
1155 	x *= multiplier;
1156 	do_div(x, base);
1157 	return (unsigned long)x;
1158 }
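
/*
 * For example, __fraction(1000, 3, 4) == 750.  The multiplication is
 * done in 64 bits and divided with do_div(), so x * multiplier does not
 * overflow for realistic page counts.
 */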
1159 
1160 static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1161 						unsigned long highmem,
1162 						unsigned long total)
1163 {
1164 	unsigned long alloc = __fraction(nr_pages, highmem, total);
1165 
1166 	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
1167 }
1168 #else /* CONFIG_HIGHMEM */
1169 static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
1170 {
1171 	return 0;
1172 }
1173 
1174 static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1175 						unsigned long highmem,
1176 						unsigned long total)
1177 {
1178 	return 0;
1179 }
1180 #endif /* CONFIG_HIGHMEM */
1181 
1182 /**
1183  * free_unnecessary_pages - Release preallocated pages not needed for the image
1184  */
1185 static void free_unnecessary_pages(void)
1186 {
1187 	unsigned long save, to_free_normal, to_free_highmem;
1188 
1189 	save = count_data_pages();
1190 	if (alloc_normal >= save) {
1191 		to_free_normal = alloc_normal - save;
1192 		save = 0;
1193 	} else {
1194 		to_free_normal = 0;
1195 		save -= alloc_normal;
1196 	}
1197 	save += count_highmem_pages();
1198 	if (alloc_highmem >= save) {
1199 		to_free_highmem = alloc_highmem - save;
1200 	} else {
1201 		to_free_highmem = 0;
1202 		to_free_normal -= save - alloc_highmem;
1203 	}
1204 
1205 	memory_bm_position_reset(&copy_bm);
1206 
1207 	while (to_free_normal > 0 || to_free_highmem > 0) {
1208 		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
1209 		struct page *page = pfn_to_page(pfn);
1210 
1211 		if (PageHighMem(page)) {
1212 			if (!to_free_highmem)
1213 				continue;
1214 			to_free_highmem--;
1215 			alloc_highmem--;
1216 		} else {
1217 			if (!to_free_normal)
1218 				continue;
1219 			to_free_normal--;
1220 			alloc_normal--;
1221 		}
1222 		memory_bm_clear_bit(&copy_bm, pfn);
1223 		swsusp_unset_page_forbidden(page);
1224 		swsusp_unset_page_free(page);
1225 		__free_page(page);
1226 	}
1227 }
1228 
1229 /**
1230  * minimum_image_size - Estimate the minimum acceptable size of an image
1231  * @saveable: Number of saveable pages in the system.
1232  *
1233  * We want to avoid attempting to free too much memory too hard, so estimate the
1234  * minimum acceptable size of a hibernation image to use as the lower limit for
1235  * preallocating memory.
1236  *
1237  * We assume that the minimum image size should be proportional to
1238  *
1239  * [number of saveable pages] - [number of pages that can be freed in theory]
1240  *
1241  * where the second term is the sum of (1) reclaimable slab pages, (2) active
1242  * and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
1243  * minus mapped file pages.
1244  */
1245 static unsigned long minimum_image_size(unsigned long saveable)
1246 {
1247 	unsigned long size;
1248 
1249 	size = global_page_state(NR_SLAB_RECLAIMABLE)
1250 		+ global_page_state(NR_ACTIVE_ANON)
1251 		+ global_page_state(NR_INACTIVE_ANON)
1252 		+ global_page_state(NR_ACTIVE_FILE)
1253 		+ global_page_state(NR_INACTIVE_FILE)
1254 		- global_page_state(NR_FILE_MAPPED);
1255 
1256 	return saveable <= size ? 0 : saveable - size;
1257 }
1258 
1259 /**
1260  * hibernate_preallocate_memory - Preallocate memory for hibernation image
1261  *
1262  * To create a hibernation image it is necessary to make a copy of every page
1263  * frame in use.  We also need a number of page frames to be free during
1264  * hibernation for allocations made while saving the image and for device
1265  * drivers, in case they need to allocate memory from their hibernation
1266  * callbacks (these two numbers are given by PAGES_FOR_IO and SPARE_PAGES,
1267  * respectively, both of which are rough estimates).  To make this happen, we
1268  * compute the total number of available page frames and allocate at least
1269  *
1270  * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2 + 2 * SPARE_PAGES
1271  *
1272  * of them, which corresponds to the maximum size of a hibernation image.
1273  *
1274  * If image_size is set below the number following from the above formula,
1275  * the preallocation of memory is continued until the total number of saveable
1276  * pages in the system is below the requested image size or the minimum
1277  * acceptable image size returned by minimum_image_size(), whichever is greater.
1278  */
1279 int hibernate_preallocate_memory(void)
1280 {
1281 	struct zone *zone;
1282 	unsigned long saveable, size, max_size, count, highmem, pages = 0;
1283 	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
1284 	struct timeval start, stop;
1285 	int error;
1286 
1287 	printk(KERN_INFO "PM: Preallocating image memory... ");
1288 	do_gettimeofday(&start);
1289 
1290 	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
1291 	if (error)
1292 		goto err_out;
1293 
1294 	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
1295 	if (error)
1296 		goto err_out;
1297 
1298 	alloc_normal = 0;
1299 	alloc_highmem = 0;
1300 
1301 	/* Count the number of saveable data pages. */
1302 	save_highmem = count_highmem_pages();
1303 	saveable = count_data_pages();
1304 
1305 	/*
1306 	 * Compute the total number of page frames we can use (count) and the
1307 	 * number of pages needed for image metadata (size).
1308 	 */
1309 	count = saveable;
1310 	saveable += save_highmem;
1311 	highmem = save_highmem;
1312 	size = 0;
1313 	for_each_populated_zone(zone) {
1314 		size += snapshot_additional_pages(zone);
1315 		if (is_highmem(zone))
1316 			highmem += zone_page_state(zone, NR_FREE_PAGES);
1317 		else
1318 			count += zone_page_state(zone, NR_FREE_PAGES);
1319 	}
1320 	avail_normal = count;
1321 	count += highmem;
1322 	count -= totalreserve_pages;
1323 
1324 	/* Compute the maximum number of saveable pages to leave in memory. */
1325 	max_size = (count - (size + PAGES_FOR_IO)) / 2 - 2 * SPARE_PAGES;
1326 	/* Compute the desired number of image pages specified by image_size. */
1327 	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
1328 	if (size > max_size)
1329 		size = max_size;
1330 	/*
1331 	 * If the desired number of image pages is at least as large as the
1332 	 * current number of saveable pages in memory, allocate page frames for
1333 	 * the image and we're done.
1334 	 */
1335 	if (size >= saveable) {
1336 		pages = preallocate_image_highmem(save_highmem);
1337 		pages += preallocate_image_memory(saveable - pages, avail_normal);
1338 		goto out;
1339 	}
1340 
1341 	/* Estimate the minimum size of the image. */
1342 	pages = minimum_image_size(saveable);
1343 	/*
1344 	 * To avoid excessive pressure on the normal zone, leave room in it to
1345 	 * accommodate an image of the minimum size (unless it's already too
1346 	 * small, in which case don't preallocate pages from it at all).
1347 	 */
1348 	if (avail_normal > pages)
1349 		avail_normal -= pages;
1350 	else
1351 		avail_normal = 0;
1352 	if (size < pages)
1353 		size = min_t(unsigned long, pages, max_size);
1354 
1355 	/*
1356 	 * Let the memory management subsystem know that we're going to need a
1357 	 * large number of page frames to allocate and make it free some memory.
1358 	 * NOTE: If this is not done, performance will be hurt badly in some
1359 	 * test cases.
1360 	 */
1361 	shrink_all_memory(saveable - size);
1362 
1363 	/*
1364 	 * The number of saveable pages in memory was too high, so apply some
1365 	 * pressure to decrease it.  First, make room for the largest possible
1366 	 * image and fail if that doesn't work.  Next, try to decrease the size
1367 	 * of the image as much as indicated by 'size' using allocations from
1368 	 * highmem and non-highmem zones separately.
1369 	 */
1370 	pages_highmem = preallocate_image_highmem(highmem / 2);
1371 	alloc = (count - max_size) - pages_highmem;
1372 	pages = preallocate_image_memory(alloc, avail_normal);
1373 	if (pages < alloc) {
1374 		/* We have exhausted non-highmem pages, try highmem. */
1375 		alloc -= pages;
1376 		pages += pages_highmem;
1377 		pages_highmem = preallocate_image_highmem(alloc);
1378 		if (pages_highmem < alloc)
1379 			goto err_out;
1380 		pages += pages_highmem;
1381 		/*
1382 		 * size is the desired number of saveable pages to leave in
1383 		 * memory, so try to preallocate (all memory - size) pages.
1384 		 */
1385 		alloc = (count - pages) - size;
1386 		pages += preallocate_image_highmem(alloc);
1387 	} else {
1388 		/*
1389 		 * There are approximately max_size saveable pages at this point
1390 		 * and we want to reduce this number down to size.
1391 		 */
1392 		alloc = max_size - size;
1393 		size = preallocate_highmem_fraction(alloc, highmem, count);
1394 		pages_highmem += size;
1395 		alloc -= size;
1396 		size = preallocate_image_memory(alloc, avail_normal);
1397 		pages_highmem += preallocate_image_highmem(alloc - size);
1398 		pages += pages_highmem + size;
1399 	}
1400 
1401 	/*
1402 	 * We only need as many page frames for the image as there are saveable
1403 	 * pages in memory, but we have allocated more.  Release the excessive
1404 	 * ones now.
1405 	 */
1406 	free_unnecessary_pages();
1407 
1408  out:
1409 	do_gettimeofday(&stop);
1410 	printk(KERN_CONT "done (allocated %lu pages)\n", pages);
1411 	swsusp_show_speed(&start, &stop, pages, "Allocated");
1412 
1413 	return 0;
1414 
1415  err_out:
1416 	printk(KERN_CONT "\n");
1417 	swsusp_free();
1418 	return -ENOMEM;
1419 }
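
/*
 * Rough numeric sketch of the max_size formula above (illustrative
 * figures; PAGES_FOR_IO and SPARE_PAGES are treated as negligible):
 * with count == 250000 usable page frames and size == 500 metadata
 * pages, max_size comes out near (250000 - 500) / 2, i.e. about 124750
 * pages, so the image may occupy at most about half of the usable page
 * frames.
 */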
1420 
1421 #ifdef CONFIG_HIGHMEM
1422 /**
1423   *	count_pages_for_highmem - compute the number of non-highmem pages
1424   *	that will be necessary for creating copies of highmem pages.
1425   */
1426 
1427 static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
1428 {
1429 	unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
1430 
1431 	if (free_highmem >= nr_highmem)
1432 		nr_highmem = 0;
1433 	else
1434 		nr_highmem -= free_highmem;
1435 
1436 	return nr_highmem;
1437 }
1438 #else
1439 static unsigned int
1440 count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
1441 #endif /* CONFIG_HIGHMEM */
1442 
1443 /**
1444  *	enough_free_mem - Make sure we have enough free memory for the
1445  *	snapshot image.
1446  */
1447 
1448 static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
1449 {
1450 	struct zone *zone;
1451 	unsigned int free = alloc_normal;
1452 
1453 	for_each_populated_zone(zone)
1454 		if (!is_highmem(zone))
1455 			free += zone_page_state(zone, NR_FREE_PAGES);
1456 
1457 	nr_pages += count_pages_for_highmem(nr_highmem);
1458 	pr_debug("PM: Normal pages needed: %u + %u, available pages: %u\n",
1459 		nr_pages, PAGES_FOR_IO, free);
1460 
1461 	return free > nr_pages + PAGES_FOR_IO;
1462 }
1463 
1464 #ifdef CONFIG_HIGHMEM
1465 /**
1466  *	get_highmem_buffer - if there are some highmem pages in the suspend
1467  *	image, we may need the buffer to copy them and/or load their data.
1468  */
1469 
1470 static inline int get_highmem_buffer(int safe_needed)
1471 {
1472 	buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
1473 	return buffer ? 0 : -ENOMEM;
1474 }
1475 
1476 /**
1477  *	alloc_highmem_image_pages - allocate some highmem pages for the image.
1478  *	Try to allocate as many pages as needed, but if the number of free
1479  *	highmem pages is smaller than that, allocate them all.
1480  */
1481 
1482 static inline unsigned int
1483 alloc_highmem_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
1484 {
1485 	unsigned int to_alloc = count_free_highmem_pages();
1486 
1487 	if (to_alloc > nr_highmem)
1488 		to_alloc = nr_highmem;
1489 
1490 	nr_highmem -= to_alloc;
1491 	while (to_alloc-- > 0) {
1492 		struct page *page;
1493 
1494 		page = alloc_image_page(__GFP_HIGHMEM);
1495 		memory_bm_set_bit(bm, page_to_pfn(page));
1496 	}
1497 	return nr_highmem;
1498 }
1499 #else
1500 static inline int get_highmem_buffer(int safe_needed) { return 0; }
1501 
1502 static inline unsigned int
1503 alloc_highmem_pages(struct memory_bitmap *bm, unsigned int n) { return 0; }
1504 #endif /* CONFIG_HIGHMEM */
1505 
1506 /**
1507  *	swsusp_alloc - allocate memory for the suspend image
1508  *
1509  *	We first try to allocate as many highmem pages as there are
1510  *	saveable highmem pages in the system.  If that fails, we allocate
1511  *	non-highmem pages for the copies of the remaining highmem ones.
1512  *
1513  *	In this approach it is likely that the copies of highmem pages will
1514  *	also be located in the high memory, because of the way in which
1515  *	copy_data_pages() works.
1516  */
1517 
1518 static int
1519 swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
1520 		unsigned int nr_pages, unsigned int nr_highmem)
1521 {
1522 	int error = 0;
1523 
1524 	if (nr_highmem > 0) {
1525 		error = get_highmem_buffer(PG_ANY);
1526 		if (error)
1527 			goto err_out;
1528 		if (nr_highmem > alloc_highmem) {
1529 			nr_highmem -= alloc_highmem;
1530 			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
1531 		}
1532 	}
1533 	if (nr_pages > alloc_normal) {
1534 		nr_pages -= alloc_normal;
1535 		while (nr_pages-- > 0) {
1536 			struct page *page;
1537 
1538 			page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
1539 			if (!page)
1540 				goto err_out;
1541 			memory_bm_set_bit(copy_bm, page_to_pfn(page));
1542 		}
1543 	}
1544 
1545 	return 0;
1546 
1547  err_out:
1548 	swsusp_free();
1549 	return error;
1550 }
1551 
1552 asmlinkage int swsusp_save(void)
1553 {
1554 	unsigned int nr_pages, nr_highmem;
1555 
1556 	printk(KERN_INFO "PM: Creating hibernation image:\n");
1557 
1558 	drain_local_pages(NULL);
1559 	nr_pages = count_data_pages();
1560 	nr_highmem = count_highmem_pages();
1561 	printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);
1562 
1563 	if (!enough_free_mem(nr_pages, nr_highmem)) {
1564 		printk(KERN_ERR "PM: Not enough free memory\n");
1565 		return -ENOMEM;
1566 	}
1567 
1568 	if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
1569 		printk(KERN_ERR "PM: Memory allocation failed\n");
1570 		return -ENOMEM;
1571 	}
1572 
1573 	/* While allocating the suspend pagedir, new cold pages may appear.
1574 	 * Kill them.
1575 	 */
1576 	drain_local_pages(NULL);
1577 	copy_data_pages(&copy_bm, &orig_bm);
1578 
1579 	/*
1580 	 * End of critical section. From now on, we can write to memory,
1581 	 * but we should not touch disk. This especially means we must _not_
1582 	 * touch swap space! Except we must write out our image of course.
1583 	 */
1584 
1585 	nr_pages += nr_highmem;
1586 	nr_copy_pages = nr_pages;
1587 	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
1588 
1589 	printk(KERN_INFO "PM: Hibernation image created (%d pages copied)\n",
1590 		nr_pages);
1591 
1592 	return 0;
1593 }
1594 
1595 #ifndef CONFIG_ARCH_HIBERNATION_HEADER
1596 static int init_header_complete(struct swsusp_info *info)
1597 {
1598 	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
1599 	info->version_code = LINUX_VERSION_CODE;
1600 	return 0;
1601 }
1602 
1603 static char *check_image_kernel(struct swsusp_info *info)
1604 {
1605 	if (info->version_code != LINUX_VERSION_CODE)
1606 		return "kernel version";
1607 	if (strcmp(info->uts.sysname,init_utsname()->sysname))
1608 		return "system type";
1609 	if (strcmp(info->uts.release,init_utsname()->release))
1610 		return "kernel release";
1611 	if (strcmp(info->uts.version,init_utsname()->version))
1612 		return "version";
1613 	if (strcmp(info->uts.machine,init_utsname()->machine))
1614 		return "machine";
1615 	return NULL;
1616 }
1617 #endif /* CONFIG_ARCH_HIBERNATION_HEADER */
1618 
1619 unsigned long snapshot_get_image_size(void)
1620 {
1621 	return nr_copy_pages + nr_meta_pages + 1;
1622 }
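
/*
 * Worked example (illustrative; 64-bit kernel, 4 KiB pages): an image of
 * nr_copy_pages == 100000 needs DIV_ROUND_UP(100000 * 8, 4096) = 196
 * metadata pages for the packed pfn array, so this function returns
 * 100000 + 196 + 1 = 100197 pages, the final 1 being the swsusp_info
 * header page.
 */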
1623 
1624 static int init_header(struct swsusp_info *info)
1625 {
1626 	memset(info, 0, sizeof(struct swsusp_info));
1627 	info->num_physpages = num_physpages;
1628 	info->image_pages = nr_copy_pages;
1629 	info->pages = snapshot_get_image_size();
1630 	info->size = info->pages;
1631 	info->size <<= PAGE_SHIFT;
1632 	return init_header_complete(info);
1633 }
1634 
1635 /**
1636  *	pack_pfns - pfns corresponding to the set bits found in the bitmap @bm
1637  *	are stored in the array @buf[] (1 page at a time)
1638  */
1639 
1640 static inline void
1641 pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
1642 {
1643 	int j;
1644 
1645 	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
1646 		buf[j] = memory_bm_next_pfn(bm);
1647 		if (unlikely(buf[j] == BM_END_OF_MAP))
1648 			break;
1649 	}
1650 }
1651 
1652 /**
1653  *	snapshot_read_next - used for reading the system memory snapshot.
1654  *
1655  *	On the first call to it @handle should point to a zeroed
1656  *	snapshot_handle structure.  The structure gets updated and a pointer
1657  *	to it should be passed to this function every next time.
1658  *
1659  *	On success the function returns a positive number.  Then, the caller
1660  *	is allowed to read up to the returned number of bytes from the memory
1661  *	location computed by the data_of() macro.
1662  *
1663  *	The function returns 0 to indicate the end of data stream condition,
1664  *	and a negative number is returned on error.  In such cases the
1665  *	structure pointed to by @handle is not updated and should not be used
1666  *	any more.
1667  */
1668 
1669 int snapshot_read_next(struct snapshot_handle *handle)
1670 {
1671 	if (handle->cur > nr_meta_pages + nr_copy_pages)
1672 		return 0;
1673 
1674 	if (!buffer) {
1675 		/* This makes the buffer be freed by swsusp_free() */
1676 		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
1677 		if (!buffer)
1678 			return -ENOMEM;
1679 	}
1680 	if (!handle->cur) {
1681 		int error;
1682 
1683 		error = init_header((struct swsusp_info *)buffer);
1684 		if (error)
1685 			return error;
1686 		handle->buffer = buffer;
1687 		memory_bm_position_reset(&orig_bm);
1688 		memory_bm_position_reset(&copy_bm);
1689 	} else if (handle->cur <= nr_meta_pages) {
1690 		clear_page(buffer);
1691 		pack_pfns(buffer, &orig_bm);
1692 	} else {
1693 		struct page *page;
1694 
1695 		page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
1696 		if (PageHighMem(page)) {
1697 			/* Highmem pages are copied to the buffer,
1698 			 * because we can't return with a kmapped
1699 			 * highmem page (we may not be called again).
1700 			 */
1701 			void *kaddr;
1702 
1703 			kaddr = kmap_atomic(page, KM_USER0);
1704 			copy_page(buffer, kaddr);
1705 			kunmap_atomic(kaddr, KM_USER0);
1706 			handle->buffer = buffer;
1707 		} else {
1708 			handle->buffer = page_address(page);
1709 		}
1710 	}
1711 	handle->cur++;
1712 	return PAGE_SIZE;
1713 }
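
/*
 * Sketch of a consumer loop (illustrative only; the real image writer
 * lives in kernel/power/swap.c and data_of() is defined in power.h):
 *
 *	struct snapshot_handle handle;
 *	int ret;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((ret = snapshot_read_next(&handle)) > 0) {
 *		... write PAGE_SIZE bytes from data_of(handle) ...
 *	}
 *	return ret;
 *
 * A return value of 0 means the end of the image, a negative value an
 * error.
 */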
1714 
1715 /**
1716  *	mark_unsafe_pages - mark the pages that cannot be used for storing
1717  *	the image during resume, because they conflict with the pages that
1718  *	had been used before suspend
1719  */
1720 
1721 static int mark_unsafe_pages(struct memory_bitmap *bm)
1722 {
1723 	struct zone *zone;
1724 	unsigned long pfn, max_zone_pfn;
1725 
1726 	/* Clear page flags */
1727 	for_each_populated_zone(zone) {
1728 		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
1729 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1730 			if (pfn_valid(pfn))
1731 				swsusp_unset_page_free(pfn_to_page(pfn));
1732 	}
1733 
1734 	/* Mark pages that correspond to the "original" pfns as "unsafe" */
1735 	memory_bm_position_reset(bm);
1736 	do {
1737 		pfn = memory_bm_next_pfn(bm);
1738 		if (likely(pfn != BM_END_OF_MAP)) {
1739 			if (likely(pfn_valid(pfn)))
1740 				swsusp_set_page_free(pfn_to_page(pfn));
1741 			else
1742 				return -EFAULT;
1743 		}
1744 	} while (pfn != BM_END_OF_MAP);
1745 
1746 	allocated_unsafe_pages = 0;
1747 
1748 	return 0;
1749 }
1750 
1751 static void
1752 duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src)
1753 {
1754 	unsigned long pfn;
1755 
1756 	memory_bm_position_reset(src);
1757 	pfn = memory_bm_next_pfn(src);
1758 	while (pfn != BM_END_OF_MAP) {
1759 		memory_bm_set_bit(dst, pfn);
1760 		pfn = memory_bm_next_pfn(src);
1761 	}
1762 }
1763 
1764 static int check_header(struct swsusp_info *info)
1765 {
1766 	char *reason;
1767 
1768 	reason = check_image_kernel(info);
1769 	if (!reason && info->num_physpages != num_physpages)
1770 		reason = "memory size";
1771 	if (reason) {
1772 		printk(KERN_ERR "PM: Image mismatch: %s\n", reason);
1773 		return -EPERM;
1774 	}
1775 	return 0;
1776 }
1777 
1778 /**
1779  *	load_header - check the image header and copy data from it
1780  */
1781 
1782 static int
1783 load_header(struct swsusp_info *info)
1784 {
1785 	int error;
1786 
1787 	restore_pblist = NULL;
1788 	error = check_header(info);
1789 	if (!error) {
1790 		nr_copy_pages = info->image_pages;
1791 		nr_meta_pages = info->pages - info->image_pages - 1;
1792 	}
1793 	return error;
1794 }
1795 
1796 /**
1797  *	unpack_orig_pfns - for each element of @buf[] (1 page at a time) set
1798  *	the corresponding bit in the memory bitmap @bm
1799  */
1800 static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
1801 {
1802 	int j;
1803 
1804 	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
1805 		if (unlikely(buf[j] == BM_END_OF_MAP))
1806 			break;
1807 
1808 		if (memory_bm_pfn_present(bm, buf[j]))
1809 			memory_bm_set_bit(bm, buf[j]);
1810 		else
1811 			return -EFAULT;
1812 	}
1813 
1814 	return 0;
1815 }
1816 
1817 /* List of "safe" pages that may be used to store data loaded from the suspend
1818  * image
1819  */
1820 static struct linked_page *safe_pages_list;
1821 
1822 #ifdef CONFIG_HIGHMEM
1823 /* struct highmem_pbe is used for creating the list of highmem pages that
1824  * should be restored atomically during the resume from disk, because the page
1825  * frames they have occupied before the suspend are in use.
1826  */
1827 struct highmem_pbe {
1828 	struct page *copy_page;	/* data is here now */
1829 	struct page *orig_page;	/* data was here before the suspend */
1830 	struct highmem_pbe *next;
1831 };
1832 
1833 /* List of highmem PBEs needed for restoring the highmem pages that were
1834  * allocated before the suspend and included in the suspend image, but have
1835  * also been allocated by the "resume" kernel, so their contents cannot be
1836  * written directly to their "original" page frames.
1837  */
1838 static struct highmem_pbe *highmem_pblist;
1839 
1840 /**
1841  *	count_highmem_image_pages - compute the number of highmem pages in the
1842  *	suspend image.  The bits in the memory bitmap @bm that correspond to the
1843  *	image pages are assumed to be set.
1844  */
1845 
1846 static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
1847 {
1848 	unsigned long pfn;
1849 	unsigned int cnt = 0;
1850 
1851 	memory_bm_position_reset(bm);
1852 	pfn = memory_bm_next_pfn(bm);
1853 	while (pfn != BM_END_OF_MAP) {
1854 		if (PageHighMem(pfn_to_page(pfn)))
1855 			cnt++;
1856 
1857 		pfn = memory_bm_next_pfn(bm);
1858 	}
1859 	return cnt;
1860 }
1861 
1862 /**
1863  *	prepare_highmem_image - try to allocate as many highmem pages as
1864  *	there are highmem image pages (@nr_highmem_p points to the variable
1865  *	containing the number of highmem image pages).  The pages that are
1866  *	"safe" (ie. will not be overwritten when the suspend image is
1867  *	restored) have the corresponding bits set in @bm (it must be
1868  *	uninitialized).
1869  *
1870  *	NOTE: This function should not be called if there are no highmem
1871  *	image pages.
1872  */
1873 
1874 static unsigned int safe_highmem_pages;
1875 
1876 static struct memory_bitmap *safe_highmem_bm;
1877 
1878 static int
1879 prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
1880 {
1881 	unsigned int to_alloc;
1882 
1883 	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
1884 		return -ENOMEM;
1885 
1886 	if (get_highmem_buffer(PG_SAFE))
1887 		return -ENOMEM;
1888 
1889 	to_alloc = count_free_highmem_pages();
1890 	if (to_alloc > *nr_highmem_p)
1891 		to_alloc = *nr_highmem_p;
1892 	else
1893 		*nr_highmem_p = to_alloc;
1894 
1895 	safe_highmem_pages = 0;
1896 	while (to_alloc-- > 0) {
1897 		struct page *page;
1898 
1899 		page = alloc_page(__GFP_HIGHMEM);
1900 		if (!swsusp_page_is_free(page)) {
1901 			/* The page is "safe", set its bit in the bitmap */
1902 			memory_bm_set_bit(bm, page_to_pfn(page));
1903 			safe_highmem_pages++;
1904 		}
1905 		/* Mark the page as allocated */
1906 		swsusp_set_page_forbidden(page);
1907 		swsusp_set_page_free(page);
1908 	}
1909 	memory_bm_position_reset(bm);
1910 	safe_highmem_bm = bm;
1911 	return 0;
1912 }
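/*
 * Note on the swsusp_page_is_free() check above: as set up by
 * mark_unsafe_pages(), the PageNosaveFree bit marks "unsafe" page
 * frames, i.e. those occupied by image data before the suspend.  A
 * freshly allocated page without that bit is safe to receive restored
 * data; the unsafe ones are nevertheless marked as allocated, so that
 * swsusp_free() can find and release them later.
 */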
1913 
1914 /**
1915  *	get_highmem_page_buffer - for a given highmem image page find the buffer
1916  *	that snapshot_write_next() should set for its caller to write to.
1917  *
1918  *	If the page is to be saved to its "original" page frame or a copy of
1919  *	the page is to be made in the highmem, @buffer is returned.  Otherwise,
1920  *	the copy of the page is to be made in normal memory, so the address of
1921  *	the copy is returned.
1922  *
1923  *	If @buffer is returned, the caller of snapshot_write_next() will write
1924  *	the page's contents to @buffer, so they will have to be copied to the
1925  *	right location on the next call to snapshot_write_next(), which is done
1926  *	with the help of copy_last_highmem_page().  For this purpose, if
1927  *	@buffer is returned, @last_highmem_page is set to the page to which
1928  *	the data will have to be copied from @buffer.
1929  */
1930 
1931 static struct page *last_highmem_page;
1932 
1933 static void *
1934 get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
1935 {
1936 	struct highmem_pbe *pbe;
1937 	void *kaddr;
1938 
1939 	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
1940 		/* We have allocated the "original" page frame and we can
1941 		 * use it directly to store the loaded page.
1942 		 */
1943 		last_highmem_page = page;
1944 		return buffer;
1945 	}
1946 	/* The "original" page frame has not been allocated and we have to
1947 	 * use a "safe" page frame to store the loaded page.
1948 	 */
1949 	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
1950 	if (!pbe) {
1951 		swsusp_free();
1952 		return ERR_PTR(-ENOMEM);
1953 	}
1954 	pbe->orig_page = page;
1955 	if (safe_highmem_pages > 0) {
1956 		struct page *tmp;
1957 
1958 		/* Copy of the page will be stored in high memory */
1959 		kaddr = buffer;
1960 		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
1961 		safe_highmem_pages--;
1962 		last_highmem_page = tmp;
1963 		pbe->copy_page = tmp;
1964 	} else {
1965 		/* Copy of the page will be stored in normal memory */
1966 		kaddr = safe_pages_list;
1967 		safe_pages_list = safe_pages_list->next;
1968 		pbe->copy_page = virt_to_page(kaddr);
1969 	}
1970 	pbe->next = highmem_pblist;
1971 	highmem_pblist = pbe;
1972 	return kaddr;
1973 }
1974 
1975 /**
1976  *	copy_last_highmem_page - copy the contents of a highmem image page
1977  *	from @buffer, where the caller of snapshot_write_next() has placed
1978  *	them, to the right location represented by @last_highmem_page.
1979  */
1980 
1981 static void copy_last_highmem_page(void)
1982 {
1983 	if (last_highmem_page) {
1984 		void *dst;
1985 
1986 		dst = kmap_atomic(last_highmem_page, KM_USER0);
1987 		copy_page(dst, buffer);
1988 		kunmap_atomic(dst, KM_USER0);
1989 		last_highmem_page = NULL;
1990 	}
1991 }
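/*
 * Illustration of the deferred-copy protocol (a sketch of the call
 * sequence, not code from this file): when get_highmem_page_buffer()
 * returns @buffer, the caller of snapshot_write_next() fills @buffer
 * with the page's data, and the *next* snapshot_write_next() call starts
 * with copy_last_highmem_page(), which kmaps @last_highmem_page and
 * moves the data to its final destination before a new buffer is handed
 * out.
 */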
1992 
1993 static inline int last_highmem_page_copied(void)
1994 {
1995 	return !last_highmem_page;
1996 }
1997 
1998 static inline void free_highmem_data(void)
1999 {
2000 	if (safe_highmem_bm)
2001 		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
2002 
2003 	if (buffer)
2004 		free_image_page(buffer, PG_UNSAFE_CLEAR);
2005 }
2006 #else
2007 static inline int get_safe_write_buffer(void) { return 0; }
2008 
2009 static unsigned int
2010 count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
2011 
2012 static inline int
2013 prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
2014 {
2015 	return 0;
2016 }
2017 
2018 static inline void *
2019 get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
2020 {
2021 	return ERR_PTR(-EINVAL);
2022 }
2023 
2024 static inline void copy_last_highmem_page(void) {}
2025 static inline int last_highmem_page_copied(void) { return 1; }
2026 static inline void free_highmem_data(void) {}
2027 #endif /* CONFIG_HIGHMEM */
2028 
2029 /**
2030  *	prepare_image - use the memory bitmap @bm to mark the pages that will
2031  *	be overwritten in the process of restoring the system memory state
2032  *	from the suspend image ("unsafe" pages) and allocate memory for the
2033  *	image.
2034  *
2035  *	The idea is to allocate a new memory bitmap first and then allocate
2036  *	as many pages as needed for the image data, but not to assign these
2037  *	pages to specific tasks initially.  Instead, we just mark them as
2038  *	allocated and create a list of "safe" pages that will be used
2039  *	later.  On systems with high memory a list of "safe" highmem pages is
2040  *	also created.
2041  */
2042 
2043 #define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
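/*
 * Worked example (assuming a 64-bit system with 4 KiB pages): struct pbe
 * holds three pointers, i.e. 24 bytes, and LINKED_PAGE_DATA_SIZE is
 * PAGE_SIZE minus the ->next pointer of struct linked_page, i.e.
 * 4096 - 8 = 4088 bytes, so each linked page carries
 * 4088 / 24 = 170 PBEs.
 */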
2044 
2045 static int
2046 prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
2047 {
2048 	unsigned int nr_pages, nr_highmem;
2049 	struct linked_page *sp_list, *lp;
2050 	int error;
2051 
2052 	/* If there is no highmem, the buffer will not be necessary */
2053 	free_image_page(buffer, PG_UNSAFE_CLEAR);
2054 	buffer = NULL;
2055 
2056 	nr_highmem = count_highmem_image_pages(bm);
2057 	error = mark_unsafe_pages(bm);
2058 	if (error)
2059 		goto Free;
2060 
2061 	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
2062 	if (error)
2063 		goto Free;
2064 
2065 	duplicate_memory_bitmap(new_bm, bm);
2066 	memory_bm_free(bm, PG_UNSAFE_KEEP);
2067 	if (nr_highmem > 0) {
2068 		error = prepare_highmem_image(bm, &nr_highmem);
2069 		if (error)
2070 			goto Free;
2071 	}
2072 	/* Reserve some safe pages for potential later use.
2073 	 *
2074 	 * NOTE: This way we make sure there will be enough safe pages for the
2075 	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
2076 	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
2077 	 */
2078 	sp_list = NULL;
2079 	/* nr_copy_pages cannot be less than allocated_unsafe_pages */
2080 	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2081 	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
2082 	while (nr_pages > 0) {
2083 		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
2084 		if (!lp) {
2085 			error = -ENOMEM;
2086 			goto Free;
2087 		}
2088 		lp->next = sp_list;
2089 		sp_list = lp;
2090 		nr_pages--;
2091 	}
2092 	/* Preallocate memory for the image */
2093 	safe_pages_list = NULL;
2094 	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2095 	while (nr_pages > 0) {
2096 		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
2097 		if (!lp) {
2098 			error = -ENOMEM;
2099 			goto Free;
2100 		}
2101 		if (!swsusp_page_is_free(virt_to_page(lp))) {
2102 			/* The page is "safe", add it to the list */
2103 			lp->next = safe_pages_list;
2104 			safe_pages_list = lp;
2105 		}
2106 		/* Mark the page as allocated */
2107 		swsusp_set_page_forbidden(virt_to_page(lp));
2108 		swsusp_set_page_free(virt_to_page(lp));
2109 		nr_pages--;
2110 	}
2111 	/* Free the reserved safe pages so that chain_alloc() can use them */
2112 	while (sp_list) {
2113 		lp = sp_list->next;
2114 		free_image_page(sp_list, PG_UNSAFE_CLEAR);
2115 		sp_list = lp;
2116 	}
2117 	return 0;
2118 
2119  Free:
2120 	swsusp_free();
2121 	return error;
2122 }
2123 
2124 /**
2125  *	get_buffer - compute the address that snapshot_write_next() should
2126  *	set for its caller to write to.
2127  */
2128 
2129 static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
2130 {
2131 	struct pbe *pbe;
2132 	struct page *page;
2133 	unsigned long pfn = memory_bm_next_pfn(bm);
2134 
2135 	if (pfn == BM_END_OF_MAP)
2136 		return ERR_PTR(-EFAULT);
2137 
2138 	page = pfn_to_page(pfn);
2139 	if (PageHighMem(page))
2140 		return get_highmem_page_buffer(page, ca);
2141 
2142 	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
2143 		/* We have allocated the "original" page frame and we can
2144 		 * use it directly to store the loaded page.
2145 		 */
2146 		return page_address(page);
2147 
2148 	/* The "original" page frame has not been allocated and we have to
2149 	 * use a "safe" page frame to store the loaded page.
2150 	 */
2151 	pbe = chain_alloc(ca, sizeof(struct pbe));
2152 	if (!pbe) {
2153 		swsusp_free();
2154 		return ERR_PTR(-ENOMEM);
2155 	}
2156 	pbe->orig_address = page_address(page);
2157 	pbe->address = safe_pages_list;
2158 	safe_pages_list = safe_pages_list->next;
2159 	pbe->next = restore_pblist;
2160 	restore_pblist = pbe;
2161 	return pbe->address;
2162 }
2163 
2164 /**
2165  *	snapshot_write_next - used for writing the system memory snapshot.
2166  *
2167  *	On the first call to it @handle should point to a zeroed
2168  *	snapshot_handle structure.  The structure gets updated and a pointer
2169  *	to it should be passed to this function on each subsequent call.
2170  *
2171  *	On success the function returns a positive number.  Then, the caller
2172  *	is allowed to write up to the returned number of bytes to the memory
2173  *	location computed by the data_of() macro.
2174  *
2175  *	The function returns 0 to indicate the "end of file" condition,
2176  *	and a negative number is returned on error.  In such cases the
2177  *	structure pointed to by @handle is not updated and should not be used
2178  *	any more.
2179  */
2180 
2181 int snapshot_write_next(struct snapshot_handle *handle)
2182 {
2183 	static struct chain_allocator ca;
2184 	int error = 0;
2185 
2186 	/* Check if we have already loaded the entire image */
2187 	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
2188 		return 0;
2189 
2190 	handle->sync_read = 1;
2191 
2192 	if (!handle->cur) {
2193 		if (!buffer)
2194 			/* This makes the buffer be freed by swsusp_free() */
2195 			buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2196 
2197 		if (!buffer)
2198 			return -ENOMEM;
2199 
2200 		handle->buffer = buffer;
2201 	} else if (handle->cur == 1) {
2202 		error = load_header(buffer);
2203 		if (error)
2204 			return error;
2205 
2206 		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
2207 		if (error)
2208 			return error;
2209 
2210 	} else if (handle->cur <= nr_meta_pages + 1) {
2211 		error = unpack_orig_pfns(buffer, &copy_bm);
2212 		if (error)
2213 			return error;
2214 
2215 		if (handle->cur == nr_meta_pages + 1) {
2216 			error = prepare_image(&orig_bm, &copy_bm);
2217 			if (error)
2218 				return error;
2219 
2220 			chain_init(&ca, GFP_ATOMIC, PG_SAFE);
2221 			memory_bm_position_reset(&orig_bm);
2222 			restore_pblist = NULL;
2223 			handle->buffer = get_buffer(&orig_bm, &ca);
2224 			handle->sync_read = 0;
2225 			if (IS_ERR(handle->buffer))
2226 				return PTR_ERR(handle->buffer);
2227 		}
2228 	} else {
2229 		copy_last_highmem_page();
2230 		handle->buffer = get_buffer(&orig_bm, &ca);
2231 		if (IS_ERR(handle->buffer))
2232 			return PTR_ERR(handle->buffer);
2233 		if (handle->buffer != buffer)
2234 			handle->sync_read = 0;
2235 	}
2236 	handle->cur++;
2237 	return PAGE_SIZE;
2238 }
2239 
2240 /**
2241  *	snapshot_write_finalize - must be called after the last call to
2242  *	snapshot_write_next() in case the last page in the image happens
2243  *	to be a highmem page and its contents need to be stored in
2244  *	highmem.  Additionally, it releases the memory that will not be
2245  *	used any more.
2246  */
2247 
2248 void snapshot_write_finalize(struct snapshot_handle *handle)
2249 {
2250 	copy_last_highmem_page();
2251 	/* Free only if we have loaded the image entirely */
2252 	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
2253 		memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
2254 		free_highmem_data();
2255 	}
2256 }
2257 
2258 int snapshot_image_loaded(struct snapshot_handle *handle)
2259 {
2260 	return !(!nr_copy_pages || !last_highmem_page_copied() ||
2261 			handle->cur <= nr_meta_pages + nr_copy_pages);
2262 }
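/*
 * A minimal sketch of a consumer loop for the interface above (loosely
 * modeled on the way kernel/power/swap.c drives it; read_page() is a
 * hypothetical callback that fills one page with image data):
 */
static int __maybe_unused load_image_sketch(struct snapshot_handle *handle,
				int (*read_page)(void *buf))
{
	int error;

	for (;;) {
		error = snapshot_write_next(handle);
		if (error <= 0)
			break;	/* 0 means the whole image was consumed */
		error = read_page(data_of(*handle));
		if (error)
			break;
	}
	snapshot_write_finalize(handle);
	if (!error && !snapshot_image_loaded(handle))
		error = -ENODATA;
	return error;
}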
2263 
2264 #ifdef CONFIG_HIGHMEM
2265 /* Assumes that @buf is ready and points to a "safe" page */
2266 static inline void
2267 swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
2268 {
2269 	void *kaddr1, *kaddr2;
2270 
2271 	kaddr1 = kmap_atomic(p1, KM_USER0);
2272 	kaddr2 = kmap_atomic(p2, KM_USER1);
2273 	copy_page(buf, kaddr1);
2274 	copy_page(kaddr1, kaddr2);
2275 	copy_page(kaddr2, buf);
2276 	kunmap_atomic(kaddr2, KM_USER1);
2277 	kunmap_atomic(kaddr1, KM_USER0);
2278 }
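/*
 * Illustration of the exchange above: with A = *p1, B = *p2 and T = @buf,
 * the three copy_page() calls perform T = A, A = B, B = T, i.e. a classic
 * three-move swap using @buf as the temporary page.
 */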
2279 
2280 /**
2281  *	restore_highmem - for each highmem page that was allocated before
2282  *	the suspend and included in the suspend image, and also has been
2283  *	allocated by the "resume" kernel, swap its current (ie. "before
2284  *	resume") contents with the previous (ie. "before suspend") ones.
2285  *
2286  *	If the resume eventually fails, we can call this function once
2287  *	again and restore the "before resume" highmem state.
2288  */
2289 
2290 int restore_highmem(void)
2291 {
2292 	struct highmem_pbe *pbe = highmem_pblist;
2293 	void *buf;
2294 
2295 	if (!pbe)
2296 		return 0;
2297 
2298 	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
2299 	if (!buf)
2300 		return -ENOMEM;
2301 
2302 	while (pbe) {
2303 		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
2304 		pbe = pbe->next;
2305 	}
2306 	free_image_page(buf, PG_UNSAFE_CLEAR);
2307 	return 0;
2308 }
2309 #endif /* CONFIG_HIGHMEM */
2310