/*
 * Hibernation support specific for i386 - temporary page tables
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 */

#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/bootmem.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmzone.h>
#include <asm/sections.h>

/* Defined in hibernate_asm_32.S */
extern int restore_image(void);

/* Pointer to the temporary resume page tables */
pgd_t *resume_pg_dir;

/*
 * The following three functions are based on the analogous code in
 * arch/x86/mm/init_32.c.
 */

/*
 * Create a middle page table on a resume-safe page and put a pointer to it in
 * the given global directory entry.  In non-PAE compilation mode the middle
 * layer is folded, so this simply returns a pointer into the given pgd entry.
 */
static pmd_t *resume_one_md_table_init(pgd_t *pgd)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	pmd_table = (pmd_t *)get_safe_page(GFP_ATOMIC);
	if (!pmd_table)
		return NULL;

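	/*
	 * With PAE the top-level entries carry no access-permission bits,
	 * so only _PAGE_PRESENT is set here; the access bits live in the
	 * lower levels.
	 */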
	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
	p4d = p4d_offset(pgd, 0);
	pud = pud_offset(p4d, 0);

	BUG_ON(pmd_table != pmd_offset(pud, 0));
#else
	p4d = p4d_offset(pgd, 0);
	pud = pud_offset(p4d, 0);
	pmd_table = pmd_offset(pud, 0);
#endif

	return pmd_table;
}

/*
 * Create a page table on a resume-safe page and place a pointer to it in
 * a middle page directory entry.
 */
static pte_t *resume_one_page_table_init(pmd_t *pmd)
{
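	/* Allocate a new page table only if the pmd entry is still empty. */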
	if (pmd_none(*pmd)) {
		pte_t *page_table = (pte_t *)get_safe_page(GFP_ATOMIC);

		if (!page_table)
			return NULL;

		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));

		BUG_ON(page_table != pte_offset_kernel(pmd, 0));

		return page_table;
	}

	return pte_offset_kernel(pmd, 0);
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.  The page tables are allocated out of resume-safe pages.
 */
static int resume_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int pgd_idx, pmd_idx;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = resume_one_md_table_init(pgd);
		if (!pmd)
			return -ENOMEM;

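		/*
		 * Past max_low_pfn the pmd tables allocated above stay
		 * empty; only the fill loop below is skipped.
		 */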
		if (pfn >= max_low_pfn)
			continue;

		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) {
			if (pfn >= max_low_pfn)
				break;

			/*
			 * Map with big pages if possible, otherwise create
			 * normal page tables.
			 * NOTE: We can mark everything as executable here.
			 */
			if (boot_cpu_has(X86_FEATURE_PSE)) {
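				/*
				 * One PSE page maps PTRS_PER_PTE small
				 * pages' worth of pfns: 4MB without PAE,
				 * 2MB with PAE.
				 */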
				set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
				pfn += PTRS_PER_PTE;
			} else {
				pte_t *max_pte;

				pte = resume_one_page_table_init(pmd);
				if (!pte)
					return -ENOMEM;

				max_pte = pte + PTRS_PER_PTE;
				for (; pte < max_pte; pte++, pfn++) {
					if (pfn >= max_low_pfn)
						break;

					set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
				}
			}
		}
	}

	return 0;
}

static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
{
#ifdef CONFIG_X86_PAE
	int i;

	/*
	 * Init all entries of the first-level page table to the zero page;
	 * the entries from PAGE_OFFSET up are then overwritten with real
	 * mappings by resume_physical_mapping_init().
	 */
	for (i = 0; i < PTRS_PER_PGD; i++)
		set_pgd(pg_dir + i,
			__pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
#endif
}

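/*
 * Build the temporary page tables on resume-safe pages and hand control to
 * the assembly code that restores the hibernation image.
 */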
asmlinkage int swsusp_arch_resume(void)
{
	int error;

	resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!resume_pg_dir)
		return -ENOMEM;

	resume_init_first_level_page_table(resume_pg_dir);
	error = resume_physical_mapping_init(resume_pg_dir);
	if (error)
		return error;

	/*
	 * We have got enough memory and from now on we cannot recover.
	 * On success restore_image() passes control to the restored image
	 * rather than returning here.
	 */
	restore_image();
	return 0;
}

/*
 *	pfn_is_nosave - check if given pfn is in the 'nosave' section
 */
int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;

	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}