xref: /openbmc/linux/arch/x86/power/hibernate_64.c (revision 8e8e69d6)
/*
 * Hibernation support for x86-64
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/scatterlist.h>
#include <linux/kdebug.h>

#include <crypto/hash.h>

#include <asm/e820/api.h>
#include <asm/init.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mtrr.h>
#include <asm/sections.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

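/*
 * set_up_temporary_text_mapping - map the image kernel's entry point.
 * @pgd: temporary top-level page table to hook the mapping into.
 *
 * Build a single 2M executable mapping so that the page containing
 * jump_address_phys becomes visible at restore_jump_address.  Every
 * intermediate page table comes from get_safe_page(), so none of them can
 * collide with a page frame of the image being restored.
 *
 * Returns 0 on success or -ENOMEM if a safe page cannot be allocated.
 */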
static int set_up_temporary_text_mapping(pgd_t *pgd)
{
	pmd_t *pmd;
	pud_t *pud;
	p4d_t *p4d = NULL;
	pgprot_t pgtable_prot = __pgprot(_KERNPG_TABLE);
	pgprot_t pmd_text_prot = __pgprot(__PAGE_KERNEL_LARGE_EXEC);

	/* Filter out unsupported __PAGE_KERNEL* bits: */
	pgprot_val(pmd_text_prot) &= __default_kernel_pte_mask;
	pgprot_val(pgtable_prot)  &= __default_kernel_pte_mask;

	/*
	 * The new mapping only has to cover the page containing the image
	 * kernel's entry point (jump_address_phys), because the switch over to
	 * it is carried out by relocated code running from a page allocated
	 * specifically for this purpose and covered by the identity mapping, so
	 * the temporary kernel text mapping is only needed for the final jump.
	 * Moreover, in that mapping the virtual address of the image kernel's
	 * entry point must be the same as its virtual address in the image
	 * kernel (restore_jump_address), so the image kernel's
	 * restore_registers() code doesn't find itself in a different area of
	 * the virtual address space after switching over to the original page
	 * tables used by the image kernel.
	 */

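	/*
	 * Illustration (hypothetical value, not taken from this file): with
	 * 4-level paging and restore_jump_address == 0xffffffff81000000, the
	 * indexes work out to pgd_index() == 511, pud_index() == 510 and
	 * pmd_index() == 8, so populating one entry per level is enough to
	 * map the 2M region containing the entry point.
	 */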
	if (pgtable_l5_enabled()) {
		p4d = (p4d_t *)get_safe_page(GFP_ATOMIC);
		if (!p4d)
			return -ENOMEM;
	}

	pud = (pud_t *)get_safe_page(GFP_ATOMIC);
	if (!pud)
		return -ENOMEM;

	pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
	if (!pmd)
		return -ENOMEM;

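	/*
	 * Wire the levels up from the leaf: first a 2M executable PMD entry
	 * for the entry point, then each table is linked into its parent.
	 * Only the slots selected by restore_jump_address are populated.
	 */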
	set_pmd(pmd + pmd_index(restore_jump_address),
		__pmd((jump_address_phys & PMD_MASK) | pgprot_val(pmd_text_prot)));
	set_pud(pud + pud_index(restore_jump_address),
		__pud(__pa(pmd) | pgprot_val(pgtable_prot)));
	if (p4d) {
		p4d_t new_p4d = __p4d(__pa(pud) | pgprot_val(pgtable_prot));
		pgd_t new_pgd = __pgd(__pa(p4d) | pgprot_val(pgtable_prot));

		set_p4d(p4d + p4d_index(restore_jump_address), new_p4d);
		set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
	} else {
		/* No p4d for 4-level paging: point the pgd to the pud page table */
		pgd_t new_pgd = __pgd(__pa(pud) | pgprot_val(pgtable_prot));

		set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
	}

	return 0;
}

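/*
 * alloc_pgt_page - page-table allocation callback for the identity mapping.
 *
 * Plugged into struct x86_mapping_info and invoked by
 * kernel_ident_mapping_init() whenever it needs a new page table.
 * get_safe_page() returns a page that holds no data of the hibernation
 * image, which is what makes it safe to build the temporary tables while
 * the image pages are still in place.
 */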
static void *alloc_pgt_page(void *context)
{
	return (void *)get_safe_page(GFP_ATOMIC);
}

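/*
 * set_up_temporary_mappings - build the page tables used during resume.
 *
 * Create a fresh top-level table holding the kernel text mapping for the
 * final jump plus an identity mapping of every range in pfn_mapped[], i.e.
 * of all memory the boot kernel has mapped directly.  The relocated
 * switch-over code runs from the identity-mapped area while it copies the
 * image pages back into place.
 */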
static int set_up_temporary_mappings(void)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page	= alloc_pgt_page,
		.page_flag	= __PAGE_KERNEL_LARGE_EXEC,
		.offset		= __PAGE_OFFSET,
	};
	unsigned long mstart, mend;
	pgd_t *pgd;
	int result;
	int i;

	pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!pgd)
		return -ENOMEM;

	/* Prepare a temporary mapping for the kernel text */
	result = set_up_temporary_text_mapping(pgd);
	if (result)
		return result;

	/* Set up the direct mapping from scratch */
	for (i = 0; i < nr_pfn_mapped; i++) {
		mstart = pfn_mapped[i].start << PAGE_SHIFT;
		mend   = pfn_mapped[i].end << PAGE_SHIFT;

		result = kernel_ident_mapping_init(&info, pgd, mstart, mend);
		if (result)
			return result;
	}

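	/*
	 * Publish the physical address of the new top-level table; the
	 * assembly entry code (restore_image() in hibernate_asm_64.S) loads
	 * it into %cr3 before it starts touching image pages.
	 */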
	temp_pgt = __pa(pgd);
	return 0;
}

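/*
 * swsusp_arch_resume - restore the hibernation image.
 *
 * Called by the hibernation core once the image has been loaded into
 * memory.  Builds the temporary page tables, relocates the switch-over
 * code and jumps to it; on success control never returns here, because
 * execution continues in the restored image kernel.
 */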
asmlinkage int swsusp_arch_resume(void)
{
	int error;

	/* We have enough memory; from this point on a failure is unrecoverable. */
	error = set_up_temporary_mappings();
	if (error)
		return error;

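	/*
	 * Copy the low-level switch-over code to a safe, identity-mapped
	 * page so that it keeps running while the original kernel text is
	 * being overwritten with the image kernel's text.
	 */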
	error = relocate_restore_code();
	if (error)
		return error;

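	/* Switch to the temporary page tables and copy the image back. */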
	restore_image();
	return 0;
}