/*
 * handle transition of Linux booting another kernel
 * Copyright (C) 2002-2005 Eric Biederman  <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/string.h>
#include <linux/reboot.h>
#include <linux/numa.h>
#include <linux/ftrace.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/io.h>

#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))
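
/*
 * Statically allocated, page-aligned page tables handed to the
 * relocate_kernel() assembly stub.  They appear to let the stub map
 * its control page twice, once at its current virtual address (the
 * *_0 set) and once at its physical address (the *_1 set), so that
 * execution survives the switch to the identity-mapped page tables.
 */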
static u64 kexec_pgd[512] PAGE_ALIGNED;
static u64 kexec_pud0[512] PAGE_ALIGNED;
static u64 kexec_pmd0[512] PAGE_ALIGNED;
static u64 kexec_pte0[512] PAGE_ALIGNED;
static u64 kexec_pud1[512] PAGE_ALIGNED;
static u64 kexec_pmd1[512] PAGE_ALIGNED;
static u64 kexec_pte1[512] PAGE_ALIGNED;

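/*
 * Fill one pmd page with 2MB "large page" identity mappings, covering
 * PUD_SIZE (1GB) of physical memory starting at addr.
 */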
static void init_level2_page(pmd_t *level2p, unsigned long addr)
{
	unsigned long end_addr;

	addr &= PAGE_MASK;
	end_addr = addr + PUD_SIZE;
	while (addr < end_addr) {
		set_pmd(level2p++, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
		addr += PMD_SIZE;
	}
}

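/*
 * Populate one pud page.  For each PUD_SIZE (1GB) chunk below
 * last_addr a pmd table is allocated from the image's control pages;
 * the remaining entries are cleared so no stale mappings survive.
 */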
static int init_level3_page(struct kimage *image, pud_t *level3p,
				unsigned long addr, unsigned long last_addr)
{
	unsigned long end_addr;
	int result;

	result = 0;
	addr &= PAGE_MASK;
	end_addr = addr + PGDIR_SIZE;
	while ((addr < last_addr) && (addr < end_addr)) {
		struct page *page;
		pmd_t *level2p;

		page = kimage_alloc_control_pages(image, 0);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		level2p = (pmd_t *)page_address(page);
		init_level2_page(level2p, addr);
		set_pud(level3p++, __pud(__pa(level2p) | _KERNPG_TABLE));
		addr += PUD_SIZE;
	}
	/* clear the unused entries */
	while (addr < end_addr) {
		pud_clear(level3p++);
		addr += PUD_SIZE;
	}
out:
	return result;
}

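/*
 * Populate the top-level page table, one pud table per PGDIR_SIZE
 * (512GB) chunk, again backed by control pages so that the mappings
 * remain intact until the new kernel is entered.
 */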
static int init_level4_page(struct kimage *image, pgd_t *level4p,
				unsigned long addr, unsigned long last_addr)
{
	unsigned long end_addr;
	int result;

	result = 0;
	addr &= PAGE_MASK;
	end_addr = addr + (PTRS_PER_PGD * PGDIR_SIZE);
	while ((addr < last_addr) && (addr < end_addr)) {
		struct page *page;
		pud_t *level3p;

		page = kimage_alloc_control_pages(image, 0);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		level3p = (pud_t *)page_address(page);
		result = init_level3_page(image, level3p, addr, last_addr);
		if (result) {
			goto out;
		}
		set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE));
		addr += PGDIR_SIZE;
	}
	/* clear the unused entries */
	while (addr < end_addr) {
		pgd_clear(level4p++);
		addr += PGDIR_SIZE;
	}
out:
	return result;
}

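/*
 * Build an identity mapping (virtual == physical) of all memory up to
 * max_pfn, rooted at the control code page, for relocate_kernel() to
 * run on once the current kernel's page tables are abandoned.
 */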
static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
{
	pgd_t *level4p;
	level4p = (pgd_t *)__va(start_pgtable);
	return init_level4_page(image, level4p, 0, max_pfn << PAGE_SHIFT);
}

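/*
 * Point the IDT register at a caller-supplied table; the old IDT is
 * not saved.
 */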
static void set_idt(void *newidt, u16 limit)
{
	struct desc_ptr curidt;

	/* x86-64 supports unaligned loads & stores */
	curidt.size    = limit;
	curidt.address = (unsigned long)newidt;

	__asm__ __volatile__ (
		"lidtq %0\n"
		: : "m" (curidt)
		);
}

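/* Point the GDT register at a caller-supplied table; see set_idt(). */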
static void set_gdt(void *newgdt, u16 limit)
{
	struct desc_ptr curgdt;

	/* x86-64 supports unaligned loads & stores */
	curgdt.size    = limit;
	curgdt.address = (unsigned long)newgdt;

	__asm__ __volatile__ (
		"lgdtq %0\n"
		: : "m" (curgdt)
		);
}

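/*
 * Reload all data segment registers from __KERNEL_DS, refreshing the
 * hidden descriptor caches while the GDT is still valid.  See the
 * comment in machine_kexec() below for why this matters.
 */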
static void load_segments(void)
{
	__asm__ __volatile__ (
		"\tmovl %0,%%ds\n"
		"\tmovl %0,%%es\n"
		"\tmovl %0,%%ss\n"
		"\tmovl %0,%%fs\n"
		"\tmovl %0,%%gs\n"
		: : "a" (__KERNEL_DS) : "memory"
		);
}

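/*
 * Called at image-load time, while the kernel is still fully
 * functional, so it is still safe to allocate memory and to fail.
 */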
int machine_kexec_prepare(struct kimage *image)
{
	unsigned long start_pgtable;
	int result;

	/* Calculate the offsets */
	start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;

	/* Set up the identity-mapped 64-bit page table */
	result = init_pgtable(image, start_pgtable);
	if (result)
		return result;

	return 0;
}

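/*
 * Nothing to undo here: the control pages belong to the image and are
 * released by the generic kexec code.
 */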
void machine_kexec_cleanup(struct kimage *image)
{
}

/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
NORET_TYPE void machine_kexec(struct kimage *image)
{
	unsigned long page_list[PAGES_NR];
	void *control_page;

	tracer_disable();

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();

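	/*
	 * The first control page holds the identity-mapped page table
	 * built by machine_kexec_prepare(); the page after it receives
	 * a copy of the relocate_kernel() stub, which must keep running
	 * after the current kernel mappings are gone.
	 */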
	control_page = page_address(image->control_code_page) + PAGE_SIZE;
	memcpy(control_page, relocate_kernel, PAGE_SIZE);

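	/*
	 * Give relocate_kernel() both the physical and the virtual
	 * address of each page it uses: the virtual addresses are valid
	 * under the current page tables, the physical ones after the
	 * switch to the identity mapping.
	 */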
	page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
	page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel;
	page_list[PA_PGD] = virt_to_phys(&kexec_pgd);
	page_list[VA_PGD] = (unsigned long)kexec_pgd;
	page_list[PA_PUD_0] = virt_to_phys(&kexec_pud0);
	page_list[VA_PUD_0] = (unsigned long)kexec_pud0;
	page_list[PA_PMD_0] = virt_to_phys(&kexec_pmd0);
	page_list[VA_PMD_0] = (unsigned long)kexec_pmd0;
	page_list[PA_PTE_0] = virt_to_phys(&kexec_pte0);
	page_list[VA_PTE_0] = (unsigned long)kexec_pte0;
	page_list[PA_PUD_1] = virt_to_phys(&kexec_pud1);
	page_list[VA_PUD_1] = (unsigned long)kexec_pud1;
	page_list[PA_PMD_1] = virt_to_phys(&kexec_pmd1);
	page_list[VA_PMD_1] = (unsigned long)kexec_pmd1;
	page_list[PA_PTE_1] = virt_to_phys(&kexec_pte1);
	page_list[VA_PTE_1] = (unsigned long)kexec_pte1;

	page_list[PA_TABLE_PAGE] =
	  (unsigned long)__pa(page_address(image->control_code_page));

	/* The segment registers are funny things: they have both a
	 * visible and an invisible part.  Whenever the visible part is
	 * set to a specific selector, the invisible part is loaded
	 * from a table in memory.  At no other time is the
	 * descriptor table in memory accessed.
	 *
	 * I take advantage of this here by force loading the
	 * segments, before I zap the gdt with an invalid value.
	 */
	load_segments();
	/* The gdt & idt are now invalid.
	 * If you want to load them you must set up your own idt & gdt.
	 */
	set_gdt(phys_to_virt(0), 0);
	set_idt(phys_to_virt(0), 0);

	/* now call it */
	relocate_kernel((unsigned long)image->head, (unsigned long)page_list,
			image->start);
}

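/*
 * Record architecture-specific symbols in the vmcoreinfo note so that
 * user-space dump tools (e.g. makedumpfile) can locate these kernel
 * data structures in the crash dump.
 */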
void arch_crash_save_vmcoreinfo(void)
{
	VMCOREINFO_SYMBOL(phys_base);
	VMCOREINFO_SYMBOL(init_level4_pgt);

#ifdef CONFIG_NUMA
	VMCOREINFO_SYMBOL(node_data);
	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
}