/*
 * Handle the transition of Linux booting another kernel
 * Copyright (C) 2002-2005 Eric Biederman  <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/string.h>
#include <linux/reboot.h>
#include <linux/numa.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/io.h>

#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))
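/*
 * Transition page tables.  relocate_kernel() is handed both the
 * physical and virtual address of each of these via page_list[] below,
 * so the assembly code can identity map itself while it switches away
 * from the old kernel's page tables.
 */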
static u64 kexec_pgd[512] PAGE_ALIGNED;
static u64 kexec_pud0[512] PAGE_ALIGNED;
static u64 kexec_pmd0[512] PAGE_ALIGNED;
static u64 kexec_pte0[512] PAGE_ALIGNED;
static u64 kexec_pud1[512] PAGE_ALIGNED;
static u64 kexec_pmd1[512] PAGE_ALIGNED;
static u64 kexec_pte1[512] PAGE_ALIGNED;

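/*
 * Identity map one PUD_SIZE region starting at addr, using 2MB
 * executable large pages, one pmd entry per PMD_SIZE chunk.
 */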
static void init_level2_page(pmd_t *level2p, unsigned long addr)
{
	unsigned long end_addr;

	addr &= PAGE_MASK;
	end_addr = addr + PUD_SIZE;
	while (addr < end_addr) {
		set_pmd(level2p++, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
		addr += PMD_SIZE;
	}
}

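/*
 * Fill one pud page: allocate a pmd page from the image's control
 * pages for each PUD_SIZE chunk below last_addr, and clear the
 * remaining entries.
 */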
static int init_level3_page(struct kimage *image, pud_t *level3p,
				unsigned long addr, unsigned long last_addr)
{
	unsigned long end_addr;
	int result;

	result = 0;
	addr &= PAGE_MASK;
	end_addr = addr + PGDIR_SIZE;
	while ((addr < last_addr) && (addr < end_addr)) {
		struct page *page;
		pmd_t *level2p;

		page = kimage_alloc_control_pages(image, 0);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		level2p = (pmd_t *)page_address(page);
		init_level2_page(level2p, addr);
		set_pud(level3p++, __pud(__pa(level2p) | _KERNPG_TABLE));
		addr += PUD_SIZE;
	}
	/* clear the unused entries */
	while (addr < end_addr) {
		pud_clear(level3p++);
		addr += PUD_SIZE;
	}
out:
	return result;
}

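/*
 * Fill the top-level page table: allocate and populate a pud page for
 * each PGDIR_SIZE chunk below last_addr, and clear the unused entries.
 */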
static int init_level4_page(struct kimage *image, pgd_t *level4p,
				unsigned long addr, unsigned long last_addr)
{
	unsigned long end_addr;
	int result;

	result = 0;
	addr &= PAGE_MASK;
	end_addr = addr + (PTRS_PER_PGD * PGDIR_SIZE);
	while ((addr < last_addr) && (addr < end_addr)) {
		struct page *page;
		pud_t *level3p;

		page = kimage_alloc_control_pages(image, 0);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		level3p = (pud_t *)page_address(page);
		result = init_level3_page(image, level3p, addr, last_addr);
		if (result)
			goto out;
		set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE));
		addr += PGDIR_SIZE;
	}
	/* clear the unused entries */
	while (addr < end_addr) {
		pgd_clear(level4p++);
		addr += PGDIR_SIZE;
	}
out:
	return result;
}

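/*
 * Build an identity-mapped page table covering physical memory from 0
 * to end_pfn, rooted at the image's control code page.
 */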
static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
{
	pgd_t *level4p;
	level4p = (pgd_t *)__va(start_pgtable);
	return init_level4_page(image, level4p, 0, end_pfn << PAGE_SHIFT);
}

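/* Load the IDT register with a new base address and limit. */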
static void set_idt(void *newidt, u16 limit)
{
	struct desc_ptr curidt;

	/* x86-64 supports unaligned loads & stores */
	curidt.size    = limit;
	curidt.address = (unsigned long)newidt;

	__asm__ __volatile__ (
		"lidtq %0\n"
		: : "m" (curidt)
		);
}

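/* Load the GDT register with a new base address and limit. */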
static void set_gdt(void *newgdt, u16 limit)
{
	struct desc_ptr curgdt;

	/* x86-64 supports unaligned loads & stores */
	curgdt.size    = limit;
	curgdt.address = (unsigned long)newgdt;

	__asm__ __volatile__ (
		"lgdtq %0\n"
		: : "m" (curgdt)
		);
}

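/*
 * Reload the data segment registers from the current GDT so that their
 * hidden descriptor caches remain usable after the GDT itself is
 * zapped in machine_kexec().
 */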
static void load_segments(void)
{
	__asm__ __volatile__ (
		"\tmovl %0,%%ds\n"
		"\tmovl %0,%%es\n"
		"\tmovl %0,%%ss\n"
		"\tmovl %0,%%fs\n"
		"\tmovl %0,%%gs\n"
		: : "a" (__KERNEL_DS) : "memory"
		);
}

int machine_kexec_prepare(struct kimage *image)
{
	unsigned long start_pgtable;
	int result;

	/* Calculate the physical address of the transition page table */
	start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;

	/* Set up the identity-mapped 64-bit page table */
	result = init_pgtable(image, start_pgtable);
	if (result)
		return result;

	return 0;
}

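/* Nothing architecture-specific to release here. */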
void machine_kexec_cleanup(struct kimage *image)
{
}

/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
NORET_TYPE void machine_kexec(struct kimage *image)
{
	unsigned long page_list[PAGES_NR];
	void *control_page;

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();

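	/*
	 * The first control page holds the transition page table
	 * (PA_TABLE_PAGE below); relocate_kernel() is copied into the
	 * page after it.
	 */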
	control_page = page_address(image->control_code_page) + PAGE_SIZE;
	memcpy(control_page, relocate_kernel, PAGE_SIZE);

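	/*
	 * Hand relocate_kernel() both the physical and virtual address
	 * of each page it touches: it starts out running in the kernel
	 * mapping and finishes in the identity mapping.
	 */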
	page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
	page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel;
	page_list[PA_PGD] = virt_to_phys(&kexec_pgd);
	page_list[VA_PGD] = (unsigned long)kexec_pgd;
	page_list[PA_PUD_0] = virt_to_phys(&kexec_pud0);
	page_list[VA_PUD_0] = (unsigned long)kexec_pud0;
	page_list[PA_PMD_0] = virt_to_phys(&kexec_pmd0);
	page_list[VA_PMD_0] = (unsigned long)kexec_pmd0;
	page_list[PA_PTE_0] = virt_to_phys(&kexec_pte0);
	page_list[VA_PTE_0] = (unsigned long)kexec_pte0;
	page_list[PA_PUD_1] = virt_to_phys(&kexec_pud1);
	page_list[VA_PUD_1] = (unsigned long)kexec_pud1;
	page_list[PA_PMD_1] = virt_to_phys(&kexec_pmd1);
	page_list[VA_PMD_1] = (unsigned long)kexec_pmd1;
	page_list[PA_PTE_1] = virt_to_phys(&kexec_pte1);
	page_list[VA_PTE_1] = (unsigned long)kexec_pte1;

	page_list[PA_TABLE_PAGE] =
	  (unsigned long)__pa(page_address(image->control_code_page));

	/* The segment registers are funny things: they have both a
	 * visible and an invisible part.  Whenever the visible part is
	 * set to a specific selector, the invisible part is loaded
	 * from a descriptor table in memory.  At no other time is the
	 * descriptor table in memory accessed.
	 *
	 * Take advantage of this here by force-loading the segments
	 * before zapping the GDT with an invalid value.
	 */
	load_segments();
	/* The GDT and IDT are now invalid.  If they are needed again,
	 * new tables must be set up first.
	 */
	set_gdt(phys_to_virt(0), 0);
	set_idt(phys_to_virt(0), 0);

	/* Jump to the relocation trampoline; this never returns */
	relocate_kernel((unsigned long)image->head, (unsigned long)page_list,
			image->start);
}

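/*
 * Record the symbols and sizes that crash-dump tools (e.g.
 * makedumpfile) need in order to interpret /proc/vmcore.
 */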
void arch_crash_save_vmcoreinfo(void)
{
	VMCOREINFO_SYMBOL(phys_base);
	VMCOREINFO_SYMBOL(init_level4_pgt);

#ifdef CONFIG_NUMA
	VMCOREINFO_SYMBOL(node_data);
	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
}