xref: /openbmc/linux/arch/arm64/kernel/hibernate.c (revision 77a87824)
/*
 * Hibernate support specific to ARM64
 *
 * Derived from work on ARM hibernation support by:
 *
 * Ubuntu project, hibernation support for mach-dove
 * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu)
 * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.)
 *  https://lkml.org/lkml/2010/6/18/4
 *  https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html
 *  https://patchwork.kernel.org/patch/96442/
 *
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * License terms: GNU General Public License (GPL) version 2
 */
#define pr_fmt(x) "hibernate: " x
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/pm.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/utsname.h>
#include <linux/version.h>

#include <asm/barrier.h>
#include <asm/cacheflush.h>
#include <asm/irqflags.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sections.h>
#include <asm/smp.h>
#include <asm/suspend.h>
#include <asm/virt.h>

/*
 * Hibernate core relies on this value being 0 on resume, and marks it
 * __nosavedata assuming it will keep the resume kernel's '0' value. This
 * doesn't happen with KASLR, so the resume path below sets it to 0 by hand.
 *
 * defined as "__visible int in_suspend __nosavedata" in
 * kernel/power/hibernate.c
 */
extern int in_suspend;

/* Find a symbol's alias in the linear map */
#define LMADDR(x)	phys_to_virt(virt_to_phys(x))
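/*
 * LMADDR() converts a kernel-image address to the linear-map alias of the
 * same physical page: virt_to_phys() accepts the image address, and
 * phys_to_virt() maps the result back via the linear map. With KASLR the
 * two virtual addresses of a symbol differ, hence this dance.
 */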

/* Do we need to reset el2? */
#define el2_reset_needed() (is_hyp_mode_available() && !is_kernel_in_hyp_mode())
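/*
 * True when we were entered at EL2 but run at EL1 (non-VHE, hyp stub
 * installed), so resume must re-install vectors at EL2; false if we booted
 * at EL1 or are running with VHE (see the matching comment in
 * swsusp_arch_resume()).
 */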

/*
 * Start/end of the hibernate exit code; this must be copied to a 'safe'
 * location in memory, and executed from there.
 */
extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[];

/* temporary el2 vectors in the __hibernate_exit_text section. */
extern char hibernate_el2_vectors[];

/* hyp-stub vectors, used to restore el2 during resume from hibernate. */
extern char __hyp_stub_vectors[];

/*
 * Values that may not change over hibernate/resume. We put the build number
 * and date in here so that we guarantee not to resume with a different
 * kernel.
 */
struct arch_hibernate_hdr_invariants {
	char		uts_version[__NEW_UTS_LEN + 1];
};
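/*
 * uts_version holds the kernel's UTS_VERSION string, e.g.
 * "#1 SMP PREEMPT Thu Jun 23 10:00:00 UTC 2016" (illustrative value only):
 * it embeds the build number and build timestamp mentioned above.
 */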

/* These values need to be known across a hibernate/restore. */
static struct arch_hibernate_hdr {
	struct arch_hibernate_hdr_invariants invariants;

	/* These are needed to find the relocated kernel if built with kaslr */
	phys_addr_t	ttbr1_el1;
	void		(*reenter_kernel)(void);

	/*
	 * We need to know where the __hyp_stub_vectors are after restore to
	 * re-configure el2.
	 */
	phys_addr_t	__hyp_stub_vectors;
} resume_hdr;

static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
{
	memset(i, 0, sizeof(*i));
	memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version));
}

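/*
 * Report whether a pfn falls in the kernel's [__nosave_begin, __nosave_end)
 * region. The hibernate core neither saves these pages into the image nor
 * restores them: they belong to the kernel performing the resume.
 */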
int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = virt_to_pfn(&__nosave_begin);
	unsigned long nosave_end_pfn = virt_to_pfn(&__nosave_end - 1);

	return (pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn);
}

void notrace save_processor_state(void)
{
	WARN_ON(num_online_cpus() != 1);
}

void notrace restore_processor_state(void)
{
}

int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct arch_hibernate_hdr *hdr = addr;

	if (max_size < sizeof(*hdr))
		return -EOVERFLOW;

	arch_hdr_invariants(&hdr->invariants);
	hdr->ttbr1_el1		= virt_to_phys(swapper_pg_dir);
	hdr->reenter_kernel	= _cpu_resume;

	/* We can't use __hyp_get_vectors() because kvm may still be loaded */
	if (el2_reset_needed())
		hdr->__hyp_stub_vectors = virt_to_phys(__hyp_stub_vectors);
	else
		hdr->__hyp_stub_vectors = 0;

	return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_save);

int arch_hibernation_header_restore(void *addr)
{
	struct arch_hibernate_hdr_invariants invariants;
	struct arch_hibernate_hdr *hdr = addr;

	arch_hdr_invariants(&invariants);
	if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) {
		pr_crit("Hibernate image not generated by this kernel!\n");
		return -EINVAL;
	}

	resume_hdr = *hdr;

	return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_restore);

/*
 * Copies length bytes, starting at src_start, into a new page, performs
 * cache maintenance, then maps it at the specified low address as
 * executable.
 *
 * This is used by hibernate to copy the code it needs to execute when
 * overwriting the kernel text. This function generates a new set of page
 * tables, which it loads into ttbr0: mapping the copy via ttbr0 leaves the
 * kernel's own ttbr1 mappings intact until hibernate_exit replaces them.
 *
 * Length is provided as we probably only want 4K of data, even on a 64K
 * page system.
 */
static int create_safe_exec_page(void *src_start, size_t length,
				 unsigned long dst_addr,
				 phys_addr_t *phys_dst_addr,
				 void *(*allocator)(gfp_t mask),
				 gfp_t mask)
{
	int rc = 0;
	pgd_t *trans_pgd;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long dst = (unsigned long)allocator(mask);

	if (!dst) {
		rc = -ENOMEM;
		goto out;
	}

	memcpy((void *)dst, src_start, length);
	flush_icache_range(dst, dst + length);

	/* allocator() may fail; don't walk an unallocated top-level table */
	trans_pgd = allocator(mask);
	if (!trans_pgd) {
		rc = -ENOMEM;
		goto out;
	}
	pgd = pgd_offset_raw(trans_pgd, dst_addr);
	if (pgd_none(*pgd)) {
		pud = allocator(mask);
		if (!pud) {
			rc = -ENOMEM;
			goto out;
		}
		pgd_populate(&init_mm, pgd, pud);
	}

	pud = pud_offset(pgd, dst_addr);
	if (pud_none(*pud)) {
		pmd = allocator(mask);
		if (!pmd) {
			rc = -ENOMEM;
			goto out;
		}
		pud_populate(&init_mm, pud, pmd);
	}

	pmd = pmd_offset(pud, dst_addr);
	if (pmd_none(*pmd)) {
		pte = allocator(mask);
		if (!pte) {
			rc = -ENOMEM;
			goto out;
		}
		pmd_populate_kernel(&init_mm, pmd, pte);
	}

	pte = pte_offset_kernel(pmd, dst_addr);
	set_pte(pte, __pte(virt_to_phys((void *)dst) |
			 pgprot_val(PAGE_KERNEL_EXEC)));

	/* Load our new page tables */
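	/*
	 * In outline: write the new table base to ttbr0_el1, synchronise
	 * (isb), invalidate any stale EL1 TLB entries across the
	 * inner-shareable domain (tlbi vmalle1is), wait for the invalidation
	 * to finish (dsb ish), then resynchronise before the mapping is used.
	 */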
	asm volatile("msr	ttbr0_el1, %0;"
		     "isb;"
		     "tlbi	vmalle1is;"
		     "dsb	ish;"
		     "isb" : : "r"(virt_to_phys(trans_pgd)));

	*phys_dst_addr = virt_to_phys((void *)dst);

out:
	return rc;
}

int swsusp_arch_suspend(void)
{
	int ret = 0;
	unsigned long flags;
	struct sleep_stack_data state;

	if (cpus_are_stuck_in_kernel()) {
		pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n");
		return -EBUSY;
	}

	local_dbg_save(flags);

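	/*
	 * Like setjmp(): __cpu_suspend_enter() returns non-zero on the
	 * initial call (so we snapshot memory with swsusp_save()), and zero
	 * when the saved context is re-entered through _cpu_resume by the
	 * kernel that restored the image.
	 */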
	if (__cpu_suspend_enter(&state)) {
		ret = swsusp_save();
	} else {
		/* Clean kernel to PoC for secondary core startup */
		__flush_dcache_area(LMADDR(KERNEL_START), KERNEL_END - KERNEL_START);

		/*
		 * Tell the hibernation core that we've just restored
		 * the memory
		 */
		in_suspend = 0;

		__cpu_suspend_exit();
	}

	local_dbg_restore(flags);

	return ret;
}

static int copy_pte(pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long start,
		    unsigned long end)
{
	pte_t *src_pte;
	pte_t *dst_pte;
	unsigned long addr = start;

	dst_pte = (pte_t *)get_safe_page(GFP_ATOMIC);
	if (!dst_pte)
		return -ENOMEM;
	pmd_populate_kernel(&init_mm, dst_pmd, dst_pte);
	dst_pte = pte_offset_kernel(dst_pmd, start);

	src_pte = pte_offset_kernel(src_pmd, start);
	do {
		if (!pte_none(*src_pte))
			/*
			 * Resume will overwrite areas that may be marked
			 * read only (code, rodata). Clear the RDONLY bit from
			 * the temporary mappings we use during restore.
			 */
			set_pte(dst_pte, __pte(pte_val(*src_pte) & ~PTE_RDONLY));
	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);

	return 0;
}

static int copy_pmd(pud_t *dst_pud, pud_t *src_pud, unsigned long start,
		    unsigned long end)
{
	pmd_t *src_pmd;
	pmd_t *dst_pmd;
	unsigned long next;
	unsigned long addr = start;

	if (pud_none(*dst_pud)) {
		dst_pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
		if (!dst_pmd)
			return -ENOMEM;
		pud_populate(&init_mm, dst_pud, dst_pmd);
	}
	dst_pmd = pmd_offset(dst_pud, start);

	src_pmd = pmd_offset(src_pud, start);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*src_pmd))
			continue;
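		/*
		 * A table entry points at a next-level table and must be
		 * walked so each pte can be copied; a block mapping covers
		 * the region directly and is copied in one go, with the
		 * read-only attribute cleared as in copy_pte().
		 */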
		if (pmd_table(*src_pmd)) {
			if (copy_pte(dst_pmd, src_pmd, addr, next))
				return -ENOMEM;
		} else {
			set_pmd(dst_pmd,
				__pmd(pmd_val(*src_pmd) & ~PMD_SECT_RDONLY));
		}
	} while (dst_pmd++, src_pmd++, addr = next, addr != end);

	return 0;
}

static int copy_pud(pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long start,
		    unsigned long end)
{
	pud_t *dst_pud;
	pud_t *src_pud;
	unsigned long next;
	unsigned long addr = start;

	if (pgd_none(*dst_pgd)) {
		dst_pud = (pud_t *)get_safe_page(GFP_ATOMIC);
		if (!dst_pud)
			return -ENOMEM;
		pgd_populate(&init_mm, dst_pgd, dst_pud);
	}
	dst_pud = pud_offset(dst_pgd, start);

	src_pud = pud_offset(src_pgd, start);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none(*src_pud))
			continue;
		if (pud_table(*src_pud)) {
			if (copy_pmd(dst_pud, src_pud, addr, next))
				return -ENOMEM;
		} else {
			set_pud(dst_pud,
				__pud(pud_val(*src_pud) & ~PMD_SECT_RDONLY));
		}
	} while (dst_pud++, src_pud++, addr = next, addr != end);

	return 0;
}

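/*
 * Build, under dst_pgd, a copy of the kernel page tables covering
 * [start, end). An end of zero effectively means "the top of the address
 * space", so the copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0) call below
 * duplicates just the linear map.
 */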
static int copy_page_tables(pgd_t *dst_pgd, unsigned long start,
			    unsigned long end)
{
	unsigned long next;
	unsigned long addr = start;
	pgd_t *src_pgd = pgd_offset_k(start);

	dst_pgd = pgd_offset_raw(dst_pgd, start);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none(*src_pgd))
			continue;
		if (copy_pud(dst_pgd, src_pgd, addr, next))
			return -ENOMEM;
	} while (dst_pgd++, src_pgd++, addr = next, addr != end);

	return 0;
}

/*
 * Set up, then resume from the hibernate image using
 * swsusp_arch_suspend_exit().
 *
 * Memory allocated by get_safe_page() will be dealt with by the hibernate
 * code; we don't need to free it here.
 */
int swsusp_arch_resume(void)
{
	int rc = 0;
	void *zero_page;
	size_t exit_size;
	pgd_t *tmp_pg_dir;
	void *lm_restore_pblist;
	phys_addr_t phys_hibernate_exit;
	void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
					  void *, phys_addr_t, phys_addr_t);

	/*
	 * Locate the exit code in the bottom-but-one page, so that *NULL
	 * still has disastrous effects.
	 */
	hibernate_exit = (void *)PAGE_SIZE;
	exit_size = __hibernate_exit_text_end - __hibernate_exit_text_start;
	/*
	 * Copy swsusp_arch_suspend_exit() to a safe page. This will generate
	 * a new set of ttbr0 page tables and load them.
	 */
	rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
				   (unsigned long)hibernate_exit,
				   &phys_hibernate_exit,
				   (void *)get_safe_page, GFP_ATOMIC);
	if (rc) {
		pr_err("Failed to create safe executable page for hibernate_exit code.\n");
		goto out;
	}

	/*
	 * The hibernate exit text contains a set of el2 vectors, which will
	 * be executed at el2 with the mmu off in order to reload the hyp-stub.
	 */
	__flush_dcache_area(hibernate_exit, exit_size);

	/*
	 * Restoring the memory image will overwrite the ttbr1 page tables.
	 * Create a second copy of just the linear map, and use this when
	 * restoring.
	 */
	tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!tmp_pg_dir) {
		pr_err("Failed to allocate memory for temporary page tables.\n");
		rc = -ENOMEM;
		goto out;
	}
	rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
	if (rc)
		goto out;

	/*
	 * Since we only copied the linear map, we need to find restore_pblist's
	 * linear map address.
	 */
	lm_restore_pblist = LMADDR(restore_pblist);

	/*
	 * KASLR will cause the el2 vectors to be in a different location in
	 * the resumed kernel. Load hibernate's temporary copy into el2.
	 *
	 * We can skip this step if we booted at EL1, or are running with VHE.
	 */
	if (el2_reset_needed()) {
		phys_addr_t el2_vectors = phys_hibernate_exit;  /* base */
		el2_vectors += hibernate_el2_vectors -
			       __hibernate_exit_text_start;     /* offset */

		__hyp_set_vectors(el2_vectors);
	}

	/*
	 * We need a zero page that is zero before & after resume in order
	 * to break-before-make on the ttbr1 page tables.
	 */
	zero_page = (void *)get_safe_page(GFP_ATOMIC);
	if (!zero_page) {
		pr_err("Failed to allocate zero page.\n");
		rc = -ENOMEM;
		goto out;
	}
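	/*
	 * Roughly, the arguments are: the temporary page tables to restore
	 * under, the image kernel's ttbr1 to install once the copy is done,
	 * the image kernel's re-entry point, the linear-map alias of
	 * restore_pblist, the physical address of the hyp-stub vectors, and
	 * the zero page used for break-before-make.
	 */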
	hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
		       resume_hdr.reenter_kernel, lm_restore_pblist,
		       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));

out:
	return rc;
}
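/*
 * Hibernate resumes on the CPU that wrote the image, which the core code
 * assumes is the boot CPU (CPU0). Refuse to start hibernation if CPU0 has
 * been hotplugged out.
 */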
static int check_boot_cpu_online_pm_callback(struct notifier_block *nb,
					     unsigned long action, void *ptr)
{
	if (action == PM_HIBERNATION_PREPARE &&
	     cpumask_first(cpu_online_mask) != 0) {
		pr_warn("CPU0 is offline.\n");
		return notifier_from_errno(-ENODEV);
	}

	return NOTIFY_OK;
}

static int __init check_boot_cpu_online_init(void)
{
	/*
	 * Set this pm_notifier callback with a lower priority than
	 * cpu_hotplug_pm_callback, so that cpu_hotplug_pm_callback will be
	 * called earlier to disable cpu hotplug before the cpu online check.
	 */
	pm_notifier(check_boot_cpu_online_pm_callback, -INT_MAX);

	return 0;
}
core_initcall(check_boot_cpu_online_init);