/* xref: /openbmc/linux/arch/arm64/kernel/suspend.c (revision 9e8e865bbe294a69666a1996bda3e87825b258c0) */
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/pgtable.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

extern int __cpu_suspend_enter(unsigned long arg, int (*fn)(unsigned long));
/*
 * This is called by __cpu_suspend_enter() to save the state, and do whatever
 * flushing is required to ensure that when the CPU goes to sleep we have
 * the necessary data available when the caches are not searched.
 *
 * ptr: CPU context virtual address
 * save_ptr: address of the location where the context physical address
 *           must be saved
 */
void notrace __cpu_suspend_save(struct cpu_suspend_ctx *ptr,
				phys_addr_t *save_ptr)
{
	*save_ptr = virt_to_phys(ptr);

	cpu_do_suspend(ptr);
	/*
	 * Only flush the context that must be retrieved with the MMU
	 * off. VA primitives ensure the flush is applied to all
	 * cache levels so context is pushed to DRAM.
	 */
	__flush_dcache_area(ptr, sizeof(*ptr));
	__flush_dcache_area(save_ptr, sizeof(*save_ptr));
}

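/*
 * Illustrative sketch (not part of the original file, kept out of the build
 * with #if 0): the general pattern __cpu_suspend_save() follows when
 * publishing data for an observer that runs with the MMU and caches off,
 * i.e. the resume code. The helper name is hypothetical; virt_to_phys()
 * and __flush_dcache_area() are the real primitives used above.
 */
#if 0
static phys_addr_t example_publish_for_mmu_off(void *obj, size_t size,
					       phys_addr_t *slot)
{
	/* Record where the object lives in physical memory ... */
	*slot = virt_to_phys(obj);

	/* ... and push both the object and the pointer slot out to DRAM. */
	__flush_dcache_area(obj, size);
	__flush_dcache_area(slot, sizeof(*slot));

	return *slot;
}
#endif
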
/*
 * This hook is provided so that cpu_suspend code can restore HW
 * breakpoints as early as possible in the resume path, before re-enabling
 * debug exceptions. This cannot be done from a CPU PM notifier, since by
 * the time the notifier runs debug exceptions might already have been
 * enabled, with the HW breakpoint registers' contents still in an unknown
 * state.
 */
static void (*hw_breakpoint_restore)(void *);
void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
{
	/* Prevent multiple restore hook initializations */
	if (WARN_ON(hw_breakpoint_restore))
		return;
	hw_breakpoint_restore = hw_bp_restore;
}

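/*
 * Illustrative sketch (not part of the original file, kept out of the build
 * with #if 0): how a debug subsystem registers its restore hook. In mainline
 * this is done by the arm64 hw_breakpoint code; the function names and the
 * initcall level below are hypothetical.
 */
#if 0
static void example_hw_bp_reset(void *unused)
{
	/* Reprogram breakpoint/watchpoint registers from saved per-cpu state. */
}

static int __init example_register_bp_restorer(void)
{
	cpu_suspend_set_dbg_restorer(example_hw_bp_reset);
	return 0;
}
arch_initcall(example_register_bp_restorer);
#endif
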
/*
 * cpu_suspend
 *
 * arg: argument to pass to the finisher function
 * fn: finisher function pointer
 */
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
	int ret;
	unsigned long flags;

	/*
	 * From this point on, debug exceptions are disabled to prevent
	 * kernel debuggers from updating the mdscr register (which is
	 * saved and restored along with the general purpose registers).
	 */
	local_dbg_save(flags);

	/*
	 * Function graph tracer state gets inconsistent when the kernel
	 * calls functions that never return (i.e. suspend finishers), hence
	 * disable graph tracing during their execution.
	 */
	pause_graph_tracing();

	/*
	 * The mm context is saved on the stack; it will be restored when
	 * the cpu comes out of reset through the identity mapped
	 * page tables, so that the thread address space is properly
	 * set up on function return.
	 */
	ret = __cpu_suspend_enter(arg, fn);
	if (ret == 0) {
		/*
		 * We are resuming from reset with the idmap active in TTBR0_EL1.
		 * We must uninstall the idmap and restore the expected MMU
		 * state before we can possibly return to userspace.
		 */
		cpu_uninstall_idmap();

		/*
		 * Restore the per-cpu offset before any kernel
		 * subsystem relying on it has a chance to run.
		 */
		set_my_cpu_offset(per_cpu_offset(smp_processor_id()));

		/*
		 * Restore HW breakpoint registers to sane values
		 * before debug exceptions are possibly re-enabled
		 * through local_dbg_restore.
		 */
		if (hw_breakpoint_restore)
			hw_breakpoint_restore(NULL);
	}

	unpause_graph_tracing();

	/*
	 * Restore pstate flags. The OS lock and mdscr have already been
	 * restored, so from this point onwards, debugging is fully
	 * re-enabled if it was enabled when the core started shutdown.
	 */
	local_dbg_restore(flags);

	return ret;
}

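/*
 * Illustrative sketch (not part of the original file, kept out of the build
 * with #if 0): how a caller such as a cpuidle or PSCI backend would use
 * cpu_suspend(). The finisher must not return on success; cpu_suspend()
 * hands back 0 once the CPU has resumed, and a non-zero value if the
 * suspend attempt failed. The firmware entry point below is hypothetical.
 */
#if 0
static int example_suspend_finisher(unsigned long power_state)
{
	/*
	 * Hand over to platform firmware. On success this call does not
	 * return: the CPU is powered down and later restarts through
	 * cpu_resume(), unwinding back out of __cpu_suspend_enter().
	 */
	return example_firmware_cpu_suspend(power_state);
}

static int example_enter_low_power(unsigned long power_state)
{
	/* Returns 0 once the CPU has resumed, non-zero on failure. */
	return cpu_suspend(power_state, example_suspend_finisher);
}
#endif
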
struct sleep_save_sp sleep_save_sp;

static int __init cpu_suspend_init(void)
{
	void *ctx_ptr;

	/* ctx_ptr is an array of physical addresses */
	ctx_ptr = kcalloc(mpidr_hash_size(), sizeof(phys_addr_t), GFP_KERNEL);

	if (WARN_ON(!ctx_ptr))
		return -ENOMEM;

	sleep_save_sp.save_ptr_stash = ctx_ptr;
	sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr);
	__flush_dcache_area(&sleep_save_sp, sizeof(struct sleep_save_sp));

	return 0;
}
early_initcall(cpu_suspend_init);
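
/*
 * Illustrative sketch (not part of the original file, kept out of the build
 * with #if 0): how the stash allocated in cpu_suspend_init() is conceptually
 * indexed. The low-level suspend entry code hashes the CPU's MPIDR and hands
 * __cpu_suspend_save() the address of the matching slot, so that the resume
 * path can locate the saved context with the MMU off. example_mpidr_hash()
 * is a hypothetical stand-in for that hash computation.
 */
#if 0
static phys_addr_t *example_context_slot(void)
{
	u64 mpidr = read_cpuid_mpidr();

	return &sleep_save_sp.save_ptr_stash[example_mpidr_hash(mpidr)];
}
#endif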