xref: /openbmc/linux/arch/arm/kernel/suspend.c (revision 2612e3bbc0386368a850140a6c9b990cd496a5ec)
// SPDX-License-Identifier: GPL-2.0
#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>

#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/idmap.h>
#include <asm/page.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

extern int __cpu_suspend(unsigned long, int (*)(unsigned long), u32 cpuid);
extern void cpu_resume_mmu(void);

#ifdef CONFIG_MMU
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
	struct mm_struct *mm = current->active_mm;
	u32 __mpidr = cpu_logical_map(smp_processor_id());
	int ret;

	if (!idmap_pgd)
		return -EINVAL;

	/*
	 * Function graph tracer state gets inconsistent when the kernel
	 * calls functions that never return (aka suspend finishers), hence
	 * disable graph tracing during their execution.
	 */
	pause_graph_tracing();

	/*
	 * Provide a temporary page table with an identity mapping for
	 * the MMU-enable code, required for resuming.  On successful
	 * resume (indicated by a zero return code), we need to switch
	 * back to the correct page tables.
	 */
	ret = __cpu_suspend(arg, fn, __mpidr);

	unpause_graph_tracing();

	if (ret == 0) {
		cpu_switch_mm(mm->pgd, mm);
		local_flush_bp_all();
		local_flush_tlb_all();
		check_other_bugs();
	}

	return ret;
}
#else
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
	u32 __mpidr = cpu_logical_map(smp_processor_id());
	int ret;

	pause_graph_tracing();
	ret = __cpu_suspend(arg, fn, __mpidr);
	unpause_graph_tracing();

	return ret;
}
#define	idmap_pgd	NULL
#endif
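
/*
 * Illustrative sketch (not part of the original file): how a platform
 * suspend path might invoke cpu_suspend().  The finisher runs after the
 * CPU state has been saved and cleaned; on a successful power-down it
 * never returns, and cpu_suspend() returns 0 once the CPU comes back
 * through cpu_resume().  example_power_off() is a hypothetical helper,
 * not a kernel API.
 */
#if 0	/* example only */
static int example_finisher(unsigned long arg)
{
	example_power_off(arg);	/* hypothetical: request CPU power-down */
	cpu_do_idle();		/* wait for the power state to take effect */
	return -EIO;		/* reached only if the power-down failed */
}

static int example_enter_lowpower(void)
{
	/* 0 after a successful suspend/resume cycle, negative on error */
	return cpu_suspend(0, example_finisher);
}
#endif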

/*
 * This is called by __cpu_suspend() to save the state, and do whatever
 * flushing is required to ensure that when the CPU goes to sleep we have
 * the necessary data available when the caches are not searched.
 */
void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)
{
	u32 *ctx = ptr;

	*save_ptr = virt_to_phys(ptr);

	/* This must correspond to the LDM in cpu_resume() assembly */
	*ptr++ = virt_to_phys(idmap_pgd);
	*ptr++ = sp;
	*ptr++ = virt_to_phys(cpu_do_resume);

	cpu_do_suspend(ptr);

	flush_cache_louis();

	/*
	 * flush_cache_louis does not guarantee that
	 * save_ptr and ptr are cleaned to main memory,
	 * just up to the Level of Unification Inner Shareable.
	 * Since the context pointer and context itself
	 * are to be retrieved with the MMU off that
	 * data must be cleaned from all cache levels
	 * to main memory using "area" cache primitives.
	 */
	__cpuc_flush_dcache_area(ctx, ptrsz);
	__cpuc_flush_dcache_area(save_ptr, sizeof(*save_ptr));

	outer_clean_range(*save_ptr, *save_ptr + ptrsz);
	outer_clean_range(virt_to_phys(save_ptr),
			  virt_to_phys(save_ptr) + sizeof(*save_ptr));
}

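/*
 * Illustrative layout of the context block saved above, as consumed by
 * the LDM in the cpu_resume() assembly with the MMU off (a sketch
 * derived from the stores in __cpu_suspend_save(), not a new ABI):
 *
 *   *save_ptr --> +---------------------------+
 *                 | phys(idmap_pgd)           |  temporary page tables
 *                 | sp                        |  stack pointer to resume on
 *                 | phys(cpu_do_resume)       |  CPU-specific resume entry
 *                 | cpu_do_suspend() state... |  coprocessor/system regs
 *                 +---------------------------+
 */
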
extern struct sleep_save_sp sleep_save_sp;

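/*
 * Note on the stash below (added commentary): on resume, the low-level
 * assembly reads sleep_save_sp.save_ptr_stash_phys with the MMU and
 * caches off and indexes the array by the hashed MPIDR of the waking
 * CPU.  That is why the array is sized by mpidr_hash_size() and why
 * sleep_save_sp itself must be cleaned to main memory with
 * sync_cache_w() before any CPU can suspend.
 */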
static int cpu_suspend_alloc_sp(void)
{
	void *ctx_ptr;
	/* ctx_ptr is an array of physical addresses */
	ctx_ptr = kcalloc(mpidr_hash_size(), sizeof(u32), GFP_KERNEL);

	if (WARN_ON(!ctx_ptr))
		return -ENOMEM;
	sleep_save_sp.save_ptr_stash = ctx_ptr;
	sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr);
	sync_cache_w(&sleep_save_sp);
	return 0;
}
early_initcall(cpu_suspend_alloc_sp);
123