#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/pgtable.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

/* Low-level entry point (assembly); saves context and calls the finisher. */
extern int __cpu_suspend_enter(unsigned long arg, int (*fn)(unsigned long));
/*
 * This is called by __cpu_suspend_enter() to save the state, and do whatever
 * flushing is required to ensure that when the CPU goes to sleep we have
 * the necessary data available when the caches are not searched.
 *
 * ptr: CPU context virtual address
 * save_ptr: address of the location where the context physical address
 *           must be saved
 */
void notrace __cpu_suspend_save(struct cpu_suspend_ctx *ptr,
				phys_addr_t *save_ptr)
{
	/*
	 * Stash the physical address of the context first: the resume
	 * path runs with the MMU off and must locate the context by
	 * physical address.
	 */
	*save_ptr = virt_to_phys(ptr);

	cpu_do_suspend(ptr);
	/*
	 * Only flush the context that must be retrieved with the MMU
	 * off. VA primitives ensure the flush is applied to all
	 * cache levels so context is pushed to DRAM.
	 */
	__flush_dcache_area(ptr, sizeof(*ptr));
	__flush_dcache_area(save_ptr, sizeof(*save_ptr));
}

/*
 * This hook is provided so that cpu_suspend code can restore HW
 * breakpoints as early as possible in the resume path, before re-enabling
 * debug exceptions. Code cannot be run from a CPU PM notifier since by the
 * time the notifier runs debug exceptions might have been enabled already,
 * with HW breakpoints registers content still in an unknown state.
 */
static void (*hw_breakpoint_restore)(void *);
void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
{
	/* Prevent multiple restore hook initializations */
	if (WARN_ON(hw_breakpoint_restore))
		return;
	hw_breakpoint_restore = hw_bp_restore;
}

/*
 * cpu_suspend
 *
 * arg: argument to pass to the finisher function
 * fn: finisher function pointer
 *
 * Returns the value propagated from __cpu_suspend_enter(); 0 here is
 * treated as "we came back through the resume-from-reset path" (see the
 * ret == 0 branch below).
 */
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
	struct mm_struct *mm = current->active_mm;
	int ret;
	unsigned long flags;

	/*
	 * From this point debug exceptions are disabled to prevent
	 * updates to mdscr register (saved and restored along with
	 * general purpose registers) from kernel debuggers.
	 */
	local_dbg_save(flags);

	/*
	 * Function graph tracer state gets inconsistent when the kernel
	 * calls functions that never return (aka suspend finishers) hence
	 * disable graph tracing during their execution.
	 */
	pause_graph_tracing();

	/*
	 * mm context saved on the stack, it will be restored when
	 * the cpu comes out of reset through the identity mapped
	 * page tables, so that the thread address space is properly
	 * set-up on function return.
	 */
	ret = __cpu_suspend_enter(arg, fn);
	if (ret == 0) {
		/*
		 * We are resuming from reset with TTBR0_EL1 set to the
		 * idmap to enable the MMU; set the TTBR0 to the reserved
		 * page tables to prevent speculative TLB allocations, flush
		 * the local tlb and set the default tcr_el1.t0sz so that
		 * the TTBR0 address space set-up is properly restored.
		 * If the current active_mm != &init_mm we entered cpu_suspend
		 * with mappings in TTBR0 that must be restored, so we switch
		 * them back to complete the address space configuration
		 * restoration before returning.
		 */
		cpu_set_reserved_ttbr0();
		local_flush_tlb_all();
		cpu_set_default_tcr_t0sz();

		if (mm != &init_mm)
			cpu_switch_mm(mm->pgd, mm);

		/*
		 * Restore per-cpu offset before any kernel
		 * subsystem relying on it has a chance to run.
		 */
		set_my_cpu_offset(per_cpu_offset(smp_processor_id()));

		/*
		 * Restore HW breakpoint registers to sane values
		 * before debug exceptions are possibly re-enabled
		 * through local_dbg_restore.
		 */
		if (hw_breakpoint_restore)
			hw_breakpoint_restore(NULL);
	}

	unpause_graph_tracing();

	/*
	 * Restore pstate flags. OS lock and mdscr have been already
	 * restored, so from this point onwards, debugging is fully
	 * re-enabled if it was enabled when core started shutdown.
	 */
	local_dbg_restore(flags);

	return ret;
}

/* Shared with the resume assembly path; layout defined in asm/suspend.h. */
struct sleep_save_sp sleep_save_sp;

/*
 * Allocate the per-CPU stash of context physical addresses, indexed by
 * MPIDR hash, and flush the descriptor so the MMU-off resume code can
 * read it from DRAM.
 */
static int __init cpu_suspend_init(void)
{
	void *ctx_ptr;

	/* ctx_ptr is an array of physical addresses */
	ctx_ptr = kcalloc(mpidr_hash_size(), sizeof(phys_addr_t), GFP_KERNEL);

	if (WARN_ON(!ctx_ptr))
		return -ENOMEM;

	sleep_save_sp.save_ptr_stash = ctx_ptr;
	sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr);
	/* Push the descriptor itself to DRAM for MMU-off readers. */
	__flush_dcache_area(&sleep_save_sp, sizeof(struct sleep_save_sp));

	return 0;
}
early_initcall(cpu_suspend_init);