/*
 * drivers/power/process.c - Functions for starting/stopping processes on
 *                           suspend transitions.
 *
 * Originally from swsusp.
 */


#undef DEBUG

#include <linux/interrupt.h>
#include <linux/oom.h>
#include <linux/suspend.h>
#include <linux/module.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/syscalls.h>
#include <linux/freezer.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/kmod.h>
#include <trace/events/power.h>
#include <linux/cpuset.h>

/*
 * Timeout for stopping processes
 */
unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;

static int try_to_freeze_tasks(bool user_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	bool wq_busy = false;
	ktime_t start, end, elapsed;
	unsigned int elapsed_msecs;
	bool wakeup = false;
	int sleep_usecs = USEC_PER_MSEC;

	start = ktime_get_boottime();

	end_time = jiffies + msecs_to_jiffies(freeze_timeout_msecs);

	if (!user_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		for_each_process_thread(g, p) {
			if (p == current || !freeze_task(p))
				continue;

			if (!freezer_should_skip(p))
				todo++;
		}
		read_unlock(&tasklist_lock);

		if (!user_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (!todo || time_after(jiffies, end_time))
			break;

		if (pm_wakeup_pending()) {
			wakeup = true;
			break;
		}

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.  Start with an initial
		 * 1 ms sleep followed by exponential backoff until 8 ms.
		 */
		usleep_range(sleep_usecs / 2, sleep_usecs);
		if (sleep_usecs < 8 * USEC_PER_MSEC)
			sleep_usecs *= 2;
	}

	end = ktime_get_boottime();
	elapsed = ktime_sub(end, start);
	elapsed_msecs = ktime_to_ms(elapsed);

	if (todo) {
		pr_cont("\n");
		pr_err("Freezing of tasks %s after %d.%03d seconds "
		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
		       wakeup ? "aborted" : "failed",
		       elapsed_msecs / 1000, elapsed_msecs % 1000,
		       todo - wq_busy, wq_busy);

		if (wq_busy)
			show_workqueue_state();

		if (!wakeup) {
			read_lock(&tasklist_lock);
			for_each_process_thread(g, p) {
				if (p != current && !freezer_should_skip(p)
				    && freezing(p) && !frozen(p))
					sched_show_task(p);
			}
			read_unlock(&tasklist_lock);
		}
	} else {
		pr_cont("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000,
			elapsed_msecs % 1000);
	}

	return todo ? -EBUSY : 0;
}

/**
 * freeze_processes - Signal user space processes to enter the refrigerator.
 * The current thread will not be frozen.  The same process that calls
 * freeze_processes must later call thaw_processes.
 *
 * On success, returns 0.  On failure, -errno and the system is fully thawed.
 */
int freeze_processes(void)
{
	int error;

	error = __usermodehelper_disable(UMH_FREEZING);
	if (error)
		return error;

	/* Make sure this task doesn't get frozen */
	current->flags |= PF_SUSPEND_TASK;

	if (!pm_freezing)
		atomic_inc(&system_freezing_cnt);

	pm_wakeup_clear(true);
	pr_info("Freezing user space processes ... ");
	pm_freezing = true;
	error = try_to_freeze_tasks(true);
	if (!error) {
		__usermodehelper_set_disable_depth(UMH_DISABLED);
		pr_cont("done.");
	}
	pr_cont("\n");
	BUG_ON(in_atomic());

	/*
	 * Now that the whole userspace is frozen we need to disable
	 * the OOM killer to disallow any further interference with
	 * killable tasks.  There is no guarantee oom victims will ever
	 * reach a point where they go away, so we have to wait with a
	 * timeout.
	 */
	if (!error && !oom_killer_disable(msecs_to_jiffies(freeze_timeout_msecs)))
		error = -EBUSY;

	if (error)
		thaw_processes();
	return error;
}

/**
 * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
 *
 * On success, returns 0.  On failure, -errno and only the kernel threads are
 * thawed, so as to give a chance to the caller to do additional cleanups
 * (if any) before thawing the userspace tasks.  So, it is the responsibility
 * of the caller to thaw the userspace tasks, when the time is right.
 */
int freeze_kernel_threads(void)
{
	int error;

	pr_info("Freezing remaining freezable tasks ... ");

	pm_nosig_freezing = true;
	error = try_to_freeze_tasks(false);
	if (!error)
		pr_cont("done.");

	pr_cont("\n");
	BUG_ON(in_atomic());

	if (error)
		thaw_kernel_threads();
	return error;
}

void thaw_processes(void)
{
	struct task_struct *g, *p;
	struct task_struct *curr = current;

	trace_suspend_resume(TPS("thaw_processes"), 0, true);
	if (pm_freezing)
		atomic_dec(&system_freezing_cnt);
	pm_freezing = false;
	pm_nosig_freezing = false;

	oom_killer_enable();

	pr_info("Restarting tasks ... ");

	__usermodehelper_set_disable_depth(UMH_FREEZING);
	thaw_workqueues();

	cpuset_wait_for_hotplug();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		/* No other threads should have PF_SUSPEND_TASK set */
		WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK));
		__thaw_task(p);
	}
	read_unlock(&tasklist_lock);

	WARN_ON(!(curr->flags & PF_SUSPEND_TASK));
	curr->flags &= ~PF_SUSPEND_TASK;

	usermodehelper_enable();

	schedule();
	pr_cont("done.\n");
	trace_suspend_resume(TPS("thaw_processes"), 0, false);
}

void thaw_kernel_threads(void)
{
	struct task_struct *g, *p;

	pm_nosig_freezing = false;
	pr_info("Restarting kernel threads ... ");

	thaw_workqueues();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		if (p->flags & (PF_KTHREAD | PF_WQ_WORKER))
			__thaw_task(p);
	}
	read_unlock(&tasklist_lock);

	schedule();
	pr_cont("done.\n");
}