xref: /openbmc/linux/kernel/power/process.c (revision 9d4fa1a1)
// SPDX-License-Identifier: GPL-2.0
/*
 * kernel/power/process.c - Functions for starting/stopping processes on
 *                          suspend transitions.
 *
 * Originally from swsusp.
 */


#undef DEBUG

#include <linux/interrupt.h>
#include <linux/oom.h>
#include <linux/suspend.h>
#include <linux/module.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/syscalls.h>
#include <linux/freezer.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/kmod.h>
#include <trace/events/power.h>
#include <linux/cpuset.h>

/*
 * Timeout for stopping processes
 */
unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;

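/*
 * try_to_freeze_tasks - Send every freezable task to the refrigerator.
 * @user_only: If set, freeze only user space tasks; otherwise also freeze
 *             kernel threads and freezable workqueues.
 *
 * Poll all tasks with exponential backoff until everything is frozen, the
 * freeze timeout expires, or a wakeup event is pending.  Returns 0 if all
 * tasks froze in time, or -EBUSY if some did not or the freeze was aborted
 * by a wakeup event.
 */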
static int try_to_freeze_tasks(bool user_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	bool wq_busy = false;
	ktime_t start, end, elapsed;
	unsigned int elapsed_msecs;
	bool wakeup = false;
	int sleep_usecs = USEC_PER_MSEC;

	start = ktime_get_boottime();

	end_time = jiffies + msecs_to_jiffies(freeze_timeout_msecs);

	if (!user_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		for_each_process_thread(g, p) {
			if (p == current || !freeze_task(p))
				continue;

			if (!freezer_should_skip(p))
				todo++;
		}
		read_unlock(&tasklist_lock);

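		/*
		 * When kernel threads are included, a freezable workqueue
		 * that is still busy counts as one more item left to freeze.
		 */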
		if (!user_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (!todo || time_after(jiffies, end_time))
			break;

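		/*
		 * Give up early if a wakeup event is pending; the suspend
		 * transition is going to be aborted anyway.
		 */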
		if (pm_wakeup_pending()) {
			wakeup = true;
			break;
		}

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.  Start with an initial
		 * 1 ms sleep followed by exponential backoff until 8 ms.
		 */
		usleep_range(sleep_usecs / 2, sleep_usecs);
		if (sleep_usecs < 8 * USEC_PER_MSEC)
			sleep_usecs *= 2;
	}

	end = ktime_get_boottime();
	elapsed = ktime_sub(end, start);
	elapsed_msecs = ktime_to_ms(elapsed);

	if (todo) {
		pr_cont("\n");
		pr_err("Freezing of tasks %s after %d.%03d seconds "
		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
		       wakeup ? "aborted" : "failed",
		       elapsed_msecs / 1000, elapsed_msecs % 1000,
		       todo - wq_busy, wq_busy);

		if (wq_busy)
			show_workqueue_state();

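		/*
		 * Dump the stack of every task that is still freezing but
		 * not yet frozen, unless the freeze was merely aborted by a
		 * wakeup event and PM debug messages are disabled.
		 */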
		if (!wakeup || pm_debug_messages_on) {
			read_lock(&tasklist_lock);
			for_each_process_thread(g, p) {
				if (p != current && !freezer_should_skip(p)
				    && freezing(p) && !frozen(p))
					sched_show_task(p);
			}
			read_unlock(&tasklist_lock);
		}
	} else {
		pr_cont("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000,
			elapsed_msecs % 1000);
	}

	return todo ? -EBUSY : 0;
}

/**
 * freeze_processes - Signal user space processes to enter the refrigerator.
 * The current thread will not be frozen.  The same process that calls
 * freeze_processes must later call thaw_processes.
 *
 * On success, returns 0.  On failure, returns -errno and the system is
 * fully thawed.
 */
int freeze_processes(void)
{
	int error;

	error = __usermodehelper_disable(UMH_FREEZING);
	if (error)
		return error;

	/* Make sure this task doesn't get frozen */
	current->flags |= PF_SUSPEND_TASK;

	if (!pm_freezing)
		atomic_inc(&system_freezing_cnt);

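	/*
	 * Forget wakeup events registered so far; only events from here on
	 * should be able to abort the freeze.
	 */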
	pm_wakeup_clear(true);
	pr_info("Freezing user space processes ... ");
	pm_freezing = true;
	error = try_to_freeze_tasks(true);
	if (!error) {
		__usermodehelper_set_disable_depth(UMH_DISABLED);
		pr_cont("done.");
	}
	pr_cont("\n");
	BUG_ON(in_atomic());

	/*
	 * Now that the whole userspace is frozen we need to disable
	 * the OOM killer to disallow any further interference with
	 * killable tasks. There is no guarantee oom victims will
	 * ever reach a point they go away, so we have to wait with a timeout.
	 */
	if (!error && !oom_killer_disable(msecs_to_jiffies(freeze_timeout_msecs)))
		error = -EBUSY;

	if (error)
		thaw_processes();
	return error;
}

/**
 * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
 *
 * On success, returns 0.  On failure, returns -errno and only the kernel
 * threads are thawed, so that the caller gets a chance to do additional
 * cleanups (if any) before thawing the userspace tasks.  It is therefore the
 * caller's responsibility to thaw the userspace tasks when the time is right.
 */
int freeze_kernel_threads(void)
{
	int error;

	pr_info("Freezing remaining freezable tasks ... ");

	pm_nosig_freezing = true;
	error = try_to_freeze_tasks(false);
	if (!error)
		pr_cont("done.");

	pr_cont("\n");
	BUG_ON(in_atomic());

	if (error)
		thaw_kernel_threads();
	return error;
}

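/**
 * thaw_processes - Thaw every task frozen for a suspend transition.
 *
 * Counterpart of freeze_processes(): re-enables the OOM killer and usermode
 * helpers, thaws workqueues and all tasks, and clears PF_SUSPEND_TASK on the
 * task that initiated the freeze.
 */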
void thaw_processes(void)
{
	struct task_struct *g, *p;
	struct task_struct *curr = current;

	trace_suspend_resume(TPS("thaw_processes"), 0, true);
	if (pm_freezing)
		atomic_dec(&system_freezing_cnt);
	pm_freezing = false;
	pm_nosig_freezing = false;

	oom_killer_enable();

	pr_info("Restarting tasks ... ");

	__usermodehelper_set_disable_depth(UMH_FREEZING);
	thaw_workqueues();

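	/*
	 * Wait for any pending cpuset updates triggered by CPU hotplug to
	 * finish before tasks start running again.
	 */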
	cpuset_wait_for_hotplug();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		/* No other threads should have PF_SUSPEND_TASK set */
		WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK));
		__thaw_task(p);
	}
	read_unlock(&tasklist_lock);

	WARN_ON(!(curr->flags & PF_SUSPEND_TASK));
	curr->flags &= ~PF_SUSPEND_TASK;

	usermodehelper_enable();

	schedule();
	pr_cont("done.\n");
	trace_suspend_resume(TPS("thaw_processes"), 0, false);
}

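/**
 * thaw_kernel_threads - Thaw the tasks frozen by freeze_kernel_threads().
 *
 * Only kernel threads and workqueue workers are thawed; user space tasks are
 * left frozen for the caller to deal with.
 */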
void thaw_kernel_threads(void)
{
	struct task_struct *g, *p;

	pm_nosig_freezing = false;
	pr_info("Restarting kernel threads ... ");

	thaw_workqueues();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		if (p->flags & (PF_KTHREAD | PF_WQ_WORKER))
			__thaw_task(p);
	}
	read_unlock(&tasklist_lock);

	schedule();
	pr_cont("done.\n");
}
246