xref: /openbmc/linux/kernel/power/process.c (revision c21b37f6)
/*
 * kernel/power/process.c - Functions for starting/stopping processes on
 *                          suspend transitions.
 *
 * Originally from swsusp.
 */


#undef DEBUG

#include <linux/interrupt.h>
#include <linux/suspend.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/freezer.h>

/*
 * Timeout for stopping processes
 */
#define TIMEOUT	(20 * HZ)

#define FREEZER_KERNEL_THREADS 0
#define FREEZER_USER_SPACE 1

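/*
 * Check whether a task may be frozen: the current task, tasks marked
 * PF_NOFREEZE and tasks that are already exiting are not freezeable.
 */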
static inline int freezeable(struct task_struct *p)
{
	if ((p == current) ||
	    (p->flags & PF_NOFREEZE) ||
	    (p->exit_state != 0))
		return 0;
	return 1;
}

/*
 * freezing is complete, mark current process as frozen
 */
static inline void frozen_process(void)
{
	if (likely(!(current->flags & PF_NOFREEZE))) {
		current->flags |= PF_FROZEN;
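		/* The write barrier pairs with the rmb() in freeze_task() */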
		wmb();
	}
	clear_freeze_flag(current);
}

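/*
 * Tasks normally end up in the refrigerator via try_to_freeze() (see
 * <linux/freezer.h>); e.g. a freezable kernel thread might loop like
 *
 *	while (!kthread_should_stop()) {
 *		try_to_freeze();
 *		...
 *	}
 */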
/* The refrigerator is the place where frozen processes are stored :-). */
void refrigerator(void)
{
	/* Hmm, should we be allowed to suspend when there are realtime
	   processes around? */
	long save;

	task_lock(current);
	if (freezing(current)) {
		frozen_process();
		task_unlock(current);
	} else {
		task_unlock(current);
		return;
	}
	save = current->state;
	pr_debug("%s entered refrigerator\n", current->comm);

	spin_lock_irq(&current->sighand->siglock);
	recalc_sigpending(); /* We sent fake signal, clean it up */
	spin_unlock_irq(&current->sighand->siglock);

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!frozen(current))
			break;
		schedule();
	}
	pr_debug("%s left refrigerator\n", current->comm);
	__set_current_state(save);
}

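/*
 * Ask a task to enter the refrigerator: set its freeze flag and wake it
 * up by faking a signal.  The rmb() below pairs with the wmb() in
 * frozen_process().
 */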
static void freeze_task(struct task_struct *p)
{
	unsigned long flags;

	if (!freezing(p)) {
		rmb();
		if (!frozen(p)) {
			set_freeze_flag(p);
			if (p->state == TASK_STOPPED)
				force_sig_specific(SIGSTOP, p);
			spin_lock_irqsave(&p->sighand->siglock, flags);
			signal_wake_up(p, p->state == TASK_STOPPED);
			spin_unlock_irqrestore(&p->sighand->siglock, flags);
		}
	}
}

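/*
 * Take back a freeze request that has not been honoured yet: clear the
 * freeze flag again and remove the fake signal.
 */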
static void cancel_freezing(struct task_struct *p)
{
	unsigned long flags;

	if (freezing(p)) {
		pr_debug("  clean up: %s\n", p->comm);
		clear_freeze_flag(p);
		spin_lock_irqsave(&p->sighand->siglock, flags);
		recalc_sigpending_and_wake(p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
}

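/*
 * Ask all freezeable tasks of the given kind (user space or kernel threads)
 * to enter the refrigerator and wait for them to do so.  Retries until
 * either every such task is frozen or TIMEOUT jiffies have elapsed.
 * Returns 0 on success and -EBUSY if some tasks refused to freeze.
 */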
static int try_to_freeze_tasks(int freeze_user_space)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;

	end_time = jiffies + TIMEOUT;
	do {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (frozen(p) || !freezeable(p))
				continue;

			if (freeze_user_space) {
				if (p->state == TASK_TRACED &&
				    frozen(p->parent)) {
					cancel_freezing(p);
					continue;
				}
				/*
				 * Kernel threads should not have TIF_FREEZE
				 * set at this point, so we must ensure that
				 * either p->mm is not NULL *and*
				 * PF_BORROWED_MM is unset, or TIF_FREEZE is
				 * left unset.  The task_lock() is necessary to
				 * prevent races with exit_mm() or
				 * use_mm()/unuse_mm() from occurring.
				 */
				task_lock(p);
				if (!p->mm || (p->flags & PF_BORROWED_MM)) {
					task_unlock(p);
					continue;
				}
				freeze_task(p);
				task_unlock(p);
			} else {
				freeze_task(p);
			}
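			/*
			 * Tasks that called freezer_do_not_count() (e.g. a
			 * vfork() parent waiting for its child) need not be
			 * waited for here.
			 */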
			if (!freezer_should_skip(p))
				todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
		yield();			/* Yield is okay here */
		if (time_after(jiffies, end_time))
			break;
	} while (todo);

	if (todo) {
		/* This does not unfreeze processes that are already frozen
		 * (we have a slightly ugly calling convention in that respect,
		 * and the caller must call thaw_processes() if something
		 * fails), but it cleans up leftover TIF_FREEZE requests.
		 */
		printk("\n");
		printk(KERN_ERR "Freezing of %s timed out after %d seconds "
				"(%u tasks refusing to freeze):\n",
				freeze_user_space ? "user space" : "tasks",
				TIMEOUT / HZ, todo);
		show_state();
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			task_lock(p);
			if (freezing(p) && !freezer_should_skip(p))
				printk(KERN_ERR " %s\n", p->comm);
			cancel_freezing(p);
			task_unlock(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	return todo ? -EBUSY : 0;
}

/**
 *	freeze_processes - tell processes to enter the refrigerator
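 *
 *	User space tasks are frozen first, the filesystems are synced and only
 *	then are the freezable kernel threads frozen.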
 */
int freeze_processes(void)
{
	int error;

	printk("Stopping tasks ... ");
	error = try_to_freeze_tasks(FREEZER_USER_SPACE);
	if (error)
		return error;

	sys_sync();
	error = try_to_freeze_tasks(FREEZER_KERNEL_THREADS);
	if (error)
		return error;

	printk("done.\n");
	BUG_ON(in_atomic());
	return 0;
}

static void thaw_tasks(int thaw_user_space)
{
	struct task_struct *g, *p;

	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		if (!freezeable(p))
			continue;

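		/*
		 * !p->mm is true only for kernel threads, so this skips user
		 * space tasks when thawing kernel threads and vice versa.
		 */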
		if (!p->mm == thaw_user_space)
			continue;

		thaw_process(p);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
}

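/**
 *	thaw_processes - wake all frozen tasks up and let them run again
 *
 *	Kernel threads are thawed before user space tasks, i.e. in the reverse
 *	order of freeze_processes().
 */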
void thaw_processes(void)
{
	printk("Restarting tasks ... ");
	thaw_tasks(FREEZER_KERNEL_THREADS);
	thaw_tasks(FREEZER_USER_SPACE);
	schedule();
	printk("done.\n");
}

EXPORT_SYMBOL(refrigerator);