/*
 * drivers/power/process.c - Functions for starting/stopping processes on
 * suspend transitions.
 *
 * Originally from swsusp.
 */


#undef DEBUG

#include <linux/interrupt.h>
#include <linux/oom.h>
#include <linux/suspend.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/freezer.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/kmod.h>

/*
 * Timeout for stopping processes
 */
#define TIMEOUT	(20 * HZ)

static int try_to_freeze_tasks(bool user_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	bool wq_busy = false;
	struct timeval start, end;
	u64 elapsed_csecs64;
	unsigned int elapsed_csecs;
	bool wakeup = false;

	do_gettimeofday(&start);

	end_time = jiffies + TIMEOUT;

	if (!user_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (p == current || !freeze_task(p))
				continue;

			/*
			 * Now that we've done set_freeze_flag, don't
			 * perturb a task in TASK_STOPPED or TASK_TRACED.
			 * It is "frozen enough". If the task does wake
			 * up, it will immediately call try_to_freeze.
			 *
			 * Because freeze_task() goes through p's scheduler lock, it's
			 * guaranteed that TASK_STOPPED/TRACED -> TASK_RUNNING
			 * transition can't race with task state testing here.
			 */
			if (!task_is_stopped_or_traced(p) &&
			    !freezer_should_skip(p))
				todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);

		if (!user_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (!todo || time_after(jiffies, end_time))
			break;

		if (pm_wakeup_pending()) {
			wakeup = true;
			break;
		}

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.
		 */
		msleep(10);
	}

	do_gettimeofday(&end);
	elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_csecs64, NSEC_PER_SEC / 100);
	elapsed_csecs = elapsed_csecs64;

	if (todo) {
		printk("\n");
		printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds "
		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
		       wakeup ? "aborted" : "failed",
		       elapsed_csecs / 100, elapsed_csecs % 100,
		       todo - wq_busy, wq_busy);

		if (!wakeup) {
			read_lock(&tasklist_lock);
			do_each_thread(g, p) {
				if (p != current && !freezer_should_skip(p)
				    && freezing(p) && !frozen(p))
					sched_show_task(p);
			} while_each_thread(g, p);
			read_unlock(&tasklist_lock);
		}
	} else {
		printk("(elapsed %d.%02d seconds) ", elapsed_csecs / 100,
			elapsed_csecs % 100);
	}

	return todo ? -EBUSY : 0;
}
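
/*
 * Illustrative sketch, not part of this file: the kind of freezable kernel
 * thread loop that try_to_freeze_tasks() above waits for.  A cooperating
 * kernel thread marks itself freezable and periodically calls
 * try_to_freeze(), which parks it in the refrigerator while freezing is in
 * progress.  The thread function and its work helper below are hypothetical.
 *
 *	static int example_freezable_kthread(void *unused)
 *	{
 *		set_freezable();
 *		while (!kthread_should_stop()) {
 *			try_to_freeze();
 *			example_do_work();	// hypothetical work item
 *		}
 *		return 0;
 *	}
 */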
"); 134 pm_freezing = true; 135 error = try_to_freeze_tasks(true); 136 if (!error) { 137 printk("done."); 138 __usermodehelper_set_disable_depth(UMH_DISABLED); 139 oom_killer_disable(); 140 } 141 printk("\n"); 142 BUG_ON(in_atomic()); 143 144 if (error) 145 thaw_processes(); 146 return error; 147 } 148 149 /** 150 * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator. 151 * 152 * On success, returns 0. On failure, -errno and only the kernel threads are 153 * thawed, so as to give a chance to the caller to do additional cleanups 154 * (if any) before thawing the userspace tasks. So, it is the responsibility 155 * of the caller to thaw the userspace tasks, when the time is right. 156 */ 157 int freeze_kernel_threads(void) 158 { 159 int error; 160 161 printk("Freezing remaining freezable tasks ... "); 162 pm_nosig_freezing = true; 163 error = try_to_freeze_tasks(false); 164 if (!error) 165 printk("done."); 166 167 printk("\n"); 168 BUG_ON(in_atomic()); 169 170 if (error) 171 thaw_kernel_threads(); 172 return error; 173 } 174 175 void thaw_processes(void) 176 { 177 struct task_struct *g, *p; 178 179 if (pm_freezing) 180 atomic_dec(&system_freezing_cnt); 181 pm_freezing = false; 182 pm_nosig_freezing = false; 183 184 oom_killer_enable(); 185 186 printk("Restarting tasks ... "); 187 188 thaw_workqueues(); 189 190 read_lock(&tasklist_lock); 191 do_each_thread(g, p) { 192 __thaw_task(p); 193 } while_each_thread(g, p); 194 read_unlock(&tasklist_lock); 195 196 usermodehelper_enable(); 197 198 schedule(); 199 printk("done.\n"); 200 } 201 202 void thaw_kernel_threads(void) 203 { 204 struct task_struct *g, *p; 205 206 pm_nosig_freezing = false; 207 printk("Restarting kernel threads ... "); 208 209 thaw_workqueues(); 210 211 read_lock(&tasklist_lock); 212 do_each_thread(g, p) { 213 if (p->flags & (PF_KTHREAD | PF_WQ_WORKER)) 214 __thaw_task(p); 215 } while_each_thread(g, p); 216 read_unlock(&tasklist_lock); 217 218 schedule(); 219 printk("done.\n"); 220 } 221