/* Copyright 2005 Rusty Russell rusty@rustcorp.com.au IBM Corporation.
 * GPL v2 and any later version.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/syscalls.h>
#include <linux/interrupt.h>

#include <asm/atomic.h>
#include <asm/uaccess.h>

/* Since we effect priority and affinity (both of which are visible
 * to, and settable by outside processes) we do indirection via a
 * kthread. */

/* Thread to stop each CPU in user context. */
enum stopmachine_state {
	STOPMACHINE_WAIT,
	STOPMACHINE_PREPARE,
	STOPMACHINE_DISABLE_IRQ,
	STOPMACHINE_EXIT,
};

static enum stopmachine_state stopmachine_state;
static unsigned int stopmachine_num_threads;
static atomic_t stopmachine_thread_ack;

static int stopmachine(void *cpu)
{
	int irqs_disabled = 0;
	int prepared = 0;

	set_cpus_allowed(current, cpumask_of_cpu((int)(long)cpu));

	/* Ack: we are alive */
	smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
	atomic_inc(&stopmachine_thread_ack);

	/* Simple state machine */
	while (stopmachine_state != STOPMACHINE_EXIT) {
		if (stopmachine_state == STOPMACHINE_DISABLE_IRQ
		    && !irqs_disabled) {
			local_irq_disable();
			hard_irq_disable();
			irqs_disabled = 1;
			/* Ack: irqs disabled. */
			smp_mb(); /* Must read state first. */
			atomic_inc(&stopmachine_thread_ack);
		} else if (stopmachine_state == STOPMACHINE_PREPARE
			   && !prepared) {
			/* Everyone is in place, hold CPU. */
			preempt_disable();
			prepared = 1;
			smp_mb(); /* Must read state first. */
			atomic_inc(&stopmachine_thread_ack);
		}
		/* Yield in first stage: migration threads need to
		 * help our sisters onto their CPUs. */
		if (!prepared && !irqs_disabled)
			yield();
		else
			cpu_relax();
	}

	/* Ack: we are exiting. */
	smp_mb(); /* Must read state first. */
	atomic_inc(&stopmachine_thread_ack);

	if (irqs_disabled)
		local_irq_enable();
	if (prepared)
		preempt_enable();

	return 0;
}

/* Change the thread state */
static void stopmachine_set_state(enum stopmachine_state state)
{
	atomic_set(&stopmachine_thread_ack, 0);
	smp_wmb();
	stopmachine_state = state;
	while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads)
		cpu_relax();
}

static int stop_machine(void)
{
	int i, ret = 0;

	atomic_set(&stopmachine_thread_ack, 0);
	stopmachine_num_threads = 0;
	stopmachine_state = STOPMACHINE_WAIT;

	for_each_online_cpu(i) {
		if (i == raw_smp_processor_id())
			continue;
		ret = kernel_thread(stopmachine, (void *)(long)i, CLONE_KERNEL);
		if (ret < 0)
			break;
		stopmachine_num_threads++;
	}

	/* Wait for them all to come to life. */
	while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads)
		yield();

	/* If some failed, kill them all. */
	if (ret < 0) {
		stopmachine_set_state(STOPMACHINE_EXIT);
		return ret;
	}

	/* Now they are all started, make them hold the CPUs, ready. */
	preempt_disable();
	stopmachine_set_state(STOPMACHINE_PREPARE);

	/* Make them disable irqs. */
	local_irq_disable();
	hard_irq_disable();
	stopmachine_set_state(STOPMACHINE_DISABLE_IRQ);

	return 0;
}

static void restart_machine(void)
{
	stopmachine_set_state(STOPMACHINE_EXIT);
	local_irq_enable();
	preempt_enable_no_resched();
}

struct stop_machine_data
{
	int (*fn)(void *);
	void *data;
	struct completion done;
};

static int do_stop(void *_smdata)
{
	struct stop_machine_data *smdata = _smdata;
	int ret;

	ret = stop_machine();
	if (ret == 0) {
		ret = smdata->fn(smdata->data);
		restart_machine();
	}

	/* We're done: you can kthread_stop us now */
	complete(&smdata->done);

	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return ret;
}

struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
				       unsigned int cpu)
{
	static DEFINE_MUTEX(stopmachine_mutex);
	struct stop_machine_data smdata;
	struct task_struct *p;

	smdata.fn = fn;
	smdata.data = data;
	init_completion(&smdata.done);

	mutex_lock(&stopmachine_mutex);

	/* If they don't care which CPU fn runs on, bind to any online one. */
	if (cpu == NR_CPUS)
		cpu = raw_smp_processor_id();

	p = kthread_create(do_stop, &smdata, "kstopmachine");
	if (!IS_ERR(p)) {
		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

		/* One high-prio thread per cpu.  We'll do this one. */
		sched_setscheduler(p, SCHED_FIFO, &param);
		kthread_bind(p, cpu);
		wake_up_process(p);
		wait_for_completion(&smdata.done);
	}
	mutex_unlock(&stopmachine_mutex);
	return p;
}

int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
{
	struct task_struct *p;
	int ret;

	/* No CPUs can come up or down during this. */
	get_online_cpus();
	p = __stop_machine_run(fn, data, cpu);
	if (!IS_ERR(p))
		ret = kthread_stop(p);
	else
		ret = PTR_ERR(p);
	put_online_cpus();

	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine_run);
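
/*
 * Hedged usage sketch, not part of the original file: a hypothetical
 * caller that quiesces all other CPUs while it updates some global
 * state.  The callback runs on one CPU while every other online CPU
 * spins in its kstopmachine thread with interrupts and preemption
 * disabled, so the callback must not sleep.  Passing NR_CPUS as the
 * cpu argument lets __stop_machine_run() pick any online CPU.  The
 * names example_patch_state(), example_caller() and example_value are
 * illustrative assumptions only.
 */
#if 0
static int example_value;

static int example_patch_state(void *data)
{
	/* No other CPU is executing kernel code at this point. */
	example_value = *(int *)data;
	return 0;
}

static int example_caller(void)
{
	int new_value = 42;

	/* Returns the callback's return value, or a -errno on failure. */
	return stop_machine_run(example_patch_state, &new_value, NR_CPUS);
}
#endif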