/* Copyright 2008, 2005 Rusty Russell rusty@rustcorp.com.au IBM Corporation.
 * GPL v2 and any later version.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/syscalls.h>
#include <linux/interrupt.h>

#include <asm/atomic.h>
#include <asm/uaccess.h>

/* This controls the threads on each CPU. */
enum stopmachine_state {
	/* Dummy starting state for thread. */
	STOPMACHINE_NONE,
	/* Awaiting everyone to be scheduled. */
	STOPMACHINE_PREPARE,
	/* Disable interrupts. */
	STOPMACHINE_DISABLE_IRQ,
	/* Run the function */
	STOPMACHINE_RUN,
	/* Exit */
	STOPMACHINE_EXIT,
};
static enum stopmachine_state state;

struct stop_machine_data {
	int (*fn)(void *);
	void *data;
	int fnret;
};

/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
static unsigned int num_threads;
static atomic_t thread_ack;
static struct completion finished;
static DEFINE_MUTEX(lock);

static void set_state(enum stopmachine_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&thread_ack, num_threads);
	smp_wmb();
	state = newstate;
}

/* Last one to ack a state moves to the next state. */
static void ack_state(void)
{
	if (atomic_dec_and_test(&thread_ack)) {
		/* If we're the last one to ack the EXIT, we're finished. */
		if (state == STOPMACHINE_EXIT)
			complete(&finished);
		else
			set_state(state + 1);
	}
}

/* This is the actual thread which stops the CPU.  It exits by itself rather
 * than waiting for kthread_stop(), because it's easier for hotplug CPU. */
static int stop_cpu(struct stop_machine_data *smdata)
{
	enum stopmachine_state curstate = STOPMACHINE_NONE;

	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read stopmachine_state. */
		cpu_relax();
		if (state != curstate) {
			curstate = state;
			switch (curstate) {
			case STOPMACHINE_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case STOPMACHINE_RUN:
				/* |= allows error detection if functions on
				 * multiple CPUs. */
				smdata->fnret |= smdata->fn(smdata->data);
				break;
			default:
				break;
			}
			ack_state();
		}
	} while (curstate != STOPMACHINE_EXIT);

	local_irq_enable();
	do_exit(0);
}

/* Callback for CPUs which aren't supposed to do anything. */
static int chill(void *unused)
{
	return 0;
}
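/*
 * Overview of the handshake above: set_state() arms thread_ack with one
 * slot per thread, so each stop_cpu() thread that notices a state change
 * acts on it and calls ack_state(); the last thread to ack advances
 * everyone to the next state.  All threads therefore march through
 * PREPARE -> DISABLE_IRQ -> RUN -> EXIT in lockstep, and only threads
 * handed the "active" stop_machine_data run the caller's function; the
 * rest just chill().
 */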
int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
{
	int i, err;
	struct stop_machine_data active, idle;
	struct task_struct **threads;

	active.fn = fn;
	active.data = data;
	active.fnret = 0;
	idle.fn = chill;
	idle.data = NULL;

	/* This could be too big for stack on large machines. */
	threads = kcalloc(NR_CPUS, sizeof(threads[0]), GFP_KERNEL);
	if (!threads)
		return -ENOMEM;

	/* Set up initial state. */
	mutex_lock(&lock);
	init_completion(&finished);
	num_threads = num_online_cpus();
	set_state(STOPMACHINE_PREPARE);

	for_each_online_cpu(i) {
		struct stop_machine_data *smdata = &idle;
		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

		if (!cpus) {
			if (i == first_cpu(cpu_online_map))
				smdata = &active;
		} else {
			if (cpu_isset(i, *cpus))
				smdata = &active;
		}

		threads[i] = kthread_create((void *)stop_cpu, smdata, "kstop%u",
					    i);
		if (IS_ERR(threads[i])) {
			err = PTR_ERR(threads[i]);
			threads[i] = NULL;
			goto kill_threads;
		}

		/* Place it onto correct cpu. */
		kthread_bind(threads[i], i);

		/* Make it highest prio. */
		if (sched_setscheduler_nocheck(threads[i], SCHED_FIFO, &param))
			BUG();
	}

	/* We've created all the threads.  Wake them all: hold this CPU so one
	 * doesn't hit this CPU until we're ready. */
	get_cpu();
	for_each_online_cpu(i)
		wake_up_process(threads[i]);

	/* This will release the thread on our CPU. */
	put_cpu();
	wait_for_completion(&finished);
	mutex_unlock(&lock);

	kfree(threads);

	return active.fnret;

kill_threads:
	for_each_online_cpu(i)
		if (threads[i])
			kthread_stop(threads[i]);
	mutex_unlock(&lock);

	kfree(threads);
	return err;
}

int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
{
	int ret;

	/* No CPUs can come up or down during this. */
	get_online_cpus();
	ret = __stop_machine(fn, data, cpus);
	put_online_cpus();

	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);
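/*
 * Usage sketch (illustrative only; my_fn and err are hypothetical names,
 * not part of this file).  A caller that must change global state
 * atomically with respect to every CPU would do something like:
 *
 *	static int my_fn(void *data)
 *	{
 *		return 0;	// runs while all other CPUs spin, IRQs off
 *	}
 *
 *	err = stop_machine(my_fn, NULL, NULL);
 *
 * Passing NULL for cpus runs my_fn on just the first online CPU; a
 * non-NULL cpumask runs it on every CPU in the mask, with the return
 * values OR-ed together into the value stop_machine() returns.
 */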