/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */


/*

Goals and Theory of Operation

The primary goal of this feature is to reduce the kernel boot time, by
running the various independent hardware delay and discovery operations
decoupled from each other rather than strictly serialized.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously, out of order, while these operations still
have their externally visible parts happen sequentially and in-order.
(not unlike how out-of-order CPUs retire their instructions in order)

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core will assign each scheduled event such a sequence cookie and
pass this to the called functions.

Before doing a globally visible operation, such as registering device
numbers, the asynchronously called function should call
async_synchronize_cookie() and pass in its own cookie. The
async_synchronize_cookie() function will make sure that all asynchronous
operations that were scheduled prior to the operation corresponding with the
cookie have completed.

Subsystem/driver initialization code that has scheduled asynchronous probe
functions, but shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before returning
from its init function. This maintains strict ordering between the
asynchronous and synchronous parts of the kernel. A sketch of both
patterns follows this comment.

*/
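
/*
 * Example (an illustrative sketch only; the foo_* names below are
 * hypothetical and not part of this file):
 *
 *	static void foo_probe_async(void *data, async_cookie_t cookie)
 *	{
 *		struct foo_device *dev = data;
 *
 *		// slow, independent work; may run out of order
 *		foo_reset_and_spin_up(dev);
 *
 *		// wait until all async work scheduled before us is done...
 *		async_synchronize_cookie(cookie);
 *
 *		// ...so the globally visible step still happens in order
 *		foo_register_device(dev);
 *	}
 *
 *	static int __init foo_init(void)
 *	{
 *		async_schedule(foo_probe_async, &foo_dev);
 *
 *		// foo shares global resources with non-async drivers,
 *		// so fully synchronize before returning from init
 *		async_synchronize_full();
 *		return 0;
 *	}
 */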

#include <linux/async.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <asm/atomic.h>

static async_cookie_t next_cookie = 1;

#define MAX_THREADS	256
#define MAX_WORK	32768

static LIST_HEAD(async_pending);
static LIST_HEAD(async_running);
static DEFINE_SPINLOCK(async_lock);

static int async_enabled = 0;

struct async_entry {
	struct list_head list;
	async_cookie_t   cookie;
	async_func_ptr	 *func;
	void             *data;
	struct list_head *running;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);
static DECLARE_WAIT_QUEUE_HEAD(async_new);

static atomic_t entry_count;
static atomic_t thread_count;

extern int initcall_debug;

/*
 * Returns the cookie of the "oldest" entry that is still pending or
 * running, i.e. the lowest cookie that has not completed yet. If no
 * entries are in flight, returns next_cookie, which no issued cookie
 * has reached yet.
 *
 * MUST be called with the lock held!
 */
static async_cookie_t __lowest_in_progress(struct list_head *running)
{
	struct async_entry *entry;

	if (!list_empty(&async_pending)) {
		entry = list_first_entry(&async_pending,
			struct async_entry, list);
		return entry->cookie;
	} else if (!list_empty(running)) {
		entry = list_first_entry(running,
			struct async_entry, list);
		return entry->cookie;
	} else {
		/* nothing in progress... next_cookie is "infinity" */
		return next_cookie;
	}
}

/*
 * Locked wrapper: waiters must not walk the lists without holding
 * async_lock.
 */
static async_cookie_t lowest_in_progress(struct list_head *running)
{
	unsigned long flags;
	async_cookie_t ret;

	spin_lock_irqsave(&async_lock, flags);
	ret = __lowest_in_progress(running);
	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}
/*
 * pick the first pending entry and run it
 */
static void run_one_entry(void)
{
	unsigned long flags;
	struct async_entry *entry;
	ktime_t calltime, delta, rettime;

	/* 1) pick one task from the pending queue */

	spin_lock_irqsave(&async_lock, flags);
	if (list_empty(&async_pending))
		goto out;
	entry = list_first_entry(&async_pending, struct async_entry, list);

	/* 2) move it to its running queue */
	list_del(&entry->list);
	list_add_tail(&entry->list, entry->running);
	spin_unlock_irqrestore(&async_lock, flags);

	/* 3) run it (and print duration) */
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk("calling  %lli_%pF @ %i\n", (long long)entry->cookie,
			entry->func, task_pid_nr(current));
		calltime = ktime_get();
	}
	entry->func(entry->data, entry->cookie);
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk("initcall %lli_%pF returned 0 after %lld usecs\n",
			(long long)entry->cookie, entry->func,
			(long long)ktime_to_ns(delta) >> 10);
	}

	/* 4) remove it from the running queue */
	spin_lock_irqsave(&async_lock, flags);
	list_del(&entry->list);

	/* 5) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 6) wake up any waiters. */
	wake_up(&async_done);
	return;

out:
	spin_unlock_irqrestore(&async_lock, flags);
}

static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct list_head *running)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory, if async is disabled, or if there's
	 * too much work pending already, we execute synchronously.
	 */
	if (!async_enabled || !entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* can't defer it: run the function synchronously */
		ptr(data, newcookie);
		return newcookie;
	}
	entry->func = ptr;
	entry->data = data;
	entry->running = running;

	spin_lock_irqsave(&async_lock, flags);
	newcookie = entry->cookie = next_cookie++;
	list_add_tail(&entry->list, &async_pending);
	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);
	wake_up(&async_new);
	return newcookie;
}

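/**
 * async_schedule - schedule a function for asynchronous execution
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 */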
async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
{
	/* entries in the default domain run on the async_running list */
	return __async_schedule(ptr, data, &async_running);
}
EXPORT_SYMBOL_GPL(async_schedule);

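/**
 * async_schedule_special - schedule a function for asynchronous execution with a special running queue
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @running: list head to add the entry to while it runs
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * @running may be used in the async_synchronize_*_special() functions
 * to wait on a specific subset of asynchronous function calls.
 */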
async_cookie_t async_schedule_special(async_func_ptr *ptr, void *data, struct list_head *running)
{
	return __async_schedule(ptr, data, running);
}
EXPORT_SYMBOL_GPL(async_schedule_special);

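/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */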
void async_synchronize_full(void)
{
	do {
		async_synchronize_cookie(next_cookie);
	} while (!list_empty(&async_running) || !list_empty(&async_pending));
}
EXPORT_SYMBOL_GPL(async_synchronize_full);

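/**
 * async_synchronize_full_special - synchronize all asynchronous function calls on a given running list
 * @list: running list to synchronize on
 *
 * This function waits until all asynchronous function calls that were
 * scheduled with @list as their running list have been done.
 */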
void async_synchronize_full_special(struct list_head *list)
{
	async_synchronize_cookie_special(next_cookie, list);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_special);

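/**
 * async_synchronize_cookie_special - synchronize asynchronous function calls on a given running list with cookie checkpointing
 * @cookie: async cookie to synchronize against
 * @running: running list to synchronize on
 *
 * This function waits until all asynchronous function calls on @running
 * that were scheduled prior to @cookie have been done.
 */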
void async_synchronize_cookie_special(async_cookie_t cookie, struct list_head *running)
{
	ktime_t starttime, delta, endtime;

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk("async_waiting @ %i\n", task_pid_nr(current));
		starttime = ktime_get();
	}

	wait_event(async_done, lowest_in_progress(running) >= cookie);

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		endtime = ktime_get();
		delta = ktime_sub(endtime, starttime);

		printk("async_continuing @ %i after %lli usec\n",
			task_pid_nr(current),
			(long long)ktime_to_ns(delta) >> 10);
	}
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_special);

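/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async cookie to synchronize against
 *
 * This function waits until all asynchronous function calls that were
 * scheduled prior to @cookie have been done.
 */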
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_special(cookie, &async_running);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);

static int async_thread(void *unused)
{
	DECLARE_WAITQUEUE(wq, current);
	add_wait_queue(&async_new, &wq);

	while (!kthread_should_stop()) {
		int ret = HZ;
		set_current_state(TASK_INTERRUPTIBLE);
		/*
		 * check the list head without lock.. false positives
		 * are dealt with inside run_one_entry() while holding
		 * the lock.
		 */
		rmb();
		if (!list_empty(&async_pending))
			run_one_entry();
		else
			ret = schedule_timeout(HZ);

		if (ret == 0) {
			/*
			 * we timed out, which means we as a thread are
			 * redundant. we sign off and die, but to avoid
			 * any races there is a last-straw check to see
			 * if work snuck in.
			 */
			atomic_dec(&thread_count);
			wmb(); /* manager must see our departure first */
			if (list_empty(&async_pending))
				break;
			/*
			 * whoops, work came in between us timing out
			 * and signing off; we need to stay alive and
			 * keep working.
			 */
			atomic_inc(&thread_count);
		}
	}
	remove_wait_queue(&async_new, &wq);

	return 0;
}

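/*
 * The manager keeps the worker pool sized to demand: one thread per
 * pending entry, capped at MAX_THREADS. Threads that find no work for
 * a second exit on their own (see async_thread() above).
 */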
static int async_manager_thread(void *unused)
{
	DECLARE_WAITQUEUE(wq, current);
	add_wait_queue(&async_new, &wq);

	while (!kthread_should_stop()) {
		int tc, ec;

		set_current_state(TASK_INTERRUPTIBLE);

		tc = atomic_read(&thread_count);
		rmb();
		ec = atomic_read(&entry_count);

		while (tc < ec && tc < MAX_THREADS) {
			kthread_run(async_thread, NULL, "async/%i", tc);
			atomic_inc(&thread_count);
			tc++;
		}

		schedule();
	}
	remove_wait_queue(&async_new, &wq);

	return 0;
}

static int __init async_init(void)
{
	if (async_enabled)
		kthread_run(async_manager_thread, NULL, "async/mgr");
	return 0;
}

/*
 * Asynchronous function calls are opt-in: they are only enabled when
 * the kernel is booted with the "fastboot" command line parameter.
 */
static int __init setup_async(char *str)
{
	async_enabled = 1;
	return 1;
}

__setup("fastboot", setup_async);

core_initcall(async_init);