/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */


/*

Goals and Theory of Operation

The primary goal of this feature is to reduce kernel boot time, by running
various independent hardware delays and discovery operations decoupled
from each other rather than strictly serialized.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously, out of order, while these operations still
have their externally visible parts happen sequentially and in-order,
not unlike how out-of-order CPUs retire their instructions in order.

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core will assign each scheduled event such a sequence cookie and
pass this to the called functions.

Before doing a globally visible operation, such as registering device
numbers, an asynchronously called function should call the
async_synchronize_cookie() function and pass in its own cookie. The
async_synchronize_cookie() function will make sure that all asynchronous
operations that were scheduled prior to the operation corresponding with the
cookie have completed.

Subsystem/driver initialization code that has scheduled asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before returning
from its init function. This is to maintain strict ordering between the
asynchronous and synchronous parts of the kernel.

*/
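
/*
 * Example usage, a sketch only (my_dev_probe(), my_hw_init(),
 * register_my_device() and struct my_device are hypothetical):
 *
 *	static void my_dev_probe(void *data, async_cookie_t cookie)
 *	{
 *		struct my_device *dev = data;
 *
 *		my_hw_init(dev);
 *
 *		async_synchronize_cookie(cookie);
 *		register_my_device(dev);
 *	}
 *
 * my_hw_init() is the slow, independent part and runs fully in parallel;
 * the async_synchronize_cookie() call makes sure everything scheduled
 * before this probe has completed before the globally visible
 * register_my_device() step, keeping device numbering in order.
 * The probe is kicked off with:
 *
 *	async_schedule(my_dev_probe, dev);
 */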

#include <linux/async.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <asm/atomic.h>

static async_cookie_t next_cookie = 1;

#define MAX_THREADS	256
#define MAX_WORK	32768

static LIST_HEAD(async_pending);
static LIST_HEAD(async_running);
static DEFINE_SPINLOCK(async_lock);

struct async_entry {
	struct list_head list;
	async_cookie_t   cookie;
	async_func_ptr	 *func;
	void             *data;
	struct list_head *running;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);
static DECLARE_WAIT_QUEUE_HEAD(async_new);

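/*
 * entry_count is the number of queued or running async entries;
 * thread_count is the number of live worker threads.  The manager
 * thread compares the two to decide when to spawn more workers.
 */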
static atomic_t entry_count;
static atomic_t thread_count;

extern int initcall_debug;


/*
 * Entries are dequeued from the pending list in cookie order, so the
 * head of a running list always holds the lowest cookie on that list
 * and is lower than anything still pending.
 *
 * MUST be called with async_lock held!
 */
static async_cookie_t __lowest_in_progress(struct list_head *running)
{
	struct async_entry *entry;

	if (!list_empty(running)) {
		entry = list_first_entry(running,
			struct async_entry, list);
		return entry->cookie;
	} else if (!list_empty(&async_pending)) {
		entry = list_first_entry(&async_pending,
			struct async_entry, list);
		return entry->cookie;
	} else {
		/* nothing in progress... next_cookie is "infinity" */
		return next_cookie;
	}
}

/*
 * pick the first pending entry and run it
 */
static void run_one_entry(void)
{
	unsigned long flags;
	struct async_entry *entry;
	ktime_t calltime, delta, rettime;

	/* 1) pick one task from the pending queue */

	spin_lock_irqsave(&async_lock, flags);
	if (list_empty(&async_pending))
		goto out;
	entry = list_first_entry(&async_pending, struct async_entry, list);

	/* 2) move it to the running queue */
	list_del(&entry->list);
	list_add_tail(&entry->list, entry->running);
	spin_unlock_irqrestore(&async_lock, flags);

	/* 3) run it (and print duration) */
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk("calling  %lli_%pF @ %i\n", entry->cookie,
			entry->func, task_pid_nr(current));
		calltime = ktime_get();
	}
	entry->func(entry->data, entry->cookie);
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk("initcall %lli_%pF returned 0 after %lld usecs\n",
			entry->cookie, entry->func, ktime_to_ns(delta) >> 10);
	}

	/* 4) remove it from the running queue */
	spin_lock_irqsave(&async_lock, flags);
	list_del(&entry->list);

	/* 5) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 6) wake up any waiters. */
	wake_up(&async_done);
	return;

out:
	spin_unlock_irqrestore(&async_lock, flags);
}


static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct list_head *running)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory.. run synchronously */
		ptr(data, newcookie);
		return newcookie;
	}
	entry->func = ptr;
	entry->data = data;
	entry->running = running;

	spin_lock_irqsave(&async_lock, flags);
	newcookie = entry->cookie = next_cookie++;
	list_add_tail(&entry->list, &async_pending);
	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);
	wake_up(&async_new);
	return newcookie;
}

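/**
 * async_schedule - schedule a function for asynchronous execution
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 *
 * Returns an async cookie that may be used for checkpointing later.
 * Note: this function may be called from atomic or non-atomic contexts.
 */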
async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
{
	return __async_schedule(ptr, data, &async_running);
}
EXPORT_SYMBOL_GPL(async_schedule);

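/**
 * async_schedule_special - schedule a function with a private running list
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @running: list the entry is kept on while it executes
 *
 * Returns an async cookie that may be used for checkpointing later.
 * Tracking the entry on a caller-owned @running list lets a subsystem
 * synchronize against only its own asynchronous work.
 */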
async_cookie_t async_schedule_special(async_func_ptr *ptr, void *data, struct list_head *running)
{
	return __async_schedule(ptr, data, running);
}
EXPORT_SYMBOL_GPL(async_schedule_special);

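/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls that have
 * been scheduled so far have been done.
 */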
void async_synchronize_full(void)
{
	async_synchronize_cookie(next_cookie);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);

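/*
 * For example, an init routine that schedules asynchronous probes but
 * shares global state with synchronous code should fully synchronize
 * before returning, as described at the top of this file.  A sketch
 * (my_subsys_init(), my_probe() and the device variables are
 * hypothetical):
 *
 *	static int __init my_subsys_init(void)
 *	{
 *		async_schedule(my_probe, &my_first_dev);
 *		async_schedule(my_probe, &my_second_dev);
 *
 *		async_synchronize_full();
 *		return 0;
 *	}
 */

/**
 * async_synchronize_full_special - synchronize all entries on a running list
 * @list: running list to synchronize on
 *
 * This function waits until all asynchronous function calls tracked on
 * the @list running list have been done.
 */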
void async_synchronize_full_special(struct list_head *list)
{
	async_synchronize_cookie_special(next_cookie, list);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_special);

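/*
 * A subsystem can keep its asynchronous work on a private running list
 * and synchronize against only that work.  A sketch (my_async_func()
 * and my_async_running are hypothetical):
 *
 *	static LIST_HEAD(my_async_running);
 *
 *	async_schedule_special(my_async_func, data, &my_async_running);
 *	...
 *	async_synchronize_full_special(&my_async_running);
 */

/**
 * async_synchronize_cookie_special - synchronize asynchronous function calls on a running list with cookie checkpointing
 * @cookie: async cookie to synchronize to
 * @running: running list to synchronize on
 *
 * This function waits until all asynchronous function calls on @running
 * that were scheduled prior to @cookie have been done.
 */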
void async_synchronize_cookie_special(async_cookie_t cookie, struct list_head *running)
{
	ktime_t starttime, delta, endtime;

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk("async_waiting @ %i\n", task_pid_nr(current));
		starttime = ktime_get();
	}

	wait_event(async_done, __lowest_in_progress(running) >= cookie);

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		endtime = ktime_get();
		delta = ktime_sub(endtime, starttime);

		printk("async_continuing @ %i after %lli usec\n",
			task_pid_nr(current), ktime_to_ns(delta) >> 10);
	}
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_special);

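/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async cookie to synchronize to
 *
 * This function waits until all asynchronous function calls that were
 * scheduled prior to @cookie have been done.
 */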
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_special(cookie, &async_running);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);


static int async_thread(void *unused)
{
	DECLARE_WAITQUEUE(wq, current);
	add_wait_queue(&async_new, &wq);

	while (!kthread_should_stop()) {
		int ret = HZ;
		set_current_state(TASK_INTERRUPTIBLE);
		/*
		 * check the list head without lock.. false positives
		 * are dealt with inside run_one_entry() while holding
		 * the lock.
		 */
		rmb();
		if (!list_empty(&async_pending))
			run_one_entry();
		else
			ret = schedule_timeout(HZ);

		if (ret == 0) {
			/*
			 * we timed out, this means we as thread are redundant.
			 * we sign off and die, but to avoid any races there is
			 * a last-straw check to see if work snuck in.
			 */
			atomic_dec(&thread_count);
			wmb(); /* manager must see our departure first */
			if (list_empty(&async_pending))
				break;
			/*
			 * whoops, work came in between us timing out and us
			 * signing off; we need to stay alive and keep working.
			 */
			atomic_inc(&thread_count);
		}
	}
	remove_wait_queue(&async_new, &wq);

	return 0;
}

static int async_manager_thread(void *unused)
{
	DECLARE_WAITQUEUE(wq, current);
	add_wait_queue(&async_new, &wq);

	while (!kthread_should_stop()) {
		int tc, ec;

		set_current_state(TASK_INTERRUPTIBLE);

		tc = atomic_read(&thread_count);
		rmb();
		ec = atomic_read(&entry_count);

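		/* spawn one worker per outstanding entry, up to MAX_THREADS */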
		while (tc < ec && tc < MAX_THREADS) {
			kthread_run(async_thread, NULL, "async/%i", tc);
			atomic_inc(&thread_count);
			tc++;
		}

		schedule();
	}
	remove_wait_queue(&async_new, &wq);

	return 0;
}

static int __init async_init(void)
{
	kthread_run(async_manager_thread, NULL, "async/mgr");
	return 0;
}

core_initcall(async_init);