/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */


/*

Goals and Theory of Operation

The primary goal of this feature is to reduce kernel boot time by
decoupling various independent hardware delays and discovery operations
from one another instead of strictly serializing them.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously, out of order, while their externally visible parts
still happen sequentially and in order (not unlike how out-of-order
CPUs retire their instructions in order).

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core will assign each scheduled event such a sequence cookie and
pass this to the called functions.

Before doing a globally visible operation, such as registering device
numbers, the asynchronously called function should call
async_synchronize_cookie() and pass in its own cookie. The
async_synchronize_cookie() function will make sure that all asynchronous
operations that were scheduled prior to the operation corresponding to
the cookie have completed, as the sketch below illustrates.
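
For illustration only, a minimal sketch of that pattern (my_probe,
my_device, do_slow_discovery and register_device_numbers are all
hypothetical names, not part of this file):

	static void my_probe(void *data, async_cookie_t cookie)
	{
		struct my_device *dev = data;

		do_slow_discovery(dev);

		async_synchronize_cookie(cookie);
		register_device_numbers(dev);
	}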

Subsystem/driver initialization code that schedules asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before returning
from its init function. This maintains strict ordering between the
asynchronous and synchronous parts of the kernel, as sketched below.
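
Again a sketch only (my_init, my_probe and my_device are hypothetical):
the init code schedules its probe asynchronously, then waits for all
outstanding async work before returning:

	static int __init my_init(void)
	{
		async_schedule(my_probe, &my_device);

		async_synchronize_full();
		return 0;
	}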

*/

#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/ktime.h>
#include <linux/export.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

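/* the next cookie to hand out; incremented under async_lock */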
static async_cookie_t next_cookie = 1;

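/*
 * Cap on outstanding async entries; past this, __async_schedule() falls
 * back to running the function synchronously.
 */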
#define MAX_WORK	32768

static LIST_HEAD(async_pending);
static ASYNC_DOMAIN(async_running);
static LIST_HEAD(async_domains);
static DEFINE_SPINLOCK(async_lock);
static DEFINE_MUTEX(async_register_mutex);

struct async_entry {
	struct list_head	list;
	struct work_struct	work;
	async_cookie_t		cookie;
	async_func_ptr		*func;
	void			*data;
	struct async_domain	*running;
};

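/* woken whenever an entry completes, so that synchronizers can re-check */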
static DECLARE_WAIT_QUEUE_HEAD(async_done);

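/* number of async entries currently outstanding (pending or running) */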
static atomic_t entry_count;


/*
 * MUST be called with the lock held!
 */
static async_cookie_t  __lowest_in_progress(struct async_domain *running)
{
	async_cookie_t first_running = next_cookie;	/* infinity value */
	async_cookie_t first_pending = next_cookie;	/* ditto */
	struct async_entry *entry;

	/*
	 * Both running and pending lists are sorted but not disjoint.
	 * Take the first cookies from both and return the min.
	 */
	if (!list_empty(&running->domain)) {
		entry = list_first_entry(&running->domain, typeof(*entry), list);
		first_running = entry->cookie;
	}

	list_for_each_entry(entry, &async_pending, list) {
		if (entry->running == running) {
			first_pending = entry->cookie;
			break;
		}
	}

	return min(first_running, first_pending);
}

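/* same as __lowest_in_progress(), but takes and releases async_lock itself */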
static async_cookie_t  lowest_in_progress(struct async_domain *running)
{
	unsigned long flags;
	async_cookie_t ret;

	spin_lock_irqsave(&async_lock, flags);
	ret = __lowest_in_progress(running);
	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}

/*
 * move an entry from the pending to the running queue, run it, and
 * retire it
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	struct async_entry *pos;
	unsigned long flags;
	ktime_t uninitialized_var(calltime), delta, rettime;
	struct async_domain *running = entry->running;

	/* 1) move self to the running queue, make sure it stays sorted */
	spin_lock_irqsave(&async_lock, flags);
	list_for_each_entry_reverse(pos, &running->domain, list)
		if (entry->cookie < pos->cookie)
			break;
	list_move_tail(&entry->list, &pos->list);
	spin_unlock_irqrestore(&async_lock, flags);

	/* 2) run (and print duration) */
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk(KERN_DEBUG "calling  %lli_%pF @ %i\n",
			(long long)entry->cookie,
			entry->func, task_pid_nr(current));
		calltime = ktime_get();
	}
	entry->func(entry->data, entry->cookie);
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk(KERN_DEBUG "initcall %lli_%pF returned 0 after %lld usecs\n",
			(long long)entry->cookie,
			entry->func,
			(long long)ktime_to_ns(delta) >> 10);
	}

	/* 3) remove self from the running queue */
	spin_lock_irqsave(&async_lock, flags);
	list_del(&entry->list);
	if (running->registered && --running->count == 0)
		list_del_init(&running->node);

	/* 4) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 5) wake up any waiters */
	wake_up(&async_done);
}

static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct async_domain *running)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory or overloaded: run synchronously */
		ptr(data, newcookie);
		return newcookie;
	}
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = ptr;
	entry->data = data;
	entry->running = running;

	spin_lock_irqsave(&async_lock, flags);
	newcookie = entry->cookie = next_cookie++;
	list_add_tail(&entry->list, &async_pending);
	if (running->registered && running->count++ == 0)
		list_add_tail(&running->node, &async_domains);
	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* mark that this task has queued an async job, used by module init */
	current->flags |= PF_USED_ASYNC;

	/* schedule for execution */
	queue_work(system_unbound_wq, &entry->work);

	return newcookie;
}

/**
 * async_schedule - schedule a function for asynchronous execution
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
{
	return __async_schedule(ptr, data, &async_running);
}
EXPORT_SYMBOL_GPL(async_schedule);

/**
 * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @running: running list for the domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * The synchronization domain is specified via the running queue @running;
 * @running may then be passed to the async_synchronize_*_domain() functions
 * to wait within that domain rather than globally.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
				     struct async_domain *running)
{
	return __async_schedule(ptr, data, running);
}
EXPORT_SYMBOL_GPL(async_schedule_domain);
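
/*
 * Illustrative sketch only (my_domain, my_probe_fn and dev are hypothetical):
 * scheduling into a private domain lets callers later synchronize against
 * just that domain's work rather than all async work in the system:
 *
 *	static ASYNC_DOMAIN(my_domain);
 *
 *	async_schedule_domain(my_probe_fn, dev, &my_domain);
 *	...
 *	async_synchronize_full_domain(&my_domain);
 */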

/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
	mutex_lock(&async_register_mutex);
	do {
		struct async_domain *domain = NULL;

		spin_lock_irq(&async_lock);
		if (!list_empty(&async_domains))
			domain = list_first_entry(&async_domains, typeof(*domain), node);
		spin_unlock_irq(&async_lock);

		async_synchronize_cookie_domain(next_cookie, domain);
	} while (!list_empty(&async_domains));
	mutex_unlock(&async_register_mutex);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);

/**
 * async_unregister_domain - ensure no more anonymous waiters on this domain
 * @domain: idle domain to flush out of any async_synchronize_full instances
 *
 * async_synchronize_{cookie|full}_domain() are not flushed since callers
 * of these routines should know the lifetime of @domain.
 *
 * Prefer ASYNC_DOMAIN_EXCLUSIVE() declarations over flushing.
 */
void async_unregister_domain(struct async_domain *domain)
{
	mutex_lock(&async_register_mutex);
	spin_lock_irq(&async_lock);
	WARN_ON(!domain->registered || !list_empty(&domain->node) ||
		!list_empty(&domain->domain));
	domain->registered = 0;
	spin_unlock_irq(&async_lock);
	mutex_unlock(&async_register_mutex);
}
EXPORT_SYMBOL_GPL(async_unregister_domain);

/**
 * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
 * @domain: running list to synchronize on
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by the running list @domain have been done.
 */
void async_synchronize_full_domain(struct async_domain *domain)
{
	async_synchronize_cookie_domain(next_cookie, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);

/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @running: running list to synchronize on
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by the running list @running that were
 * submitted prior to @cookie have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *running)
{
	ktime_t uninitialized_var(starttime), delta, endtime;

	if (!running)
		return;

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
		starttime = ktime_get();
	}

	wait_event(async_done, lowest_in_progress(running) >= cookie);

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		endtime = ktime_get();
		delta = ktime_sub(endtime, starttime);

		printk(KERN_DEBUG "async_continuing @ %i after %lli usec\n",
			task_pid_nr(current),
			(long long)ktime_to_ns(delta) >> 10);
	}
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);

/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_domain(cookie, &async_running);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);