1 /* 2 * async.c: Asynchronous function calls for boot performance 3 * 4 * (C) Copyright 2009 Intel Corporation 5 * Author: Arjan van de Ven <arjan@linux.intel.com> 6 * 7 * This program is free software; you can redistribute it and/or 8 * modify it under the terms of the GNU General Public License 9 * as published by the Free Software Foundation; version 2 10 * of the License. 11 */ 12 13 14 /* 15 16 Goals and Theory of Operation 17 18 The primary goal of this feature is to reduce the kernel boot time, 19 by doing various independent hardware delays and discovery operations 20 decoupled and not strictly serialized. 21 22 More specifically, the asynchronous function call concept allows 23 certain operations (primarily during system boot) to happen 24 asynchronously, out of order, while these operations still 25 have their externally visible parts happen sequentially and in-order. 26 (not unlike how out-of-order CPUs retire their instructions in order) 27 28 Key to the asynchronous function call implementation is the concept of 29 a "sequence cookie" (which, although it has an abstracted type, can be 30 thought of as a monotonically incrementing number). 31 32 The async core will assign each scheduled event such a sequence cookie and 33 pass this to the called functions. 34 35 The asynchronously called function should before doing a globally visible 36 operation, such as registering device numbers, call the 37 async_synchronize_cookie() function and pass in its own cookie. The 38 async_synchronize_cookie() function will make sure that all asynchronous 39 operations that were scheduled prior to the operation corresponding with the 40 cookie have completed. 41 42 Subsystem/driver initialization code that scheduled asynchronous probe 43 functions, but which shares global resources with other drivers/subsystems 44 that do not use the asynchronous call feature, need to do a full 45 synchronization with the async_synchronize_full() function, before returning 46 from their init function. This is to maintain strict ordering between the 47 asynchronous and synchronous parts of the kernel. 48 49 */ 50 51 #include <linux/async.h> 52 #include <linux/module.h> 53 #include <linux/wait.h> 54 #include <linux/sched.h> 55 #include <linux/init.h> 56 #include <linux/kthread.h> 57 #include <linux/delay.h> 58 #include <asm/atomic.h> 59 60 static async_cookie_t next_cookie = 1; 61 62 #define MAX_THREADS 256 63 #define MAX_WORK 32768 64 65 static LIST_HEAD(async_pending); 66 static LIST_HEAD(async_running); 67 static DEFINE_SPINLOCK(async_lock); 68 69 static int async_enabled = 0; 70 71 struct async_entry { 72 struct list_head list; 73 async_cookie_t cookie; 74 async_func_ptr *func; 75 void *data; 76 struct list_head *running; 77 }; 78 79 static DECLARE_WAIT_QUEUE_HEAD(async_done); 80 static DECLARE_WAIT_QUEUE_HEAD(async_new); 81 82 static atomic_t entry_count; 83 static atomic_t thread_count; 84 85 extern int initcall_debug; 86 87 88 /* 89 * MUST be called with the lock held! 90 */ 91 static async_cookie_t __lowest_in_progress(struct list_head *running) 92 { 93 struct async_entry *entry; 94 if (!list_empty(running)) { 95 entry = list_first_entry(running, 96 struct async_entry, list); 97 return entry->cookie; 98 } else if (!list_empty(&async_pending)) { 99 entry = list_first_entry(&async_pending, 100 struct async_entry, list); 101 return entry->cookie; 102 } else { 103 /* nothing in progress... next_cookie is "infinity" */ 104 return next_cookie; 105 } 106 107 } 108 109 static async_cookie_t lowest_in_progress(struct list_head *running) 110 { 111 unsigned long flags; 112 async_cookie_t ret; 113 114 spin_lock_irqsave(&async_lock, flags); 115 ret = __lowest_in_progress(running); 116 spin_unlock_irqrestore(&async_lock, flags); 117 return ret; 118 } 119 /* 120 * pick the first pending entry and run it 121 */ 122 static void run_one_entry(void) 123 { 124 unsigned long flags; 125 struct async_entry *entry; 126 ktime_t calltime, delta, rettime; 127 128 /* 1) pick one task from the pending queue */ 129 130 spin_lock_irqsave(&async_lock, flags); 131 if (list_empty(&async_pending)) 132 goto out; 133 entry = list_first_entry(&async_pending, struct async_entry, list); 134 135 /* 2) move it to the running queue */ 136 list_del(&entry->list); 137 list_add_tail(&entry->list, entry->running); 138 spin_unlock_irqrestore(&async_lock, flags); 139 140 /* 3) run it (and print duration)*/ 141 if (initcall_debug && system_state == SYSTEM_BOOTING) { 142 printk("calling %lli_%pF @ %i\n", (long long)entry->cookie, 143 entry->func, task_pid_nr(current)); 144 calltime = ktime_get(); 145 } 146 entry->func(entry->data, entry->cookie); 147 if (initcall_debug && system_state == SYSTEM_BOOTING) { 148 rettime = ktime_get(); 149 delta = ktime_sub(rettime, calltime); 150 printk("initcall %lli_%pF returned 0 after %lld usecs\n", 151 (long long)entry->cookie, 152 entry->func, 153 (long long)ktime_to_ns(delta) >> 10); 154 } 155 156 /* 4) remove it from the running queue */ 157 spin_lock_irqsave(&async_lock, flags); 158 list_del(&entry->list); 159 160 /* 5) free the entry */ 161 kfree(entry); 162 atomic_dec(&entry_count); 163 164 spin_unlock_irqrestore(&async_lock, flags); 165 166 /* 6) wake up any waiters. */ 167 wake_up(&async_done); 168 return; 169 170 out: 171 spin_unlock_irqrestore(&async_lock, flags); 172 } 173 174 175 static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct list_head *running) 176 { 177 struct async_entry *entry; 178 unsigned long flags; 179 async_cookie_t newcookie; 180 181 182 /* allow irq-off callers */ 183 entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC); 184 185 /* 186 * If we're out of memory or if there's too much work 187 * pending already, we execute synchronously. 188 */ 189 if (!async_enabled || !entry || atomic_read(&entry_count) > MAX_WORK) { 190 kfree(entry); 191 spin_lock_irqsave(&async_lock, flags); 192 newcookie = next_cookie++; 193 spin_unlock_irqrestore(&async_lock, flags); 194 195 /* low on memory.. run synchronously */ 196 ptr(data, newcookie); 197 return newcookie; 198 } 199 entry->func = ptr; 200 entry->data = data; 201 entry->running = running; 202 203 spin_lock_irqsave(&async_lock, flags); 204 newcookie = entry->cookie = next_cookie++; 205 list_add_tail(&entry->list, &async_pending); 206 atomic_inc(&entry_count); 207 spin_unlock_irqrestore(&async_lock, flags); 208 wake_up(&async_new); 209 return newcookie; 210 } 211 212 /** 213 * async_schedule - schedule a function for asynchronous execution 214 * @ptr: function to execute asynchronously 215 * @data: data pointer to pass to the function 216 * 217 * Returns an async_cookie_t that may be used for checkpointing later. 218 * Note: This function may be called from atomic or non-atomic contexts. 219 */ 220 async_cookie_t async_schedule(async_func_ptr *ptr, void *data) 221 { 222 return __async_schedule(ptr, data, &async_running); 223 } 224 EXPORT_SYMBOL_GPL(async_schedule); 225 226 /** 227 * async_schedule_special - schedule a function for asynchronous execution with a special running queue 228 * @ptr: function to execute asynchronously 229 * @data: data pointer to pass to the function 230 * @running: list head to add to while running 231 * 232 * Returns an async_cookie_t that may be used for checkpointing later. 233 * @running may be used in the async_synchronize_*_special() functions 234 * to wait on a special running queue rather than on the global running 235 * queue. 236 * Note: This function may be called from atomic or non-atomic contexts. 237 */ 238 async_cookie_t async_schedule_special(async_func_ptr *ptr, void *data, struct list_head *running) 239 { 240 return __async_schedule(ptr, data, running); 241 } 242 EXPORT_SYMBOL_GPL(async_schedule_special); 243 244 /** 245 * async_synchronize_full - synchronize all asynchronous function calls 246 * 247 * This function waits until all asynchronous function calls have been done. 248 */ 249 void async_synchronize_full(void) 250 { 251 do { 252 async_synchronize_cookie(next_cookie); 253 } while (!list_empty(&async_running) || !list_empty(&async_pending)); 254 } 255 EXPORT_SYMBOL_GPL(async_synchronize_full); 256 257 /** 258 * async_synchronize_full_special - synchronize all asynchronous function calls for a running list 259 * @list: running list to synchronize on 260 * 261 * This function waits until all asynchronous function calls for the running 262 * list @list have been done. 263 */ 264 void async_synchronize_full_special(struct list_head *list) 265 { 266 async_synchronize_cookie_special(next_cookie, list); 267 } 268 EXPORT_SYMBOL_GPL(async_synchronize_full_special); 269 270 /** 271 * async_synchronize_cookie_special - synchronize asynchronous function calls on a running list with cookie checkpointing 272 * @cookie: async_cookie_t to use as checkpoint 273 * @running: running list to synchronize on 274 * 275 * This function waits until all asynchronous function calls for the running 276 * list @list submitted prior to @cookie have been done. 277 */ 278 void async_synchronize_cookie_special(async_cookie_t cookie, struct list_head *running) 279 { 280 ktime_t starttime, delta, endtime; 281 282 if (initcall_debug && system_state == SYSTEM_BOOTING) { 283 printk("async_waiting @ %i\n", task_pid_nr(current)); 284 starttime = ktime_get(); 285 } 286 287 wait_event(async_done, lowest_in_progress(running) >= cookie); 288 289 if (initcall_debug && system_state == SYSTEM_BOOTING) { 290 endtime = ktime_get(); 291 delta = ktime_sub(endtime, starttime); 292 293 printk("async_continuing @ %i after %lli usec\n", 294 task_pid_nr(current), 295 (long long)ktime_to_ns(delta) >> 10); 296 } 297 } 298 EXPORT_SYMBOL_GPL(async_synchronize_cookie_special); 299 300 /** 301 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing 302 * @cookie: async_cookie_t to use as checkpoint 303 * 304 * This function waits until all asynchronous function calls prior to @cookie 305 * have been done. 306 */ 307 void async_synchronize_cookie(async_cookie_t cookie) 308 { 309 async_synchronize_cookie_special(cookie, &async_running); 310 } 311 EXPORT_SYMBOL_GPL(async_synchronize_cookie); 312 313 314 static int async_thread(void *unused) 315 { 316 DECLARE_WAITQUEUE(wq, current); 317 add_wait_queue(&async_new, &wq); 318 319 while (!kthread_should_stop()) { 320 int ret = HZ; 321 set_current_state(TASK_INTERRUPTIBLE); 322 /* 323 * check the list head without lock.. false positives 324 * are dealt with inside run_one_entry() while holding 325 * the lock. 326 */ 327 rmb(); 328 if (!list_empty(&async_pending)) 329 run_one_entry(); 330 else 331 ret = schedule_timeout(HZ); 332 333 if (ret == 0) { 334 /* 335 * we timed out, this means we as thread are redundant. 336 * we sign off and die, but we to avoid any races there 337 * is a last-straw check to see if work snuck in. 338 */ 339 atomic_dec(&thread_count); 340 wmb(); /* manager must see our departure first */ 341 if (list_empty(&async_pending)) 342 break; 343 /* 344 * woops work came in between us timing out and us 345 * signing off; we need to stay alive and keep working. 346 */ 347 atomic_inc(&thread_count); 348 } 349 } 350 remove_wait_queue(&async_new, &wq); 351 352 return 0; 353 } 354 355 static int async_manager_thread(void *unused) 356 { 357 DECLARE_WAITQUEUE(wq, current); 358 add_wait_queue(&async_new, &wq); 359 360 while (!kthread_should_stop()) { 361 int tc, ec; 362 363 set_current_state(TASK_INTERRUPTIBLE); 364 365 tc = atomic_read(&thread_count); 366 rmb(); 367 ec = atomic_read(&entry_count); 368 369 while (tc < ec && tc < MAX_THREADS) { 370 if (IS_ERR(kthread_run(async_thread, NULL, "async/%i", 371 tc))) { 372 msleep(100); 373 continue; 374 } 375 atomic_inc(&thread_count); 376 tc++; 377 } 378 379 schedule(); 380 } 381 remove_wait_queue(&async_new, &wq); 382 383 return 0; 384 } 385 386 static int __init async_init(void) 387 { 388 if (async_enabled) 389 if (IS_ERR(kthread_run(async_manager_thread, NULL, 390 "async/mgr"))) 391 async_enabled = 0; 392 return 0; 393 } 394 395 static int __init setup_async(char *str) 396 { 397 async_enabled = 1; 398 return 1; 399 } 400 401 __setup("fastboot", setup_async); 402 403 404 core_initcall(async_init); 405