/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate_wait.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>

#include "rcu.h"

/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
	struct rcu_head *rcucblist;	/* List of pending callbacks (CBs). */
	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
	struct rcu_head **curtail;	/* ->next pointer of last CB. */
};

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_sched_ctrlblk = {
	.donetail	= &rcu_sched_ctrlblk.rcucblist,
	.curtail	= &rcu_sched_ctrlblk.rcucblist,
};

static struct rcu_ctrlblk rcu_bh_ctrlblk = {
	.donetail	= &rcu_bh_ctrlblk.rcucblist,
	.curtail	= &rcu_bh_ctrlblk.rcucblist,
};

void rcu_barrier_bh(void)
{
	wait_rcu_gp(call_rcu_bh);
}
EXPORT_SYMBOL(rcu_barrier_bh);

void rcu_barrier_sched(void)
{
	wait_rcu_gp(call_rcu_sched);
}
EXPORT_SYMBOL(rcu_barrier_sched);

/*
 * Helper function for rcu_sched_qs() and rcu_bh_qs().
 * The caller must have disabled interrupts to prevent races with
 * interrupt handlers invoking call_rcu().
 */
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
	if (rcp->donetail != rcp->curtail) {
		rcp->donetail = rcp->curtail;
		return 1;
	}

	return 0;
}

/*
 * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state.  Use "+" instead of "||" to defeat short circuiting.
 */
void rcu_sched_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
	    rcu_qsctr_help(&rcu_bh_ctrlblk))
		raise_softirq(RCU_SOFTIRQ);
	local_irq_restore(flags);
}

/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_bh_ctrlblk))
		raise_softirq(RCU_SOFTIRQ);
	local_irq_restore(flags);
}
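/*
 * Usage sketch (illustrative only, not compiled): how a caller would
 * typically post a callback with call_rcu_sched().  The names "foo",
 * "foo_reclaim", and "foo_delete" are hypothetical, and the sketch
 * assumes <linux/slab.h> for kfree().
 */
#if 0
struct foo {
	int key;
	struct rcu_head rcu;		/* Embedded callback header. */
};

/* Runs after a grace period elapses; reclaims the enclosing struct. */
static void foo_reclaim(struct rcu_head *head)
{
	struct foo *fp = container_of(head, struct foo, rcu);

	kfree(fp);
}

/* Unlink fp from all readers' view first, then defer the actual free. */
static void foo_delete(struct foo *fp)
{
	call_rcu_sched(&fp->rcu, foo_reclaim);
}
#endif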
/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_check_callbacks(int user)
{
	if (user)
		rcu_sched_qs();
	if (user || !in_softirq())
		rcu_bh_qs();
}

/*
 * Invoke the RCU callbacks on the specified rcu_ctrlblk structure
 * whose grace period has elapsed.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
	struct rcu_head *next, *list;
	unsigned long flags;

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	if (rcp->donetail == &rcp->rcucblist) {
		/* No callbacks ready, so just leave. */
		local_irq_restore(flags);
		return;
	}
	list = rcp->rcucblist;
	rcp->rcucblist = *rcp->donetail;
	*rcp->donetail = NULL;
	if (rcp->curtail == rcp->donetail)
		rcp->curtail = &rcp->rcucblist;
	rcp->donetail = &rcp->rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		__rcu_reclaim("", list);
		local_bh_enable();
		list = next;
	}
}

static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
{
	__rcu_process_callbacks(&rcu_sched_ctrlblk);
	__rcu_process_callbacks(&rcu_bh_ctrlblk);
}

/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_sched() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_sched() is a quiescent
 * state, and so on a UP system, synchronize_sched() need do nothing.
 * Ditto for synchronize_rcu_bh().  (But Lai Jiangshan points out the
 * benefits of doing might_sleep() to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 */
void synchronize_sched(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_sched() in RCU read-side critical section");
}
EXPORT_SYMBOL_GPL(synchronize_sched);

/*
 * Helper function for call_rcu() and call_rcu_bh().
 */
static void __call_rcu(struct rcu_head *head,
		       rcu_callback_t func,
		       struct rcu_ctrlblk *rcp)
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcp->curtail = head;
	rcp->curtail = &head->next;
	local_irq_restore(flags);

	if (unlikely(is_idle_task(current))) {
		/* force scheduling for rcu_sched_qs() */
		resched_cpu(0);
	}
}

/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  Since we have but one CPU, that would be after any quiescent
 * state.
 */
void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
{
	__call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);

/*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
 */
void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
{
	__call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);

void __init rcu_init(void)
{
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
	rcu_early_boot_tests();
}
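/*
 * Usage sketch (illustrative only, not compiled): pairing a sched-RCU
 * reader with a synchronize_sched()-based updater.  This reuses the
 * hypothetical struct foo from the sketch above; "shared_foo",
 * "foo_read_key", and "foo_replace" are likewise made-up names.
 */
#if 0
static struct foo __rcu *shared_foo;

/* Reader: a preempt-disabled region is a sched-RCU read-side section. */
static int foo_read_key(void)
{
	struct foo *p;
	int key = -1;

	rcu_read_lock_sched();
	p = rcu_dereference_sched(shared_foo);
	if (p)
		key = p->key;
	rcu_read_unlock_sched();
	return key;
}

/* Updater: publish the new version, wait out old readers, then free. */
static void foo_replace(struct foo *newp)
{
	struct foo *oldp;

	oldp = rcu_dereference_protected(shared_foo, 1);
	rcu_assign_pointer(shared_foo, newp);
	synchronize_sched();	/* All pre-existing readers have finished. */
	kfree(oldp);
}
#endif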