/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H

#include <linux/cache.h>

static inline unsigned long get_state_synchronize_rcu(void)
{
	return 0;
}

static inline void cond_synchronize_rcu(unsigned long oldstate)
{
	might_sleep();
}

static inline unsigned long get_state_synchronize_sched(void)
{
	return 0;
}

static inline void cond_synchronize_sched(unsigned long oldstate)
{
	might_sleep();
}

static inline void rcu_barrier_bh(void)
{
	wait_rcu_gp(call_rcu_bh);
}

static inline void rcu_barrier_sched(void)
{
	wait_rcu_gp(call_rcu_sched);
}

static inline void synchronize_rcu_expedited(void)
{
	synchronize_sched();	/* Only one CPU, so pretty fast anyway!!! */
}

static inline void rcu_barrier(void)
{
	rcu_barrier_sched();	/* Only one CPU, so only one list of callbacks! */
}

static inline void synchronize_rcu_bh(void)
{
	synchronize_sched();
}

static inline void synchronize_rcu_bh_expedited(void)
{
	synchronize_sched();
}

static inline void synchronize_sched_expedited(void)
{
	synchronize_sched();
}

static inline void kfree_call_rcu(struct rcu_head *head,
				  rcu_callback_t func)
{
	call_rcu(head, func);
}

static inline void rcu_note_context_switch(void)
{
	rcu_sched_qs();
}

/*
 * Take advantage of the fact that there is only one CPU, which
 * allows us to ignore virtualization-based context switches.
 */
static inline void rcu_virt_note_context_switch(int cpu)
{
}

/*
 * Return the number of grace periods started.
 */
static inline unsigned long rcu_batches_started(void)
{
	return 0;
}

/*
 * Return the number of bottom-half grace periods started.
 */
static inline unsigned long rcu_batches_started_bh(void)
{
	return 0;
}

/*
 * Return the number of sched grace periods started.
 */
static inline unsigned long rcu_batches_started_sched(void)
{
	return 0;
}

/*
 * Return the number of grace periods completed.
 */
static inline unsigned long rcu_batches_completed(void)
{
	return 0;
}

/*
 * Return the number of bottom-half grace periods completed.
 */
static inline unsigned long rcu_batches_completed_bh(void)
{
	return 0;
}

/*
 * Return the number of sched grace periods completed.
 */
static inline unsigned long rcu_batches_completed_sched(void)
{
	return 0;
}

/*
 * Return the number of expedited grace periods completed.
 */
static inline unsigned long rcu_exp_batches_completed(void)
{
	return 0;
}

/*
 * Return the number of expedited sched grace periods completed.
 */
static inline unsigned long rcu_exp_batches_completed_sched(void)
{
	return 0;
}

static inline void rcu_force_quiescent_state(void)
{
}

static inline void rcu_bh_force_quiescent_state(void)
{
}

static inline void rcu_sched_force_quiescent_state(void)
{
}

static inline void show_rcu_gp_kthreads(void)
{
}

static inline void rcu_cpu_stall_reset(void)
{
}

static inline void rcu_idle_enter(void)
{
}

static inline void rcu_idle_exit(void)
{
}

static inline void rcu_irq_enter(void)
{
}

static inline void rcu_irq_exit_irqson(void)
{
}

static inline void rcu_irq_enter_irqson(void)
{
}

static inline void rcu_irq_exit(void)
{
}

static inline void exit_rcu(void)
{
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern int rcu_scheduler_active __read_mostly;
void rcu_scheduler_starting(void);
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
static inline void rcu_scheduler_starting(void)
{
}
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)

static inline bool rcu_is_watching(void)
{
	return __rcu_is_watching();
}

#else /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */

static inline bool rcu_is_watching(void)
{
	return true;
}

#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */

static inline void rcu_all_qs(void)
{
	barrier(); /* Avoid RCU read-side critical sections leaking across. */
}

/* RCUtree hotplug events */
#define rcutree_prepare_cpu	NULL
#define rcutree_online_cpu	NULL
#define rcutree_offline_cpu	NULL
#define rcutree_dead_cpu	NULL
#define rcutree_dying_cpu	NULL

#endif /* __LINUX_TINY_H */
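/*
 * Editorial example (not part of the original header): a sketch of how a
 * writer might use the polled grace-period API stubbed out above.
 * "struct foo", "gp", and example_replace() are hypothetical names used
 * only for illustration.  Under Tiny RCU (!SMP && !PREEMPT),
 * get_state_synchronize_rcu() always returns 0 and cond_synchronize_rcu()
 * reduces to might_sleep(): on a single non-preemptible CPU, the mere
 * ability to block implies that all pre-existing readers have already
 * completed, so no explicit wait is needed.  The sketch is wrapped in
 * "#if 0" so it cannot leak into a build.
 */
#if 0
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int val;
};

/* Publish a new version of *gp and free the old one once readers are done. */
static void example_replace(struct foo __rcu **gp, struct foo *newp)
{
	/* Caller is assumed to hold the update-side lock protecting *gp. */
	struct foo *oldp = rcu_dereference_protected(*gp, 1);
	unsigned long cookie;

	rcu_assign_pointer(*gp, newp);		/* Unlink the old version. */
	cookie = get_state_synchronize_rcu();	/* Snapshot grace-period state. */
	/* ... unrelated work may overlap the grace period here ... */
	cond_synchronize_rcu(cookie);		/* Wait only if no GP has elapsed. */
	kfree(oldp);				/* No reader can still hold oldp. */
}
#endif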