/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H

#include <linux/cache.h>

struct rcu_dynticks;
static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
{
	return 0;
}

static inline unsigned long get_state_synchronize_rcu(void)
{
	return 0;
}

static inline void cond_synchronize_rcu(unsigned long oldstate)
{
	might_sleep();
}

static inline unsigned long get_state_synchronize_sched(void)
{
	return 0;
}

static inline void cond_synchronize_sched(unsigned long oldstate)
{
	might_sleep();
}

extern void rcu_barrier_bh(void);
extern void rcu_barrier_sched(void);

static inline void synchronize_rcu_expedited(void)
{
	synchronize_sched();	/* Only one CPU, so pretty fast anyway!!! */
}

static inline void rcu_barrier(void)
{
	rcu_barrier_sched();	/* Only one CPU, so only one list of callbacks! */
}

static inline void synchronize_rcu_bh(void)
{
	synchronize_sched();
}

static inline void synchronize_rcu_bh_expedited(void)
{
	synchronize_sched();
}

static inline void synchronize_sched_expedited(void)
{
	synchronize_sched();
}

static inline void kfree_call_rcu(struct rcu_head *head,
				  rcu_callback_t func)
{
	call_rcu(head, func);
}

static inline void rcu_note_context_switch(void)
{
	rcu_sched_qs();
}

/*
 * Take advantage of the fact that there is only one CPU, which
 * allows us to ignore virtualization-based context switches.
 */
static inline void rcu_virt_note_context_switch(int cpu)
{
}

/*
 * Return the number of grace periods started.
 */
static inline unsigned long rcu_batches_started(void)
{
	return 0;
}

/*
 * Return the number of bottom-half grace periods started.
 */
static inline unsigned long rcu_batches_started_bh(void)
{
	return 0;
}

/*
 * Return the number of sched grace periods started.
 */
static inline unsigned long rcu_batches_started_sched(void)
{
	return 0;
}

/*
 * Return the number of grace periods completed.
 */
static inline unsigned long rcu_batches_completed(void)
{
	return 0;
}

/*
 * Return the number of bottom-half grace periods completed.
 */
static inline unsigned long rcu_batches_completed_bh(void)
{
	return 0;
}
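/*
 * Note that Tiny RCU does not number its grace periods: the
 * rcu_batches_*() and rcu_exp_batches_*() accessors in this header
 * unconditionally return zero, presumably existing only so that code
 * written against the Tree RCU interface also builds with Tiny RCU.
 */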
/*
 * Return the number of sched grace periods completed.
 */
static inline unsigned long rcu_batches_completed_sched(void)
{
	return 0;
}

/*
 * Return the number of expedited grace periods completed.
 */
static inline unsigned long rcu_exp_batches_completed(void)
{
	return 0;
}

/*
 * Return the number of expedited sched grace periods completed.
 */
static inline unsigned long rcu_exp_batches_completed_sched(void)
{
	return 0;
}

static inline void rcu_force_quiescent_state(void)
{
}

static inline void rcu_bh_force_quiescent_state(void)
{
}

static inline void rcu_sched_force_quiescent_state(void)
{
}

static inline void show_rcu_gp_kthreads(void)
{
}

static inline void rcu_cpu_stall_reset(void)
{
}

static inline void rcu_idle_enter(void)
{
}

static inline void rcu_idle_exit(void)
{
}

static inline void rcu_irq_enter(void)
{
}

static inline void rcu_irq_exit_irqson(void)
{
}

static inline void rcu_irq_enter_irqson(void)
{
}

static inline void rcu_irq_exit(void)
{
}

static inline void exit_rcu(void)
{
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern int rcu_scheduler_active __read_mostly;
void rcu_scheduler_starting(void);
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
static inline void rcu_scheduler_starting(void)
{
}
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)

static inline bool rcu_is_watching(void)
{
	return __rcu_is_watching();
}

#else /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */

static inline bool rcu_is_watching(void)
{
	return true;
}

#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */

static inline void rcu_all_qs(void)
{
	barrier(); /* Avoid RCU read-side critical sections leaking across. */
}

/* RCUtree hotplug events */
#define rcutree_prepare_cpu NULL
#define rcutree_online_cpu NULL
#define rcutree_offline_cpu NULL
#define rcutree_dead_cpu NULL
#define rcutree_dying_cpu NULL

#endif /* __LINUX_TINY_H */
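/*
 * A minimal usage sketch of the polling interface declared above
 * (illustrative only; do_other_work() and old_p are hypothetical).
 * An updater captures a grace-period cookie and later skips the
 * grace-period wait if one has already elapsed.  Under Tiny RCU the
 * cookie is always zero and cond_synchronize_rcu() reduces to
 * might_sleep(): with a single CPU and non-preemptible readers, the
 * mere ability to block implies that no reader is active.
 *
 *	unsigned long cookie = get_state_synchronize_rcu();
 *
 *	do_other_work();		// hypothetical: time passes here
 *	cond_synchronize_rcu(cookie);	// waits only if a full grace
 *					// period has not yet elapsed
 *	kfree(old_p);			// hypothetical: safe to free now
 */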