/*
 * urcu-mb.c
 *
 * Userspace RCU library with explicit memory barriers
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 * Copyright 2015 Red Hat, Inc.
 *
 * Ported to QEMU by Paolo Bonzini <pbonzini@redhat.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/rcu.h"
#include "qemu/atomic.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"
#if defined(CONFIG_MALLOC_TRIM)
#include <malloc.h>
#endif

/*
 * Global grace period counter.  Bit 0 is always one in rcu_gp_ctr.
 * Bits 1 and above are defined in synchronize_rcu.
 */
#define RCU_GP_LOCKED           (1UL << 0)
#define RCU_GP_CTR              (1UL << 1)

unsigned long rcu_gp_ctr = RCU_GP_LOCKED;

QemuEvent rcu_gp_event;
static QemuMutex rcu_registry_lock;
static QemuMutex rcu_sync_lock;

/*
 * Check whether a quiescent state was crossed between the beginning of
 * update_counter_and_wait and now.
 */
static inline int rcu_gp_ongoing(unsigned long *ctr)
{
    unsigned long v;

    v = atomic_read(ctr);
    return v && (v != rcu_gp_ctr);
}

/* Written to only by each individual reader.  Read by both the reader and
 * the writers.
 */
__thread struct rcu_reader_data rcu_reader;

/* Protected by rcu_registry_lock. */
typedef QLIST_HEAD(, rcu_reader_data) ThreadList;
static ThreadList registry = QLIST_HEAD_INITIALIZER(registry);

/* Wait for previous parity/grace period to be empty of readers. */
static void wait_for_readers(void)
{
    ThreadList qsreaders = QLIST_HEAD_INITIALIZER(qsreaders);
    struct rcu_reader_data *index, *tmp;

    for (;;) {
        /* We want to be notified of changes made to rcu_gp_ongoing
         * while we walk the list.
         */
        qemu_event_reset(&rcu_gp_event);

        /* Instead of using atomic_mb_set for index->waiting, and
         * atomic_mb_read for index->ctr, memory barriers are placed
         * manually since writes to different threads are independent.
         * qemu_event_reset has acquire semantics, so no memory barrier
         * is needed here.
         */
        QLIST_FOREACH(index, &registry, node) {
            atomic_set(&index->waiting, true);
        }

        /* Here, order the stores to index->waiting before the
         * loads of index->ctr.
         */
        smp_mb();

        QLIST_FOREACH_SAFE(index, &registry, node, tmp) {
            if (!rcu_gp_ongoing(&index->ctr)) {
                QLIST_REMOVE(index, node);
                QLIST_INSERT_HEAD(&qsreaders, index, node);

                /* No need for mb_set here; at worst we get some
                 * extra futex wakeups.
                 */
                atomic_set(&index->waiting, false);
            }
        }

        if (QLIST_EMPTY(&registry)) {
            break;
        }

        /* Wait for one thread to report a quiescent state and try again.
         * Release rcu_registry_lock, so rcu_(un)register_thread() doesn't
         * have to wait too long.
         *
         * rcu_register_thread() may add nodes to &registry; it will not
         * wake up synchronize_rcu, but that is okay because at least another
         * thread must exit its RCU read-side critical section before
         * synchronize_rcu is done.  The next iteration of the loop will
         * move the new thread's rcu_reader from &registry to &qsreaders,
         * because rcu_gp_ongoing() will return false.
         *
         * rcu_unregister_thread() may remove nodes from &qsreaders instead
         * of &registry if it runs during qemu_event_wait.  That's okay;
         * the node then will not be added back to &registry by QLIST_SWAP
         * below.  The invariant is that the node is part of one list when
         * rcu_registry_lock is released.
         */
        qemu_mutex_unlock(&rcu_registry_lock);
        qemu_event_wait(&rcu_gp_event);
        qemu_mutex_lock(&rcu_registry_lock);
    }

    /* Put back the reader list in the registry. */
    QLIST_SWAP(&registry, &qsreaders, node);
}
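
/*
 * For orientation, a minimal sketch of the reader side that the loop above
 * polls, assuming the urcu-mb scheme used by include/qemu/rcu.h (the real
 * inline implementation, including nesting via rcu_reader.depth, lives in
 * that header, not here).  A reader snapshots rcu_gp_ctr into its own
 * rcu_reader.ctr, so rcu_gp_ongoing() sees either zero (quiescent) or a
 * stale counter value (still inside an old grace period):
 *
 *     static inline void example_read_lock(void)
 *     {
 *         // Bit 0 (RCU_GP_LOCKED) is always set in rcu_gp_ctr, so a
 *         // nonzero ctr marks this thread as inside a critical section.
 *         atomic_set(&rcu_reader.ctr, atomic_read(&rcu_gp_ctr));
 *         smp_mb();   // order the store above before subsequent loads
 *     }
 *
 *     static inline void example_read_unlock(void)
 *     {
 *         atomic_mb_set(&rcu_reader.ctr, 0);
 *         if (atomic_read(&rcu_reader.waiting)) {
 *             atomic_set(&rcu_reader.waiting, false);
 *             qemu_event_set(&rcu_gp_event);  // wake wait_for_readers()
 *         }
 *     }
 */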
107 */ 108 atomic_set(&index->waiting, false); 109 } 110 } 111 112 if (QLIST_EMPTY(®istry)) { 113 break; 114 } 115 116 /* Wait for one thread to report a quiescent state and try again. 117 * Release rcu_registry_lock, so rcu_(un)register_thread() doesn't 118 * wait too much time. 119 * 120 * rcu_register_thread() may add nodes to ®istry; it will not 121 * wake up synchronize_rcu, but that is okay because at least another 122 * thread must exit its RCU read-side critical section before 123 * synchronize_rcu is done. The next iteration of the loop will 124 * move the new thread's rcu_reader from ®istry to &qsreaders, 125 * because rcu_gp_ongoing() will return false. 126 * 127 * rcu_unregister_thread() may remove nodes from &qsreaders instead 128 * of ®istry if it runs during qemu_event_wait. That's okay; 129 * the node then will not be added back to ®istry by QLIST_SWAP 130 * below. The invariant is that the node is part of one list when 131 * rcu_registry_lock is released. 132 */ 133 qemu_mutex_unlock(&rcu_registry_lock); 134 qemu_event_wait(&rcu_gp_event); 135 qemu_mutex_lock(&rcu_registry_lock); 136 } 137 138 /* put back the reader list in the registry */ 139 QLIST_SWAP(®istry, &qsreaders, node); 140 } 141 142 void synchronize_rcu(void) 143 { 144 qemu_mutex_lock(&rcu_sync_lock); 145 qemu_mutex_lock(&rcu_registry_lock); 146 147 if (!QLIST_EMPTY(®istry)) { 148 /* In either case, the atomic_mb_set below blocks stores that free 149 * old RCU-protected pointers. 150 */ 151 if (sizeof(rcu_gp_ctr) < 8) { 152 /* For architectures with 32-bit longs, a two-subphases algorithm 153 * ensures we do not encounter overflow bugs. 154 * 155 * Switch parity: 0 -> 1, 1 -> 0. 156 */ 157 atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR); 158 wait_for_readers(); 159 atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR); 160 } else { 161 /* Increment current grace period. */ 162 atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR); 163 } 164 165 wait_for_readers(); 166 } 167 168 qemu_mutex_unlock(&rcu_registry_lock); 169 qemu_mutex_unlock(&rcu_sync_lock); 170 } 171 172 173 #define RCU_CALL_MIN_SIZE 30 174 175 /* Multi-producer, single-consumer queue based on urcu/static/wfqueue.h 176 * from liburcu. Note that head is only used by the consumer. 177 */ 178 static struct rcu_head dummy; 179 static struct rcu_head *head = &dummy, **tail = &dummy.next; 180 static int rcu_call_count; 181 static QemuEvent rcu_call_ready_event; 182 183 static void enqueue(struct rcu_head *node) 184 { 185 struct rcu_head **old_tail; 186 187 node->next = NULL; 188 old_tail = atomic_xchg(&tail, &node->next); 189 atomic_mb_set(old_tail, node); 190 } 191 192 static struct rcu_head *try_dequeue(void) 193 { 194 struct rcu_head *node, *next; 195 196 retry: 197 /* Test for an empty list, which we do not expect. Note that for 198 * the consumer head and tail are always consistent. The head 199 * is consistent because only the consumer reads/writes it. 200 * The tail, because it is the first step in the enqueuing. 201 * It is only the next pointers that might be inconsistent. 202 */ 203 if (head == &dummy && atomic_mb_read(&tail) == &dummy.next) { 204 abort(); 205 } 206 207 /* If the head node has NULL in its next pointer, the value is 208 * wrong and we need to wait until its enqueuer finishes the update. 
209 */ 210 node = head; 211 next = atomic_mb_read(&head->next); 212 if (!next) { 213 return NULL; 214 } 215 216 /* Since we are the sole consumer, and we excluded the empty case 217 * above, the queue will always have at least two nodes: the 218 * dummy node, and the one being removed. So we do not need to update 219 * the tail pointer. 220 */ 221 head = next; 222 223 /* If we dequeued the dummy node, add it back at the end and retry. */ 224 if (node == &dummy) { 225 enqueue(node); 226 goto retry; 227 } 228 229 return node; 230 } 231 232 static void *call_rcu_thread(void *opaque) 233 { 234 struct rcu_head *node; 235 236 rcu_register_thread(); 237 238 for (;;) { 239 int tries = 0; 240 int n = atomic_read(&rcu_call_count); 241 242 /* Heuristically wait for a decent number of callbacks to pile up. 243 * Fetch rcu_call_count now, we only must process elements that were 244 * added before synchronize_rcu() starts. 245 */ 246 while (n == 0 || (n < RCU_CALL_MIN_SIZE && ++tries <= 5)) { 247 g_usleep(10000); 248 if (n == 0) { 249 qemu_event_reset(&rcu_call_ready_event); 250 n = atomic_read(&rcu_call_count); 251 if (n == 0) { 252 #if defined(CONFIG_MALLOC_TRIM) 253 malloc_trim(4 * 1024 * 1024); 254 #endif 255 qemu_event_wait(&rcu_call_ready_event); 256 } 257 } 258 n = atomic_read(&rcu_call_count); 259 } 260 261 atomic_sub(&rcu_call_count, n); 262 synchronize_rcu(); 263 qemu_mutex_lock_iothread(); 264 while (n > 0) { 265 node = try_dequeue(); 266 while (!node) { 267 qemu_mutex_unlock_iothread(); 268 qemu_event_reset(&rcu_call_ready_event); 269 node = try_dequeue(); 270 if (!node) { 271 qemu_event_wait(&rcu_call_ready_event); 272 node = try_dequeue(); 273 } 274 qemu_mutex_lock_iothread(); 275 } 276 277 n--; 278 node->func(node); 279 } 280 qemu_mutex_unlock_iothread(); 281 } 282 abort(); 283 } 284 285 void call_rcu1(struct rcu_head *node, void (*func)(struct rcu_head *node)) 286 { 287 node->func = func; 288 enqueue(node); 289 atomic_inc(&rcu_call_count); 290 qemu_event_set(&rcu_call_ready_event); 291 } 292 293 void rcu_register_thread(void) 294 { 295 assert(rcu_reader.ctr == 0); 296 qemu_mutex_lock(&rcu_registry_lock); 297 QLIST_INSERT_HEAD(®istry, &rcu_reader, node); 298 qemu_mutex_unlock(&rcu_registry_lock); 299 } 300 301 void rcu_unregister_thread(void) 302 { 303 qemu_mutex_lock(&rcu_registry_lock); 304 QLIST_REMOVE(&rcu_reader, node); 305 qemu_mutex_unlock(&rcu_registry_lock); 306 } 307 308 static void rcu_init_complete(void) 309 { 310 QemuThread thread; 311 312 qemu_mutex_init(&rcu_registry_lock); 313 qemu_mutex_init(&rcu_sync_lock); 314 qemu_event_init(&rcu_gp_event, true); 315 316 qemu_event_init(&rcu_call_ready_event, false); 317 318 /* The caller is assumed to have iothread lock, so the call_rcu thread 319 * must have been quiescent even after forking, just recreate it. 
320 */ 321 qemu_thread_create(&thread, "call_rcu", call_rcu_thread, 322 NULL, QEMU_THREAD_DETACHED); 323 324 rcu_register_thread(); 325 } 326 327 static int atfork_depth = 1; 328 329 void rcu_enable_atfork(void) 330 { 331 atfork_depth++; 332 } 333 334 void rcu_disable_atfork(void) 335 { 336 atfork_depth--; 337 } 338 339 #ifdef CONFIG_POSIX 340 static void rcu_init_lock(void) 341 { 342 if (atfork_depth < 1) { 343 return; 344 } 345 346 qemu_mutex_lock(&rcu_sync_lock); 347 qemu_mutex_lock(&rcu_registry_lock); 348 } 349 350 static void rcu_init_unlock(void) 351 { 352 if (atfork_depth < 1) { 353 return; 354 } 355 356 qemu_mutex_unlock(&rcu_registry_lock); 357 qemu_mutex_unlock(&rcu_sync_lock); 358 } 359 360 static void rcu_init_child(void) 361 { 362 if (atfork_depth < 1) { 363 return; 364 } 365 366 memset(®istry, 0, sizeof(registry)); 367 rcu_init_complete(); 368 } 369 #endif 370 371 static void __attribute__((__constructor__)) rcu_init(void) 372 { 373 #ifdef CONFIG_POSIX 374 pthread_atfork(rcu_init_lock, rcu_init_unlock, rcu_init_child); 375 #endif 376 rcu_init_complete(); 377 } 378