xref: /openbmc/qemu/util/rcu.c (revision 77a8b846)
/*
 * urcu-mb.c
 *
 * Userspace RCU library with explicit memory barriers
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 * Copyright 2015 Red Hat, Inc.
 *
 * Ported to QEMU by Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/rcu.h"
#include "qemu/atomic.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"
#if defined(CONFIG_MALLOC_TRIM)
#include <malloc.h>
#endif

/*
 * Global grace period counter.  Bit 0 is always one in rcu_gp_ctr.
 * Bits 1 and above are defined in synchronize_rcu.
 */
#define RCU_GP_LOCKED           (1UL << 0)
#define RCU_GP_CTR              (1UL << 1)

unsigned long rcu_gp_ctr = RCU_GP_LOCKED;
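
/*
 * Worked example, derived from synchronize_rcu() below: on hosts with
 * 64-bit longs, rcu_gp_ctr starts at 1 (RCU_GP_LOCKED) and each grace
 * period adds RCU_GP_CTR, so the counter takes the values 1, 3, 5, ...
 * On hosts with 32-bit longs, bit 1 is toggled instead, so the counter
 * alternates between 1 and 3, and the two-subphase scheme in
 * synchronize_rcu() makes up for the smaller range.
 */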

QemuEvent rcu_gp_event;
static QemuMutex rcu_registry_lock;
static QemuMutex rcu_sync_lock;

/*
 * Check whether a quiescent state was crossed between the counter update
 * in synchronize_rcu() and now.
 */
static inline int rcu_gp_ongoing(unsigned long *ctr)
{
    unsigned long v;

    v = atomic_read(ctr);
    return v && (v != rcu_gp_ctr);
}
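
/*
 * rcu_gp_ongoing() is evaluated against the snapshot of rcu_gp_ctr that a
 * reader publishes when it enters a read-side critical section.  A rough
 * sketch of the reader side (the real implementation lives in
 * include/qemu/rcu.h and also handles nesting through a depth counter):
 *
 *     rcu_read_lock:
 *         atomic_set(&rcu_reader.ctr, atomic_read(&rcu_gp_ctr));
 *         smp_mb();    // pairs with the smp_mb() in synchronize_rcu()
 *
 *     rcu_read_unlock:
 *         atomic_set(&rcu_reader.ctr, 0);   // report a quiescent state
 *         smp_mb();    // order the ctr store before the waiting load
 *         if (atomic_read(&rcu_reader.waiting)) {
 *             atomic_set(&rcu_reader.waiting, false);
 *             qemu_event_set(&rcu_gp_event);   // wake wait_for_readers()
 *         }
 */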

/* Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
__thread struct rcu_reader_data rcu_reader;

/* Protected by rcu_registry_lock.  */
typedef QLIST_HEAD(, rcu_reader_data) ThreadList;
static ThreadList registry = QLIST_HEAD_INITIALIZER(registry);

/* Wait for previous parity/grace period to be empty of readers.  */
static void wait_for_readers(void)
{
    ThreadList qsreaders = QLIST_HEAD_INITIALIZER(qsreaders);
    struct rcu_reader_data *index, *tmp;

    for (;;) {
        /* We want to be notified of changes made to rcu_gp_ongoing
         * while we walk the list.
         */
        qemu_event_reset(&rcu_gp_event);

        /* Instead of using atomic_mb_set for index->waiting, and
         * atomic_mb_read for index->ctr, memory barriers are placed
         * manually since writes to different threads are independent.
         * qemu_event_reset has acquire semantics, so no memory barrier
         * is needed here.
         */
        QLIST_FOREACH(index, &registry, node) {
            atomic_set(&index->waiting, true);
        }

        /* Here, order the stores to index->waiting before the loads of
         * index->ctr.  Pairs with smp_mb() in rcu_read_unlock(),
         * ensuring that the loads of index->ctr are sequentially consistent.
         */
        smp_mb();

        QLIST_FOREACH_SAFE(index, &registry, node, tmp) {
            if (!rcu_gp_ongoing(&index->ctr)) {
                QLIST_REMOVE(index, node);
                QLIST_INSERT_HEAD(&qsreaders, index, node);

                /* No need for atomic_mb_set here; at worst we
                 * get some extra futex wakeups.
                 */
                atomic_set(&index->waiting, false);
            }
        }

        if (QLIST_EMPTY(&registry)) {
            break;
        }

        /* Wait for one thread to report a quiescent state and try again.
         * Release rcu_registry_lock, so rcu_(un)register_thread() doesn't
         * have to wait too long.
         *
         * rcu_register_thread() may add nodes to &registry; it will not
         * wake up synchronize_rcu, but that is okay because at least another
         * thread must exit its RCU read-side critical section before
         * synchronize_rcu is done.  The next iteration of the loop will
         * move the new thread's rcu_reader from &registry to &qsreaders,
         * because rcu_gp_ongoing() will return false.
         *
         * rcu_unregister_thread() may remove nodes from &qsreaders instead
         * of &registry if it runs during qemu_event_wait.  That's okay;
         * the node then will not be added back to &registry by QLIST_SWAP
         * below.  The invariant is that the node is part of one list when
         * rcu_registry_lock is released.
         */
        qemu_mutex_unlock(&rcu_registry_lock);
        qemu_event_wait(&rcu_gp_event);
        qemu_mutex_lock(&rcu_registry_lock);
    }

    /* put back the reader list in the registry */
    QLIST_SWAP(&registry, &qsreaders, node);
}

void synchronize_rcu(void)
{
    qemu_mutex_lock(&rcu_sync_lock);

    /* Write RCU-protected pointers before reading p_rcu_reader->ctr.
     * Pairs with smp_mb() in rcu_read_lock().
     */
    smp_mb();

    qemu_mutex_lock(&rcu_registry_lock);
    if (!QLIST_EMPTY(&registry)) {
        /* In either case, the atomic_mb_set below blocks stores that free
         * old RCU-protected pointers.
         */
        if (sizeof(rcu_gp_ctr) < 8) {
            /* For architectures with 32-bit longs, a two-subphase algorithm
             * ensures we do not encounter overflow bugs.
             *
             * Switch parity: 0 -> 1, 1 -> 0.
             */
            atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
            wait_for_readers();
            atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
        } else {
            /* Increment current grace period.  */
            atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
        }

        wait_for_readers();
    }

    qemu_mutex_unlock(&rcu_registry_lock);
    qemu_mutex_unlock(&rcu_sync_lock);
}
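
/*
 * Typical writer-side use of synchronize_rcu(), sketched here for context.
 * The helpers atomic_rcu_read()/atomic_rcu_set() come from qemu/atomic.h;
 * "Config", "global_cfg" and "new_cfg" are made-up names:
 *
 *     Config *old = atomic_rcu_read(&global_cfg);
 *     atomic_rcu_set(&global_cfg, new_cfg);   // publish the new version
 *     synchronize_rcu();                      // wait out pre-existing readers
 *     g_free(old);                            // nobody can still see "old"
 *
 * When blocking is undesirable, call_rcu1() below defers the reclamation
 * to the call_rcu thread instead.
 */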


#define RCU_CALL_MIN_SIZE        30

/* Multi-producer, single-consumer queue based on urcu/static/wfqueue.h
 * from liburcu.  Note that head is only used by the consumer.
 */
static struct rcu_head dummy;
static struct rcu_head *head = &dummy, **tail = &dummy.next;
static int rcu_call_count;
static QemuEvent rcu_call_ready_event;

static void enqueue(struct rcu_head *node)
{
    struct rcu_head **old_tail;

    node->next = NULL;
    old_tail = atomic_xchg(&tail, &node->next);
    atomic_mb_set(old_tail, node);
}
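
/*
 * Enqueuing is a two-step operation: the atomic_xchg() above makes the new
 * node's next field the queue tail, and only afterwards is the previous
 * tail's next pointer made to point at the node.  Between those two steps
 * the consumer can observe a node whose next pointer is still NULL;
 * try_dequeue() below handles exactly that window.  A rough timeline for
 * one producer P and the consumer C:
 *
 *     P: node->next = NULL
 *     P: old_tail = atomic_xchg(&tail, &node->next)   // tail claimed
 *     C: head->next is still NULL, try_dequeue() returns NULL
 *     P: atomic_mb_set(old_tail, node)                // link completed
 *     C: retries and dequeues normally
 */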

static struct rcu_head *try_dequeue(void)
{
    struct rcu_head *node, *next;

retry:
    /* Test for an empty list, which we do not expect.  Note that for
     * the consumer head and tail are always consistent.  The head
     * is consistent because only the consumer reads/writes it; the
     * tail is consistent because updating it is the first globally
     * visible step of enqueuing.  It is only the next pointers that
     * might be inconsistent.
     */
    if (head == &dummy && atomic_mb_read(&tail) == &dummy.next) {
        abort();
    }

    /* If the head node has NULL in its next pointer, its enqueuer has not
     * finished the update yet; give up and let the caller wait for it.
     */
    node = head;
    next = atomic_mb_read(&head->next);
    if (!next) {
        return NULL;
    }

    /* Since we are the sole consumer, and we excluded the empty case
     * above, the queue will always have at least two nodes: the
     * dummy node, and the one being removed.  So we do not need to update
     * the tail pointer.
     */
    head = next;

    /* If we dequeued the dummy node, add it back at the end and retry.  */
    if (node == &dummy) {
        enqueue(node);
        goto retry;
    }

    return node;
}

static void *call_rcu_thread(void *opaque)
{
    struct rcu_head *node;

    rcu_register_thread();

    for (;;) {
        int tries = 0;
        int n = atomic_read(&rcu_call_count);

        /* Heuristically wait for a decent number of callbacks to pile up.
         * Fetch rcu_call_count now; we only need to process elements that
         * were added before synchronize_rcu() starts.
         */
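        /*
         * With the constants above: once at least one callback is pending,
         * each further iteration sleeps 10 ms and at most 5 such iterations
         * are taken, so a grace period is delayed by up to ~50 ms while
         * waiting for RCU_CALL_MIN_SIZE (30) callbacks to accumulate.
         */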
        while (n == 0 || (n < RCU_CALL_MIN_SIZE && ++tries <= 5)) {
            g_usleep(10000);
            if (n == 0) {
                qemu_event_reset(&rcu_call_ready_event);
                n = atomic_read(&rcu_call_count);
                if (n == 0) {
#if defined(CONFIG_MALLOC_TRIM)
                    malloc_trim(4 * 1024 * 1024);
#endif
                    qemu_event_wait(&rcu_call_ready_event);
                }
            }
            n = atomic_read(&rcu_call_count);
        }

        atomic_sub(&rcu_call_count, n);
        synchronize_rcu();
        qemu_mutex_lock_iothread();
        while (n > 0) {
            node = try_dequeue();
            while (!node) {
                qemu_mutex_unlock_iothread();
                qemu_event_reset(&rcu_call_ready_event);
                node = try_dequeue();
                if (!node) {
                    qemu_event_wait(&rcu_call_ready_event);
                    node = try_dequeue();
                }
                qemu_mutex_lock_iothread();
            }

            n--;
            node->func(node);
        }
        qemu_mutex_unlock_iothread();
    }
    abort();
}

void call_rcu1(struct rcu_head *node, void (*func)(struct rcu_head *node))
{
    node->func = func;
    enqueue(node);
    atomic_inc(&rcu_call_count);
    qemu_event_set(&rcu_call_ready_event);
}
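
/*
 * Example use of call_rcu1().  The struct and function names are made up;
 * qemu/rcu.h also provides a call_rcu() convenience macro that hides the
 * container_of() boilerplate:
 *
 *     struct Foo {
 *         struct rcu_head rcu;
 *         int payload;
 *     };
 *
 *     static void foo_free(struct rcu_head *head)
 *     {
 *         struct Foo *foo = container_of(head, struct Foo, rcu);
 *         g_free(foo);
 *     }
 *
 *     call_rcu1(&foo->rcu, foo_free);   // freed after the next grace period
 */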

void rcu_register_thread(void)
{
    assert(rcu_reader.ctr == 0);
    qemu_mutex_lock(&rcu_registry_lock);
    QLIST_INSERT_HEAD(&registry, &rcu_reader, node);
    qemu_mutex_unlock(&rcu_registry_lock);
}

void rcu_unregister_thread(void)
{
    qemu_mutex_lock(&rcu_registry_lock);
    QLIST_REMOVE(&rcu_reader, node);
    qemu_mutex_unlock(&rcu_registry_lock);
}
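
/*
 * Every thread that enters RCU read-side critical sections must register
 * itself so that synchronize_rcu() can find its rcu_reader state.  A
 * sketch of the expected lifecycle (names other than the rcu_* calls are
 * illustrative):
 *
 *     static void *worker_thread(void *opaque)
 *     {
 *         rcu_register_thread();
 *         while (running) {
 *             rcu_read_lock();
 *             ...access RCU-protected data...
 *             rcu_read_unlock();
 *         }
 *         rcu_unregister_thread();
 *         return NULL;
 *     }
 */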

static void rcu_init_complete(void)
{
    QemuThread thread;

    qemu_mutex_init(&rcu_registry_lock);
    qemu_mutex_init(&rcu_sync_lock);
    qemu_event_init(&rcu_gp_event, true);

    qemu_event_init(&rcu_call_ready_event, false);

    /* The caller is assumed to hold the iothread lock, so the call_rcu
     * thread must have been quiescent even after forking; just recreate it.
     */
    qemu_thread_create(&thread, "call_rcu", call_rcu_thread,
                       NULL, QEMU_THREAD_DETACHED);

    rcu_register_thread();
}

static int atfork_depth = 1;

void rcu_enable_atfork(void)
{
    atfork_depth++;
}

void rcu_disable_atfork(void)
{
    atfork_depth--;
}
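
/*
 * The pthread_atfork() handlers installed below do their locking and
 * re-initialization only while atfork_depth >= 1; the initial value of 1
 * leaves them enabled from startup.  A caller that knows no upcoming
 * fork() needs RCU to stay consistent in the child can call
 * rcu_disable_atfork() to skip that work, and rcu_enable_atfork() later
 * to balance it.
 */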

#ifdef CONFIG_POSIX
static void rcu_init_lock(void)
{
    if (atfork_depth < 1) {
        return;
    }

    qemu_mutex_lock(&rcu_sync_lock);
    qemu_mutex_lock(&rcu_registry_lock);
}

static void rcu_init_unlock(void)
{
    if (atfork_depth < 1) {
        return;
    }

    qemu_mutex_unlock(&rcu_registry_lock);
    qemu_mutex_unlock(&rcu_sync_lock);
}

static void rcu_init_child(void)
{
    if (atfork_depth < 1) {
        return;
    }

    memset(&registry, 0, sizeof(registry));
    rcu_init_complete();
}
#endif

static void __attribute__((__constructor__)) rcu_init(void)
{
#ifdef CONFIG_POSIX
    pthread_atfork(rcu_init_lock, rcu_init_unlock, rcu_init_child);
#endif
    rcu_init_complete();
}