Lines Matching +full:multi +full:- +full:node
2 * urcu-mb.c
33 #include "qemu/main-loop.h"
86 QLIST_FOREACH(index, &registry, node) { in wait_for_readers()
87 qatomic_set(&index->waiting, true); in wait_for_readers()
90 /* Here, order the stores to index->waiting before the loads of in wait_for_readers()
91 * index->ctr. Pairs with smp_mb_placeholder() in rcu_read_unlock(), in wait_for_readers()
92 * ensuring that the loads of index->ctr are sequentially consistent. in wait_for_readers()
96 * on architectures with 32-bit longs; see synchronize_rcu(). in wait_for_readers()
100 QLIST_FOREACH_SAFE(index, &registry, node, tmp) { in wait_for_readers()
101 if (!rcu_gp_ongoing(&index->ctr)) { in wait_for_readers()
102 QLIST_REMOVE(index, node); in wait_for_readers()
103 QLIST_INSERT_HEAD(&qsreaders, index, node); in wait_for_readers()
108 qatomic_set(&index->waiting, false); in wait_for_readers()
110 notifier_list_notify(&index->force_rcu, NULL); in wait_for_readers()
124 * thread must exit its RCU read-side critical section before in wait_for_readers()
131 * the node then will not be added back to &registry by QLIST_SWAP in wait_for_readers()
132 * below. The invariant is that the node is part of one list when in wait_for_readers()
141 QLIST_SWAP(&registry, &qsreaders, node); in wait_for_readers()
148 /* Write RCU-protected pointers before reading p_rcu_reader->ctr. in synchronize_rcu()
151 * Also orders write to RCU-protected pointers before in synchronize_rcu()
159 /* For architectures with 32-bit longs, a two-subphases algorithm in synchronize_rcu()
162 * Switch parity: 0 -> 1, 1 -> 0. in synchronize_rcu()
179 /* Multi-producer, single-consumer queue based on urcu/static/wfqueue.h
187 static void enqueue(struct rcu_head *node) in enqueue() argument
191 node->next = NULL; in enqueue()
194 * Make this node the tail of the list. The node will be in enqueue()
198 old_tail = qatomic_xchg(&tail, &node->next); in enqueue()
207 qatomic_store_release(old_tail, node); in enqueue()
212 struct rcu_head *node, *next; in try_dequeue() local
216 node = head; in try_dequeue()
219 * If the head node has NULL in its next pointer, the value is in try_dequeue()
222 next = qatomic_load_acquire(&node->next); in try_dequeue()
241 * dummy node, and the one being removed. So we do not need to update in try_dequeue()
246 /* If we dequeued the dummy node, add it back at the end and retry. */ in try_dequeue()
247 if (node == &dummy) { in try_dequeue()
248 enqueue(node); in try_dequeue()
252 return node; in try_dequeue()
257 struct rcu_head *node; in call_rcu_thread() local
288 node = try_dequeue(); in call_rcu_thread()
289 while (!node) { in call_rcu_thread()
292 node = try_dequeue(); in call_rcu_thread()
293 if (!node) { in call_rcu_thread()
295 node = try_dequeue(); in call_rcu_thread()
300 n--; in call_rcu_thread()
301 node->func(node); in call_rcu_thread()
308 void call_rcu1(struct rcu_head *node, void (*func)(struct rcu_head *node)) in call_rcu1() argument
310 node->func = func; in call_rcu1()
311 enqueue(node); in call_rcu1()
322 static void drain_rcu_callback(struct rcu_head *node) in drain_rcu_callback() argument
324 struct rcu_drain *event = (struct rcu_drain *)node; in drain_rcu_callback()
325 qemu_event_set(&event->drain_complete_event); in drain_rcu_callback()
375 assert(get_ptr_rcu_reader()->ctr == 0); in rcu_register_thread()
377 QLIST_INSERT_HEAD(&registry, get_ptr_rcu_reader(), node); in rcu_register_thread()
384 QLIST_REMOVE(get_ptr_rcu_reader(), node); in rcu_unregister_thread()
391 notifier_list_add(&get_ptr_rcu_reader()->force_rcu, n); in rcu_add_force_rcu_notifier()
430 atfork_depth--; in rcu_disable_atfork()