/*
 * QemuLockCnt implementation
 *
 * Copyright Red Hat, Inc. 2017
 *
 * Author:
 *   Paolo Bonzini <pbonzini@redhat.com>
 */
#include "qemu/osdep.h"
#include "qemu/thread.h"
#include "qemu/atomic.h"
#include "trace.h"

#ifdef CONFIG_LINUX
#include "qemu/futex.h"

/* On Linux, bits 0-1 are a futex-based lock, bits 2-31 are the counter.
 * For the mutex algorithm see Ulrich Drepper's "Futexes Are Tricky" (ok,
 * this is not the most relaxing citation I could make...).  It is similar
 * to mutex2 in the paper.
 */

#define QEMU_LOCKCNT_STATE_MASK    3
#define QEMU_LOCKCNT_STATE_FREE    0   /* free, uncontended */
#define QEMU_LOCKCNT_STATE_LOCKED  1   /* locked, uncontended */
#define QEMU_LOCKCNT_STATE_WAITING 2   /* locked, contended */

#define QEMU_LOCKCNT_COUNT_STEP    4
#define QEMU_LOCKCNT_COUNT_SHIFT   2

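/* A worked example of the encoding (illustrative, derived from the
 * definitions above): with three readers active and the lock held
 * uncontended, the word is
 *   (3 << QEMU_LOCKCNT_COUNT_SHIFT) | QEMU_LOCKCNT_STATE_LOCKED == 13.
 * Readers move the word by QEMU_LOCKCNT_COUNT_STEP (4), which never
 * touches the two state bits.
 */
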
void qemu_lockcnt_init(QemuLockCnt *lockcnt)
{
    lockcnt->count = 0;
}

void qemu_lockcnt_destroy(QemuLockCnt *lockcnt)
{
}

/* *val is the current value of lockcnt->count.
 *
 * If the lock is free, try a cmpxchg from *val to new_if_free; return
 * true and set *val to new_if_free, the new value of lockcnt->count.
 *
 * If the lock is taken, wait for it to be released and return false
 * *without trying again to take the lock*.  Again, set *val to the
 * new value of lockcnt->count.
 *
 * If *waited is true on return, new_if_free's bottom two bits must not
 * be QEMU_LOCKCNT_STATE_LOCKED on subsequent calls, because the caller
 * does not know if there are other waiters.  Furthermore, after *waited
 * is set the caller has effectively acquired the lock.  If it returns
 * with the lock not taken, it must wake another futex waiter.
 */
static bool qemu_lockcnt_cmpxchg_or_wait(QemuLockCnt *lockcnt, int *val,
                                         int new_if_free, bool *waited)
{
    /* Fast path for when the lock is free.  */
    if ((*val & QEMU_LOCKCNT_STATE_MASK) == QEMU_LOCKCNT_STATE_FREE) {
        int expected = *val;

        trace_lockcnt_fast_path_attempt(lockcnt, expected, new_if_free);
        *val = qatomic_cmpxchg(&lockcnt->count, expected, new_if_free);
        if (*val == expected) {
            trace_lockcnt_fast_path_success(lockcnt, expected, new_if_free);
            *val = new_if_free;
            return true;
        }
    }

    /* The slow path moves from locked to waiting if necessary, then
     * does a futex wait.  Both steps can be repeated ad nauseam,
     * only getting out of the loop if we can have another shot at the
     * fast path.  Once we can, get out to compute the new destination
     * value for the fast path.
     */
    while ((*val & QEMU_LOCKCNT_STATE_MASK) != QEMU_LOCKCNT_STATE_FREE) {
        if ((*val & QEMU_LOCKCNT_STATE_MASK) == QEMU_LOCKCNT_STATE_LOCKED) {
            int expected = *val;
            int new = expected - QEMU_LOCKCNT_STATE_LOCKED + QEMU_LOCKCNT_STATE_WAITING;

            trace_lockcnt_futex_wait_prepare(lockcnt, expected, new);
            *val = qatomic_cmpxchg(&lockcnt->count, expected, new);
            if (*val == expected) {
                *val = new;
            }
            continue;
        }

        if ((*val & QEMU_LOCKCNT_STATE_MASK) == QEMU_LOCKCNT_STATE_WAITING) {
            *waited = true;
            trace_lockcnt_futex_wait(lockcnt, *val);
            qemu_futex_wait(&lockcnt->count, *val);
            *val = qatomic_read(&lockcnt->count);
            trace_lockcnt_futex_wait_resume(lockcnt, *val);
            continue;
        }

        abort();
    }
    return false;
}
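
/* For orientation, the state transitions driven by the loop above
 * (a summary derived from the code, not an additional invariant):
 *   FREE    -> LOCKED or WAITING  via the fast-path cmpxchg;
 *   LOCKED  -> WAITING            when a contender announces itself;
 *   WAITING -> futex wait         until an unlock clears the state bits
 *                                 back to FREE and wakes one waiter.
 */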

static void lockcnt_wake(QemuLockCnt *lockcnt)
{
    trace_lockcnt_futex_wake(lockcnt);
    qemu_futex_wake(&lockcnt->count, 1);
}

void qemu_lockcnt_inc(QemuLockCnt *lockcnt)
{
    int val = qatomic_read(&lockcnt->count);
    bool waited = false;

    for (;;) {
        if (val >= QEMU_LOCKCNT_COUNT_STEP) {
            int expected = val;
            val = qatomic_cmpxchg(&lockcnt->count, val,
                                  val + QEMU_LOCKCNT_COUNT_STEP);
            if (val == expected) {
                break;
            }
        } else {
            /* The fast path is (0, unlocked)->(1, unlocked).  */
            if (qemu_lockcnt_cmpxchg_or_wait(lockcnt, &val, QEMU_LOCKCNT_COUNT_STEP,
                                             &waited)) {
                break;
            }
        }
    }

    /* If we were woken by another thread, we should also wake one because
     * we are effectively releasing the lock that was given to us.  This is
     * the case where qemu_lockcnt_lock would leave QEMU_LOCKCNT_STATE_WAITING
     * in the low bits, and qemu_lockcnt_inc_and_unlock would find it and
     * wake someone.
     */
    if (waited) {
        lockcnt_wake(lockcnt);
    }
}
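
/* A minimal sketch of the intended reader-side pattern (illustrative
 * only; the "xyz" object and its fields are hypothetical, not from this
 * file, and writers are assumed to free data only under the
 * dec-and-lock idiom shown further below):
 *
 *     qemu_lockcnt_inc(&xyz->lockcnt);
 *     if (xyz->data) {
 *         ...read xyz->data; it cannot be freed concurrently...
 *     }
 *     qemu_lockcnt_dec(&xyz->lockcnt);
 */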

void qemu_lockcnt_dec(QemuLockCnt *lockcnt)
{
    qatomic_sub(&lockcnt->count, QEMU_LOCKCNT_COUNT_STEP);
}

/* Decrement a counter, and return locked if it is decremented to zero.
 * If the function returns true, it is impossible for the counter to
 * become nonzero until the next qemu_lockcnt_unlock.
 */
bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt)
{
    int val = qatomic_read(&lockcnt->count);
    int locked_state = QEMU_LOCKCNT_STATE_LOCKED;
    bool waited = false;

    for (;;) {
        if (val >= 2 * QEMU_LOCKCNT_COUNT_STEP) {
            int expected = val;
            val = qatomic_cmpxchg(&lockcnt->count, val,
                                  val - QEMU_LOCKCNT_COUNT_STEP);
            if (val == expected) {
                break;
            }
        } else {
            /* If count is going 1->0, take the lock. The fast path is
             * (1, unlocked)->(0, locked) or (1, unlocked)->(0, waiting).
             */
            if (qemu_lockcnt_cmpxchg_or_wait(lockcnt, &val, locked_state, &waited)) {
                return true;
            }

            if (waited) {
                /* At this point we do not know if there are more waiters.  Assume
                 * there are.
                 */
                locked_state = QEMU_LOCKCNT_STATE_WAITING;
            }
        }
    }

    /* If we were woken by another thread, but we're returning in unlocked
     * state, we should also wake a thread because we are effectively
     * releasing the lock that was given to us.  This is the case where
     * qemu_lockcnt_lock would leave QEMU_LOCKCNT_STATE_WAITING in the low
     * bits, and qemu_lockcnt_unlock would find it and wake someone.
     */
    if (waited) {
        lockcnt_wake(lockcnt);
    }
    return false;
}
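
/* A sketch of the matching teardown pattern (hypothetical "xyz" as in
 * the reader sketch above): the last reference holder that also wants
 * to free the object uses dec-and-lock, so the count cannot bounce back
 * up while the lock is held:
 *
 *     if (qemu_lockcnt_dec_and_lock(&xyz->lockcnt)) {
 *         g_free(xyz->data);
 *         xyz->data = NULL;
 *         qemu_lockcnt_unlock(&xyz->lockcnt);
 *     }
 */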

/* If the counter is one, decrement it and return locked.  Otherwise do
 * nothing.
 *
 * If the function returns true, it is impossible for the counter to
 * become nonzero until the next qemu_lockcnt_unlock.
 */
bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt)
{
    int val = qatomic_read(&lockcnt->count);
    int locked_state = QEMU_LOCKCNT_STATE_LOCKED;
    bool waited = false;

    while (val < 2 * QEMU_LOCKCNT_COUNT_STEP) {
        /* If count is going 1->0, take the lock. The fast path is
         * (1, unlocked)->(0, locked) or (1, unlocked)->(0, waiting).
         */
        if (qemu_lockcnt_cmpxchg_or_wait(lockcnt, &val, locked_state, &waited)) {
            return true;
        }

        if (waited) {
            /* At this point we do not know if there are more waiters.  Assume
             * there are.
             */
            locked_state = QEMU_LOCKCNT_STATE_WAITING;
        }
    }

    /* If we were woken by another thread, but we're returning in unlocked
     * state, we should also wake a thread because we are effectively
     * releasing the lock that was given to us.  This is the case where
     * qemu_lockcnt_lock would leave QEMU_LOCKCNT_STATE_WAITING in the low
     * bits, and qemu_lockcnt_inc_and_unlock would find it and wake someone.
     */
    if (waited) {
        lockcnt_wake(lockcnt);
    }
    return false;
}
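
/* Unlike qemu_lockcnt_dec_and_lock, a false return from the function
 * above leaves the count untouched: when the count is above one it does
 * not decrement at all, so the caller still holds its reference.
 */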

void qemu_lockcnt_lock(QemuLockCnt *lockcnt)
{
    int val = qatomic_read(&lockcnt->count);
    int step = QEMU_LOCKCNT_STATE_LOCKED;
    bool waited = false;

    /* The third argument is only used if the low bits of val are 0
     * (QEMU_LOCKCNT_STATE_FREE), so just blindly mix in the desired
     * state.
     */
    while (!qemu_lockcnt_cmpxchg_or_wait(lockcnt, &val, val + step, &waited)) {
        if (waited) {
            /* At this point we do not know if there are more waiters.  Assume
             * there are.
             */
            step = QEMU_LOCKCNT_STATE_WAITING;
        }
    }
}

void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt)
{
    int expected, new, val;

    val = qatomic_read(&lockcnt->count);
    do {
        expected = val;
        new = (val + QEMU_LOCKCNT_COUNT_STEP) & ~QEMU_LOCKCNT_STATE_MASK;
        trace_lockcnt_unlock_attempt(lockcnt, val, new);
        val = qatomic_cmpxchg(&lockcnt->count, val, new);
    } while (val != expected);

    trace_lockcnt_unlock_success(lockcnt, val, new);
    if (val & QEMU_LOCKCNT_STATE_WAITING) {
        lockcnt_wake(lockcnt);
    }
}

void qemu_lockcnt_unlock(QemuLockCnt *lockcnt)
{
    int expected, new, val;

    val = qatomic_read(&lockcnt->count);
    do {
        expected = val;
        new = val & ~QEMU_LOCKCNT_STATE_MASK;
        trace_lockcnt_unlock_attempt(lockcnt, val, new);
        val = qatomic_cmpxchg(&lockcnt->count, val, new);
    } while (val != expected);

    trace_lockcnt_unlock_success(lockcnt, val, new);
    if (val & QEMU_LOCKCNT_STATE_WAITING) {
        lockcnt_wake(lockcnt);
    }
}

unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt)
{
    return qatomic_read(&lockcnt->count) >> QEMU_LOCKCNT_COUNT_SHIFT;
}
#else
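/* Generic fallback for platforms without futexes: the lock side is a
 * plain QemuMutex and the count is a separate word updated with
 * atomics, so no state/count bit-packing is needed.
 */
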
void qemu_lockcnt_init(QemuLockCnt *lockcnt)
{
    qemu_mutex_init(&lockcnt->mutex);
    lockcnt->count = 0;
}

void qemu_lockcnt_destroy(QemuLockCnt *lockcnt)
{
    qemu_mutex_destroy(&lockcnt->mutex);
}

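/* In qemu_lockcnt_inc below, the old == 0 case goes through the mutex:
 * while the count is zero a writer may hold the lock and be about to
 * free the protected data, so the 0 -> 1 transition must synchronize
 * with it rather than race via a bare cmpxchg.
 */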
void qemu_lockcnt_inc(QemuLockCnt *lockcnt)
{
    int old;
    for (;;) {
        old = qatomic_read(&lockcnt->count);
        if (old == 0) {
            qemu_lockcnt_lock(lockcnt);
            qemu_lockcnt_inc_and_unlock(lockcnt);
            return;
        } else {
            if (qatomic_cmpxchg(&lockcnt->count, old, old + 1) == old) {
                return;
            }
        }
    }
}

void qemu_lockcnt_dec(QemuLockCnt *lockcnt)
{
    qatomic_dec(&lockcnt->count);
}

/* Decrement a counter, and return locked if it is decremented to zero.
 * It is impossible for the counter to become nonzero while the mutex
 * is taken.
 */
bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt)
{
    int val = qatomic_read(&lockcnt->count);
    while (val > 1) {
        int old = qatomic_cmpxchg(&lockcnt->count, val, val - 1);
        if (old != val) {
            val = old;
            continue;
        }

        return false;
    }

    qemu_lockcnt_lock(lockcnt);
    if (qatomic_fetch_dec(&lockcnt->count) == 1) {
        return true;
    }

    qemu_lockcnt_unlock(lockcnt);
    return false;
}

/* Decrement a counter and return locked if it is decremented to zero.
 * Otherwise do nothing.
 *
 * It is impossible for the counter to become nonzero while the mutex
 * is taken.
 */
bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt)
{
    /* No need for acquire semantics if we return false.  */
    int val = qatomic_read(&lockcnt->count);
    if (val > 1) {
        return false;
    }

    qemu_lockcnt_lock(lockcnt);
    if (qatomic_fetch_dec(&lockcnt->count) == 1) {
        return true;
    }

    qemu_lockcnt_inc_and_unlock(lockcnt);
    return false;
}

void qemu_lockcnt_lock(QemuLockCnt *lockcnt)
{
    qemu_mutex_lock(&lockcnt->mutex);
}

void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt)
{
    qatomic_inc(&lockcnt->count);
    qemu_mutex_unlock(&lockcnt->mutex);
}

void qemu_lockcnt_unlock(QemuLockCnt *lockcnt)
{
    qemu_mutex_unlock(&lockcnt->mutex);
}

unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt)
{
    return qatomic_read(&lockcnt->count);
}
#endif