/*
 * QemuLockCnt implementation
 *
 * Copyright Red Hat, Inc. 2017
 *
 * Author:
 *   Paolo Bonzini <pbonzini@redhat.com>
 */
#include "qemu/osdep.h"
#include "qemu/thread.h"
#include "qemu/atomic.h"
#include "trace.h"

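/* Illustrative usage sketch only (the "data"/"data_lockcnt" names below are
 * placeholders, not part of this file): visitors bracket their accesses with
 * inc/dec, and a visitor that also wants to reclaim the structure calls
 * qemu_lockcnt_dec_and_lock() instead of qemu_lockcnt_dec():
 *
 *     qemu_lockcnt_inc(&data_lockcnt);
 *     ... read the data structure ...
 *     if (qemu_lockcnt_dec_and_lock(&data_lockcnt)) {
 *         ... counter is zero and the lock is held: safe to free ...
 *         qemu_lockcnt_unlock(&data_lockcnt);
 *     }
 */
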
#ifdef CONFIG_LINUX
#include "qemu/futex.h"

/* On Linux, bits 0-1 are a futex-based lock, bits 2-31 are the counter.
 * For the mutex algorithm see Ulrich Drepper's "Futexes Are Tricky" (ok,
 * this is not the most relaxing citation I could make...).  It is similar
 * to mutex2 in the paper.
 */

#define QEMU_LOCKCNT_STATE_MASK    3
#define QEMU_LOCKCNT_STATE_FREE    0   /* free, uncontended */
#define QEMU_LOCKCNT_STATE_LOCKED  1   /* locked, uncontended */
#define QEMU_LOCKCNT_STATE_WAITING 2   /* locked, contended */

#define QEMU_LOCKCNT_COUNT_STEP    4
#define QEMU_LOCKCNT_COUNT_SHIFT   2
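
/* For example, a count value of 9 (binary 1001) encodes a counter of 2
 * (9 >> QEMU_LOCKCNT_COUNT_SHIFT) with the lock held and uncontended
 * (low bits == QEMU_LOCKCNT_STATE_LOCKED).
 */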

void qemu_lockcnt_init(QemuLockCnt *lockcnt)
{
    lockcnt->count = 0;
}

void qemu_lockcnt_destroy(QemuLockCnt *lockcnt)
{
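    /* Nothing to free: the futex-based lock lives entirely in lockcnt->count.  */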
}

/* *val is the current value of lockcnt->count.
 *
 * If the lock is free, try a cmpxchg from *val to new_if_free; if the
 * cmpxchg succeeds, return true with *val set to new_if_free (the new
 * value of lockcnt->count).
 *
 * If the lock is taken, wait for it to be released and return false
 * *without trying again to take the lock*.  Again, *val is set to the
 * current value of lockcnt->count.
 *
 * If *waited is true on return, new_if_free's bottom two bits must not
 * be QEMU_LOCKCNT_STATE_LOCKED on subsequent calls, because the caller
 * does not know if there are other waiters.  Furthermore, once *waited
 * is set the caller has effectively acquired the lock.  If it returns
 * with the lock not taken, it must wake another futex waiter.
 */
static bool qemu_lockcnt_cmpxchg_or_wait(QemuLockCnt *lockcnt, int *val,
                                         int new_if_free, bool *waited)
{
    /* Fast path for when the lock is free.  */
    if ((*val & QEMU_LOCKCNT_STATE_MASK) == QEMU_LOCKCNT_STATE_FREE) {
        int expected = *val;

        trace_lockcnt_fast_path_attempt(lockcnt, expected, new_if_free);
        *val = atomic_cmpxchg(&lockcnt->count, expected, new_if_free);
        if (*val == expected) {
            trace_lockcnt_fast_path_success(lockcnt, expected, new_if_free);
            *val = new_if_free;
            return true;
        }
    }

    /* The slow path moves from locked to waiting if necessary, then
     * does a futex wait.  Both steps can be repeated ad nauseam,
     * only getting out of the loop if we can have another shot at the
     * fast path.  Once we can, get out to compute the new destination
     * value for the fast path.
     */
    while ((*val & QEMU_LOCKCNT_STATE_MASK) != QEMU_LOCKCNT_STATE_FREE) {
        if ((*val & QEMU_LOCKCNT_STATE_MASK) == QEMU_LOCKCNT_STATE_LOCKED) {
            int expected = *val;
            int new = expected - QEMU_LOCKCNT_STATE_LOCKED + QEMU_LOCKCNT_STATE_WAITING;

            trace_lockcnt_futex_wait_prepare(lockcnt, expected, new);
            *val = atomic_cmpxchg(&lockcnt->count, expected, new);
            if (*val == expected) {
                *val = new;
            }
            continue;
        }

        if ((*val & QEMU_LOCKCNT_STATE_MASK) == QEMU_LOCKCNT_STATE_WAITING) {
            *waited = true;
            trace_lockcnt_futex_wait(lockcnt, *val);
            qemu_futex_wait(&lockcnt->count, *val);
            *val = atomic_read(&lockcnt->count);
            trace_lockcnt_futex_wait_resume(lockcnt, *val);
            continue;
        }

        abort();
    }
    return false;
}

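/* Wake (at most) one thread that is blocked in qemu_futex_wait() on
 * lockcnt->count.
 */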
static void lockcnt_wake(QemuLockCnt *lockcnt)
{
    trace_lockcnt_futex_wake(lockcnt);
    qemu_futex_wake(&lockcnt->count, 1);
}

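/* Increment the visit counter.  If the counter is already nonzero, this is
 * a plain atomic add of QEMU_LOCKCNT_COUNT_STEP; the 0->1 transition instead
 * requires the lock bits to be free, so it may have to wait for a concurrent
 * lock holder to release them.
 */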
void qemu_lockcnt_inc(QemuLockCnt *lockcnt)
{
    int val = atomic_read(&lockcnt->count);
    bool waited = false;

    for (;;) {
        if (val >= QEMU_LOCKCNT_COUNT_STEP) {
            int expected = val;
            val = atomic_cmpxchg(&lockcnt->count, val, val + QEMU_LOCKCNT_COUNT_STEP);
            if (val == expected) {
                break;
            }
        } else {
            /* The fast path is (0, unlocked)->(1, unlocked).  */
            if (qemu_lockcnt_cmpxchg_or_wait(lockcnt, &val, QEMU_LOCKCNT_COUNT_STEP,
                                             &waited)) {
                break;
            }
        }
    }

    /* If we were woken by another thread, we should also wake one because
     * we are effectively releasing the lock that was given to us.  This is
     * the case where qemu_lockcnt_lock would leave QEMU_LOCKCNT_STATE_WAITING
     * in the low bits, and qemu_lockcnt_inc_and_unlock would find it and
     * wake someone.
     */
    if (waited) {
        lockcnt_wake(lockcnt);
    }
}

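/* Decrement the visit counter.  The counter lives above the lock bits, so
 * subtracting QEMU_LOCKCNT_COUNT_STEP never disturbs them (each dec must
 * pair with an earlier inc).
 */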
void qemu_lockcnt_dec(QemuLockCnt *lockcnt)
{
    atomic_sub(&lockcnt->count, QEMU_LOCKCNT_COUNT_STEP);
}

/* Decrement a counter, and return locked if it is decremented to zero.
 * If the function returns true, it is impossible for the counter to
 * become nonzero until the next qemu_lockcnt_unlock.
 */
bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt)
{
    int val = atomic_read(&lockcnt->count);
    int locked_state = QEMU_LOCKCNT_STATE_LOCKED;
    bool waited = false;

    for (;;) {
        if (val >= 2 * QEMU_LOCKCNT_COUNT_STEP) {
            int expected = val;
            val = atomic_cmpxchg(&lockcnt->count, val, val - QEMU_LOCKCNT_COUNT_STEP);
            if (val == expected) {
                break;
            }
        } else {
            /* If count is going 1->0, take the lock. The fast path is
             * (1, unlocked)->(0, locked) or (1, unlocked)->(0, waiting).
             */
            if (qemu_lockcnt_cmpxchg_or_wait(lockcnt, &val, locked_state, &waited)) {
                return true;
            }

            if (waited) {
                /* At this point we do not know if there are more waiters.  Assume
                 * there are.
                 */
                locked_state = QEMU_LOCKCNT_STATE_WAITING;
            }
        }
    }

    /* If we were woken by another thread, but we're returning in unlocked
     * state, we should also wake a thread because we are effectively
     * releasing the lock that was given to us.  This is the case where
     * qemu_lockcnt_lock would leave QEMU_LOCKCNT_STATE_WAITING in the low
     * bits, and qemu_lockcnt_unlock would find it and wake someone.
     */
    if (waited) {
        lockcnt_wake(lockcnt);
    }
    return false;
}

/* If the counter is one, decrement it and return locked.  Otherwise do
 * nothing.
 *
 * If the function returns true, it is impossible for the counter to
 * become nonzero until the next qemu_lockcnt_unlock.
 */
bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt)
{
    int val = atomic_read(&lockcnt->count);
    int locked_state = QEMU_LOCKCNT_STATE_LOCKED;
    bool waited = false;

    while (val < 2 * QEMU_LOCKCNT_COUNT_STEP) {
        /* If count is going 1->0, take the lock. The fast path is
         * (1, unlocked)->(0, locked) or (1, unlocked)->(0, waiting).
         */
        if (qemu_lockcnt_cmpxchg_or_wait(lockcnt, &val, locked_state, &waited)) {
            return true;
        }

        if (waited) {
            /* At this point we do not know if there are more waiters.  Assume
             * there are.
             */
            locked_state = QEMU_LOCKCNT_STATE_WAITING;
        }
    }

    /* If we were woken by another thread, but we're returning in unlocked
     * state, we should also wake a thread because we are effectively
     * releasing the lock that was given to us.  This is the case where
     * qemu_lockcnt_lock would leave QEMU_LOCKCNT_STATE_WAITING in the low
     * bits, and qemu_lockcnt_inc_and_unlock would find it and wake someone.
     */
    if (waited) {
        lockcnt_wake(lockcnt);
    }
    return false;
}

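/* Acquire the lock (the low two bits of lockcnt->count), leaving the
 * counter itself untouched.
 */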
void qemu_lockcnt_lock(QemuLockCnt *lockcnt)
{
    int val = atomic_read(&lockcnt->count);
    int step = QEMU_LOCKCNT_STATE_LOCKED;
    bool waited = false;

    /* The third argument is only used if the low bits of val are 0
     * (QEMU_LOCKCNT_STATE_FREE), so just blindly mix in the desired
     * state.
     */
    while (!qemu_lockcnt_cmpxchg_or_wait(lockcnt, &val, val + step, &waited)) {
        if (waited) {
            /* At this point we do not know if there are more waiters.  Assume
             * there are.
             */
            step = QEMU_LOCKCNT_STATE_WAITING;
        }
    }
}

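/* Increment the counter and release the lock with a single atomic update,
 * waking one waiter if the WAITING state was observed.
 */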
void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt)
{
    int expected, new, val;

    val = atomic_read(&lockcnt->count);
    do {
        expected = val;
        new = (val + QEMU_LOCKCNT_COUNT_STEP) & ~QEMU_LOCKCNT_STATE_MASK;
        trace_lockcnt_unlock_attempt(lockcnt, val, new);
        val = atomic_cmpxchg(&lockcnt->count, val, new);
    } while (val != expected);

    trace_lockcnt_unlock_success(lockcnt, val, new);
    if (val & QEMU_LOCKCNT_STATE_WAITING) {
        lockcnt_wake(lockcnt);
    }
}

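/* Release the lock, leaving the counter unchanged, waking one waiter if the
 * WAITING state was observed.
 */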
void qemu_lockcnt_unlock(QemuLockCnt *lockcnt)
{
    int expected, new, val;

    val = atomic_read(&lockcnt->count);
    do {
        expected = val;
        new = val & ~QEMU_LOCKCNT_STATE_MASK;
        trace_lockcnt_unlock_attempt(lockcnt, val, new);
        val = atomic_cmpxchg(&lockcnt->count, val, new);
    } while (val != expected);

    trace_lockcnt_unlock_success(lockcnt, val, new);
    if (val & QEMU_LOCKCNT_STATE_WAITING) {
        lockcnt_wake(lockcnt);
    }
}

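/* Return the current value of the counter (the lock bits are shifted out).  */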
unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt)
{
    return atomic_read(&lockcnt->count) >> QEMU_LOCKCNT_COUNT_SHIFT;
}
#else
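
/* Fallback implementation for hosts without futexes: the lock is a QemuMutex
 * and the counter is a separate atomic integer.
 */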
void qemu_lockcnt_init(QemuLockCnt *lockcnt)
{
    qemu_mutex_init(&lockcnt->mutex);
    lockcnt->count = 0;
}

void qemu_lockcnt_destroy(QemuLockCnt *lockcnt)
{
    qemu_mutex_destroy(&lockcnt->mutex);
}

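/* Increment the counter.  The 0->1 transition has to synchronize with a
 * concurrent lock holder (e.g. qemu_lockcnt_dec_and_lock), so in that case
 * take the mutex and go through qemu_lockcnt_inc_and_unlock instead of a
 * bare cmpxchg.
 */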
void qemu_lockcnt_inc(QemuLockCnt *lockcnt)
{
    int old;
    for (;;) {
        old = atomic_read(&lockcnt->count);
        if (old == 0) {
            qemu_lockcnt_lock(lockcnt);
            qemu_lockcnt_inc_and_unlock(lockcnt);
            return;
        } else {
            if (atomic_cmpxchg(&lockcnt->count, old, old + 1) == old) {
                return;
            }
        }
    }
}

void qemu_lockcnt_dec(QemuLockCnt *lockcnt)
{
    atomic_dec(&lockcnt->count);
}

/* Decrement a counter, and return locked if it is decremented to zero.
 * It is impossible for the counter to become nonzero while the mutex
 * is taken.
 */
bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt)
{
    int val = atomic_read(&lockcnt->count);
    while (val > 1) {
        int old = atomic_cmpxchg(&lockcnt->count, val, val - 1);
        if (old != val) {
            val = old;
            continue;
        }

        return false;
    }

    qemu_lockcnt_lock(lockcnt);
    if (atomic_fetch_dec(&lockcnt->count) == 1) {
        return true;
    }

    qemu_lockcnt_unlock(lockcnt);
    return false;
}

/* If the counter is one, decrement it and return locked.  Otherwise do
 * nothing.
 *
 * It is impossible for the counter to become nonzero while the mutex
 * is taken.
 */
bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt)
{
    /* No need for acquire semantics if we return false.  */
    int val = atomic_read(&lockcnt->count);
    if (val > 1) {
        return false;
    }

    qemu_lockcnt_lock(lockcnt);
    if (atomic_fetch_dec(&lockcnt->count) == 1) {
        return true;
    }

    qemu_lockcnt_inc_and_unlock(lockcnt);
    return false;
}

void qemu_lockcnt_lock(QemuLockCnt *lockcnt)
{
    qemu_mutex_lock(&lockcnt->mutex);
}

void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt)
{
    atomic_inc(&lockcnt->count);
    qemu_mutex_unlock(&lockcnt->mutex);
}

void qemu_lockcnt_unlock(QemuLockCnt *lockcnt)
{
    qemu_mutex_unlock(&lockcnt->mutex);
}

unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt)
{
    return atomic_read(&lockcnt->count);
}
#endif