/*
 * QemuLockCnt implementation
 *
 * Copyright Red Hat, Inc. 2017
 *
 * Author:
 *   Paolo Bonzini <pbonzini@redhat.com>
 */
#include "qemu/osdep.h"
#include "qemu/lockcnt.h"
#include "qemu/thread.h"
#include "qemu/atomic.h"
#include "trace.h"

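/*
 * Typical usage (a minimal sketch; "obj" is a hypothetical structure
 * whose resources are protected by a QemuLockCnt):
 *
 *   reader:
 *     qemu_lockcnt_inc(&obj->lockcnt);
 *     ... access obj's resources ...
 *     qemu_lockcnt_dec(&obj->lockcnt);
 *
 *   writer, freeing the resources only when no readers remain:
 *     qemu_lockcnt_lock(&obj->lockcnt);
 *     if (qemu_lockcnt_count(&obj->lockcnt) == 0) {
 *         ... free obj's resources ...
 *     }
 *     qemu_lockcnt_unlock(&obj->lockcnt);
 */
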
#ifdef CONFIG_LINUX
#include "qemu/futex.h"

/* On Linux, bits 0-1 are a futex-based lock, bits 2-31 are the counter.
 * For the mutex algorithm see Ulrich Drepper's "Futexes Are Tricky" (ok,
 * this is not the most relaxing citation I could make...).  It is similar
 * to mutex2 in the paper.
 */

#define QEMU_LOCKCNT_STATE_MASK    3
#define QEMU_LOCKCNT_STATE_FREE    0   /* free, uncontended */
#define QEMU_LOCKCNT_STATE_LOCKED  1   /* locked, uncontended */
#define QEMU_LOCKCNT_STATE_WAITING 2   /* locked, contended */

#define QEMU_LOCKCNT_COUNT_STEP    4
#define QEMU_LOCKCNT_COUNT_SHIFT   2

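/* For example, a count of 2 with the lock held and uncontended is encoded
 * as (2 << QEMU_LOCKCNT_COUNT_SHIFT) | QEMU_LOCKCNT_STATE_LOCKED == 9.
 */
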
void qemu_lockcnt_init(QemuLockCnt *lockcnt)
{
    lockcnt->count = 0;
}

void qemu_lockcnt_destroy(QemuLockCnt *lockcnt)
{
}

/* *val is the current value of lockcnt->count.
 *
 * If the lock is free, try a cmpxchg from *val to new_if_free; if it
 * succeeds, set *val to new_if_free and return true.
 *
 * If the lock is taken, wait for it to be released and return false
 * *without trying again to take the lock*.  Again, set *val to the
 * new value of lockcnt->count.
 *
 * If *waited is true on return, new_if_free's bottom two bits must not
 * be QEMU_LOCKCNT_STATE_LOCKED on subsequent calls, because the caller
 * does not know if there are other waiters.  Furthermore, after *waited
 * is set the caller has effectively acquired the lock.  If it returns
 * with the lock not taken, it must wake another futex waiter.
 */
static bool qemu_lockcnt_cmpxchg_or_wait(QemuLockCnt *lockcnt, int *val,
                                         int new_if_free, bool *waited)
{
    /* Fast path for when the lock is free.  */
    if ((*val & QEMU_LOCKCNT_STATE_MASK) == QEMU_LOCKCNT_STATE_FREE) {
        int expected = *val;

        trace_lockcnt_fast_path_attempt(lockcnt, expected, new_if_free);
        *val = qatomic_cmpxchg(&lockcnt->count, expected, new_if_free);
        if (*val == expected) {
            trace_lockcnt_fast_path_success(lockcnt, expected, new_if_free);
            *val = new_if_free;
            return true;
        }
    }

    /* The slow path moves from locked to waiting if necessary, then
     * does a futex wait.  Both steps can be repeated ad nauseam,
     * only getting out of the loop if we can have another shot at the
     * fast path.  Once we can, get out to compute the new destination
     * value for the fast path.
     */
    while ((*val & QEMU_LOCKCNT_STATE_MASK) != QEMU_LOCKCNT_STATE_FREE) {
        if ((*val & QEMU_LOCKCNT_STATE_MASK) == QEMU_LOCKCNT_STATE_LOCKED) {
            int expected = *val;
            int new = expected - QEMU_LOCKCNT_STATE_LOCKED + QEMU_LOCKCNT_STATE_WAITING;
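            /* e.g. one reader + LOCKED (0b0101) becomes one reader +
             * WAITING (0b0110).
             */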

            trace_lockcnt_futex_wait_prepare(lockcnt, expected, new);
            *val = qatomic_cmpxchg(&lockcnt->count, expected, new);
            if (*val == expected) {
                *val = new;
            }
            continue;
        }

        if ((*val & QEMU_LOCKCNT_STATE_MASK) == QEMU_LOCKCNT_STATE_WAITING) {
            *waited = true;
            trace_lockcnt_futex_wait(lockcnt, *val);
            qemu_futex_wait(&lockcnt->count, *val);
            *val = qatomic_read(&lockcnt->count);
            trace_lockcnt_futex_wait_resume(lockcnt, *val);
            continue;
        }

        abort();
    }
    return false;
}

static void lockcnt_wake(QemuLockCnt *lockcnt)
{
    trace_lockcnt_futex_wake(lockcnt);
    qemu_futex_wake(&lockcnt->count, 1);
}

void qemu_lockcnt_inc(QemuLockCnt *lockcnt)
{
    int val = qatomic_read(&lockcnt->count);
    bool waited = false;

    for (;;) {
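        /* If the counter is already nonzero, it can be incremented without
         * regard to the lock bits; only the 0->1 transition needs to
         * synchronize with the lock.
         */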
        if (val >= QEMU_LOCKCNT_COUNT_STEP) {
            int expected = val;
            val = qatomic_cmpxchg(&lockcnt->count, val,
                                  val + QEMU_LOCKCNT_COUNT_STEP);
            if (val == expected) {
                break;
            }
        } else {
            /* The fast path is (0, unlocked)->(1, unlocked).  */
            if (qemu_lockcnt_cmpxchg_or_wait(lockcnt, &val, QEMU_LOCKCNT_COUNT_STEP,
                                             &waited)) {
                break;
            }
        }
    }

    /* If we were woken by another thread, we should also wake one because
     * we are effectively releasing the lock that was given to us.  This is
     * the case where qemu_lockcnt_lock would leave QEMU_LOCKCNT_STATE_WAITING
     * in the low bits, and qemu_lockcnt_inc_and_unlock would find it and
     * wake someone.
     */
    if (waited) {
        lockcnt_wake(lockcnt);
    }
}

void qemu_lockcnt_dec(QemuLockCnt *lockcnt)
{
    qatomic_sub(&lockcnt->count, QEMU_LOCKCNT_COUNT_STEP);
}

/* Decrement a counter and, if it reaches zero, take the lock and return
 * true.  If the function returns true, it is impossible for the counter
 * to become nonzero until the next qemu_lockcnt_unlock.
 */
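/* Illustrative caller pattern (a sketch; obj and its members are
 * hypothetical):
 *
 *     if (qemu_lockcnt_dec_and_lock(&obj->lockcnt)) {
 *         ... free obj's resources, no concurrent readers remain ...
 *         qemu_lockcnt_unlock(&obj->lockcnt);
 *     }
 */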
bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt)
{
    int val = qatomic_read(&lockcnt->count);
    int locked_state = QEMU_LOCKCNT_STATE_LOCKED;
    bool waited = false;

    for (;;) {
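        /* val >= 2 * QEMU_LOCKCNT_COUNT_STEP means at least two references
         * remain, so the decrement cannot reach zero and no lock is needed.
         */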
        if (val >= 2 * QEMU_LOCKCNT_COUNT_STEP) {
            int expected = val;
            val = qatomic_cmpxchg(&lockcnt->count, val,
                                  val - QEMU_LOCKCNT_COUNT_STEP);
            if (val == expected) {
                break;
            }
        } else {
            /* If count is going 1->0, take the lock. The fast path is
             * (1, unlocked)->(0, locked) or (1, unlocked)->(0, waiting).
             */
            if (qemu_lockcnt_cmpxchg_or_wait(lockcnt, &val, locked_state, &waited)) {
                return true;
            }

            if (waited) {
                /* At this point we do not know if there are more waiters.  Assume
                 * there are.
                 */
                locked_state = QEMU_LOCKCNT_STATE_WAITING;
            }
        }
    }

    /* If we were woken by another thread, but we're returning in unlocked
     * state, we should also wake a thread because we are effectively
     * releasing the lock that was given to us.  This is the case where
     * qemu_lockcnt_lock would leave QEMU_LOCKCNT_STATE_WAITING in the low
     * bits, and qemu_lockcnt_unlock would find it and wake someone.
     */
    if (waited) {
        lockcnt_wake(lockcnt);
    }
    return false;
}

/* If the counter is one, decrement it and return true with the lock
 * taken.  Otherwise do nothing.
 *
 * If the function returns true, it is impossible for the counter to
 * become nonzero until the next qemu_lockcnt_unlock.
 */
bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt)
{
    int val = qatomic_read(&lockcnt->count);
    int locked_state = QEMU_LOCKCNT_STATE_LOCKED;
    bool waited = false;

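    /* val < 2 * QEMU_LOCKCNT_COUNT_STEP means the counter is 0 or 1,
     * whatever the lock bits say.
     */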
    while (val < 2 * QEMU_LOCKCNT_COUNT_STEP) {
        /* If count is going 1->0, take the lock. The fast path is
         * (1, unlocked)->(0, locked) or (1, unlocked)->(0, waiting).
         */
        if (qemu_lockcnt_cmpxchg_or_wait(lockcnt, &val, locked_state, &waited)) {
            return true;
        }

        if (waited) {
            /* At this point we do not know if there are more waiters.  Assume
             * there are.
             */
            locked_state = QEMU_LOCKCNT_STATE_WAITING;
        }
    }

    /* If we were woken by another thread, but we're returning in unlocked
     * state, we should also wake a thread because we are effectively
     * releasing the lock that was given to us.  This is the case where
     * qemu_lockcnt_lock would leave QEMU_LOCKCNT_STATE_WAITING in the low
     * bits, and qemu_lockcnt_inc_and_unlock would find it and wake someone.
     */
    if (waited) {
        lockcnt_wake(lockcnt);
    }
    return false;
}

void qemu_lockcnt_lock(QemuLockCnt *lockcnt)
{
    int val = qatomic_read(&lockcnt->count);
    int step = QEMU_LOCKCNT_STATE_LOCKED;
    bool waited = false;

    /* The third argument is only used if the low bits of val are 0
     * (QEMU_LOCKCNT_STATE_FREE), so just blindly mix in the desired
     * state.
     */
    while (!qemu_lockcnt_cmpxchg_or_wait(lockcnt, &val, val + step, &waited)) {
        if (waited) {
            /* At this point we do not know if there are more waiters.  Assume
             * there are.
             */
            step = QEMU_LOCKCNT_STATE_WAITING;
        }
    }
}

void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt)
{
    int expected, new, val;

    val = qatomic_read(&lockcnt->count);
    do {
        expected = val;
        new = (val + QEMU_LOCKCNT_COUNT_STEP) & ~QEMU_LOCKCNT_STATE_MASK;
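        /* Add one reference and clear the lock bits in one step, e.g.
         * one reader + WAITING (0b0110) becomes two readers + FREE (0b1000).
         */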
        trace_lockcnt_unlock_attempt(lockcnt, val, new);
        val = qatomic_cmpxchg(&lockcnt->count, val, new);
    } while (val != expected);

    trace_lockcnt_unlock_success(lockcnt, val, new);
    if (val & QEMU_LOCKCNT_STATE_WAITING) {
        lockcnt_wake(lockcnt);
    }
}

void qemu_lockcnt_unlock(QemuLockCnt *lockcnt)
{
    int expected, new, val;

    val = qatomic_read(&lockcnt->count);
    do {
        expected = val;
        new = val & ~QEMU_LOCKCNT_STATE_MASK;
        trace_lockcnt_unlock_attempt(lockcnt, val, new);
        val = qatomic_cmpxchg(&lockcnt->count, val, new);
    } while (val != expected);

    trace_lockcnt_unlock_success(lockcnt, val, new);
    if (val & QEMU_LOCKCNT_STATE_WAITING) {
        lockcnt_wake(lockcnt);
    }
}

unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt)
{
    return qatomic_read(&lockcnt->count) >> QEMU_LOCKCNT_COUNT_SHIFT;
}
#else
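/* Generic fallback when futexes are not available: a QemuMutex implements
 * the lock side, and the counter uses the whole word, with no state bits
 * and thus no shifting.
 */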
void qemu_lockcnt_init(QemuLockCnt *lockcnt)
{
    qemu_mutex_init(&lockcnt->mutex);
    lockcnt->count = 0;
}

void qemu_lockcnt_destroy(QemuLockCnt *lockcnt)
{
    qemu_mutex_destroy(&lockcnt->mutex);
}

void qemu_lockcnt_inc(QemuLockCnt *lockcnt)
{
    int old;
    for (;;) {
        old = qatomic_read(&lockcnt->count);
        if (old == 0) {
            qemu_lockcnt_lock(lockcnt);
            qemu_lockcnt_inc_and_unlock(lockcnt);
            return;
        } else {
            if (qatomic_cmpxchg(&lockcnt->count, old, old + 1) == old) {
                return;
            }
        }
    }
}

void qemu_lockcnt_dec(QemuLockCnt *lockcnt)
{
    qatomic_dec(&lockcnt->count);
}

/* Decrement a counter and, if it reaches zero, take the lock and return
 * true.  It is impossible for the counter to become nonzero while the
 * mutex is taken.
 */
bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt)
{
    int val = qatomic_read(&lockcnt->count);
    while (val > 1) {
        int old = qatomic_cmpxchg(&lockcnt->count, val, val - 1);
        if (old != val) {
            val = old;
            continue;
        }

        return false;
    }

    qemu_lockcnt_lock(lockcnt);
    if (qatomic_fetch_dec(&lockcnt->count) == 1) {
        return true;
    }

    qemu_lockcnt_unlock(lockcnt);
    return false;
}

/* If the counter is one, decrement it and return true with the lock
 * taken.  Otherwise do nothing.
 *
 * It is impossible for the counter to become nonzero while the mutex
 * is taken.
 */
bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt)
{
    /* No need for acquire semantics if we return false.  */
    int val = qatomic_read(&lockcnt->count);
    if (val > 1) {
        return false;
    }

    qemu_lockcnt_lock(lockcnt);
    if (qatomic_fetch_dec(&lockcnt->count) == 1) {
        return true;
    }

    qemu_lockcnt_inc_and_unlock(lockcnt);
    return false;
}

void qemu_lockcnt_lock(QemuLockCnt *lockcnt)
{
    qemu_mutex_lock(&lockcnt->mutex);
}

void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt)
{
    qatomic_inc(&lockcnt->count);
    qemu_mutex_unlock(&lockcnt->mutex);
}

void qemu_lockcnt_unlock(QemuLockCnt *lockcnt)
{
    qemu_mutex_unlock(&lockcnt->mutex);
}

unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt)
{
    return qatomic_read(&lockcnt->count);
}
#endif