xref: /openbmc/qemu/util/lockcnt.c (revision 51dee5e4)
/*
 * QemuLockCnt implementation
 *
 * Copyright Red Hat, Inc. 2017
 *
 * Author:
 *   Paolo Bonzini <pbonzini@redhat.com>
 */
#include "qemu/osdep.h"
#include "qemu/thread.h"
#include "qemu/atomic.h"

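/*
 * A QemuLockCnt pairs a counter of active visitors with a mutex.  Readers
 * increment the counter around their accesses to a shared structure;
 * writers take the mutex and only free elements once no visitors remain.
 * A rough reader-side sketch (the "list" structure and the traversal are
 * hypothetical, not part of this file):
 *
 *     qemu_lockcnt_inc(&list->lockcnt);
 *     ... read-only traversal of the list ...
 *     qemu_lockcnt_dec(&list->lockcnt);
 *
 * The counter by itself does not make the traversal safe against
 * concurrent modification; it only tells writers when deferred frees can
 * be carried out (see the sketches after qemu_lockcnt_dec_and_lock and at
 * the end of the file).
 */
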
void qemu_lockcnt_init(QemuLockCnt *lockcnt)
{
    qemu_mutex_init(&lockcnt->mutex);
    lockcnt->count = 0;
}

void qemu_lockcnt_destroy(QemuLockCnt *lockcnt)
{
    qemu_mutex_destroy(&lockcnt->mutex);
}

/* Increment the counter.  If the counter is zero, another thread may hold
 * the mutex after qemu_lockcnt_dec_and_lock and be reclaiming the
 * protected structure, so take the mutex and increment under it; otherwise
 * a simple compare-and-swap loop suffices.
 */
void qemu_lockcnt_inc(QemuLockCnt *lockcnt)
{
    int old;
    for (;;) {
        old = atomic_read(&lockcnt->count);
        if (old == 0) {
            qemu_lockcnt_lock(lockcnt);
            qemu_lockcnt_inc_and_unlock(lockcnt);
            return;
        } else {
            if (atomic_cmpxchg(&lockcnt->count, old, old + 1) == old) {
                return;
            }
        }
    }
}

/* Decrement the counter.  This never takes the mutex; callers that need to
 * act when the counter reaches zero should use qemu_lockcnt_dec_and_lock
 * instead.
 */
void qemu_lockcnt_dec(QemuLockCnt *lockcnt)
{
    atomic_dec(&lockcnt->count);
}

/* Decrement the counter.  If the new value is zero, take the mutex and
 * return true; the caller then holds the lock, and the counter cannot
 * become nonzero again until the lock is released.  Otherwise return
 * false (the mutex is not held on return).
 */
bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt)
{
    int val = atomic_read(&lockcnt->count);
    while (val > 1) {
        int old = atomic_cmpxchg(&lockcnt->count, val, val - 1);
        if (old != val) {
            val = old;
            continue;
        }

        return false;
    }

    qemu_lockcnt_lock(lockcnt);
    if (atomic_fetch_dec(&lockcnt->count) == 1) {
        return true;
    }

    qemu_lockcnt_unlock(lockcnt);
    return false;
}

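/*
 * A sketch of how a reader might use qemu_lockcnt_dec_and_lock instead of
 * qemu_lockcnt_dec when leaving the structure, so that the last visitor
 * reclaims elements that writers marked as deleted (the "obj" structure
 * and free_deleted_elements() are hypothetical, not part of this file):
 *
 *     if (qemu_lockcnt_dec_and_lock(&obj->lockcnt)) {
 *         free_deleted_elements(obj);   // no visitors remain, mutex held
 *         qemu_lockcnt_unlock(&obj->lockcnt);
 *     }
 */
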
/* Decrement the counter only if doing so brings it to zero; in that case
 * take the mutex and return true.  Otherwise leave the counter unchanged
 * and return false.  As above, the counter cannot become nonzero again
 * while the caller holds the mutex.
 */
bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt)
{
    /* No need for acquire semantics if we return false.  */
    int val = atomic_read(&lockcnt->count);
    if (val > 1) {
        return false;
    }

    qemu_lockcnt_lock(lockcnt);
    if (atomic_fetch_dec(&lockcnt->count) == 1) {
        return true;
    }

    qemu_lockcnt_inc_and_unlock(lockcnt);
    return false;
}

void qemu_lockcnt_lock(QemuLockCnt *lockcnt)
{
    qemu_mutex_lock(&lockcnt->mutex);
}

/* Increment the counter while holding the mutex, then drop the mutex.
 * qemu_lockcnt_inc uses this when it finds the counter at zero.
 */
void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt)
{
    atomic_inc(&lockcnt->count);
    qemu_mutex_unlock(&lockcnt->mutex);
}

void qemu_lockcnt_unlock(QemuLockCnt *lockcnt)
{
    qemu_mutex_unlock(&lockcnt->mutex);
}

/* Return the current value of the counter.  */
unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt)
{
    return atomic_read(&lockcnt->count);
}
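
/*
 * A rough sketch of the write side (the "list"/"node" structures and the
 * QLIST field name are hypothetical, not part of this file): modifications
 * happen under the mutex, and freeing is deferred while visitors are still
 * inside the structure.
 *
 *     qemu_lockcnt_lock(&list->lockcnt);
 *     QLIST_REMOVE(node, next);
 *     if (qemu_lockcnt_count(&list->lockcnt) == 0) {
 *         g_free(node);             // no visitor can hold a reference
 *     } else {
 *         node->deleted = true;     // let the last visitor reclaim it
 *     }
 *     qemu_lockcnt_unlock(&list->lockcnt);
 */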