/*
 * Atomic operations on 64-bit quantities.
 *
 * Copyright (C) 2017 Red Hat, Inc.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/atomic.h"
#include "qemu/stats64.h"
#include "qemu/processor.h"

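/*
 * Fallback implementation used when the host has no 64-bit atomics:
 * the 64-bit value is kept in the 32-bit "low" and "high" halves of a
 * Stat64 and protected by its "lock" word.  Bit 0 of the lock is the
 * writer flag (taken with a cmpxchg from 0 to 1) and every reader adds
 * 2, so a writer can only enter while no readers and no other writer
 * hold the lock.
 */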
#ifndef CONFIG_ATOMIC64
static inline void stat64_rdlock(Stat64 *s)
{
    /* Keep out incoming writers so that they cannot starve us. */
    qatomic_add(&s->lock, 2);

    /* If there is a concurrent writer, wait for it.  */
    while (qatomic_read(&s->lock) & 1) {
        cpu_relax();
    }
}

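/* Drop the reader count of 2 taken in stat64_rdlock().  */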
static inline void stat64_rdunlock(Stat64 *s)
{
    qatomic_sub(&s->lock, 2);
}

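/*
 * Try to become the single writer; this succeeds only when no readers
 * and no other writer currently hold the lock.
 */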
static inline bool stat64_wrtrylock(Stat64 *s)
{
    return qatomic_cmpxchg(&s->lock, 0, 1) == 0;
}

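/*
 * Release the write lock.  Readers may have added 2 in the meantime,
 * so clear the writer bit with a decrement rather than a store of 0.
 */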
static inline void stat64_wrunlock(Stat64 *s)
{
    qatomic_dec(&s->lock);
}

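/* Read the whole 64-bit value while holding the reader side of the lock.  */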
uint64_t stat64_get(const Stat64 *s)
{
    uint32_t high, low;

    stat64_rdlock((Stat64 *)s);

    /* 64-bit writes always take the lock, so we can read in
     * any order.
     */
    high = qatomic_read(&s->high);
    low = qatomic_read(&s->low);
    stat64_rdunlock((Stat64 *)s);

    return ((uint64_t)high << 32) | low;
}

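/*
 * Slow-path add, used when the update may carry from the low into the
 * high word.  Returns false if the write lock is busy, so that the
 * caller can retry.
 */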
bool stat64_add32_carry(Stat64 *s, uint32_t low, uint32_t high)
{
    uint32_t old;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    /* 64-bit reads always take the lock, so they don't care about the
     * order of our update.  By updating s->low first, we can check
     * whether we have to carry into s->high.
     */
    old = qatomic_fetch_add(&s->low, low);
    high += (old + low) < old;
    qatomic_add(&s->high, high);
    stat64_wrunlock(s);
    return true;
}

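/*
 * Slow-path minimum: store the new value only if it is smaller than
 * the current one.  Returns false if the write lock is busy, so that
 * the caller can retry.
 */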
bool stat64_min_slow(Stat64 *s, uint64_t value)
{
    uint32_t high, low;
    uint64_t orig;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    high = qatomic_read(&s->high);
    low = qatomic_read(&s->low);

    orig = ((uint64_t)high << 32) | low;
    if (value < orig) {
        /* We have to set low before high, just like stat64_min reads
         * high before low.  The value may become higher temporarily, but
         * stat64_get does not notice (it takes the lock) and the only ill
         * effect on stat64_min is that the slow path may be triggered
         * unnecessarily.
         */
        qatomic_set(&s->low, (uint32_t)value);
        smp_wmb();
        qatomic_set(&s->high, value >> 32);
    }
    stat64_wrunlock(s);
    return true;
}

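/*
 * Slow-path maximum: store the new value only if it is larger than
 * the current one.  Returns false if the write lock is busy, so that
 * the caller can retry.
 */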
bool stat64_max_slow(Stat64 *s, uint64_t value)
{
    uint32_t high, low;
    uint64_t orig;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    high = qatomic_read(&s->high);
    low = qatomic_read(&s->low);

    orig = ((uint64_t)high << 32) | low;
    if (value > orig) {
        /* We have to set low before high, just like stat64_max reads
         * high before low.  The value may become lower temporarily, but
         * stat64_get does not notice (it takes the lock) and the only ill
         * effect on stat64_max is that the slow path may be triggered
         * unnecessarily.
         */
        qatomic_set(&s->low, (uint32_t)value);
        smp_wmb();
        qatomic_set(&s->high, value >> 32);
    }
    stat64_wrunlock(s);
    return true;
}
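
/*
 * Illustration only (an assumption, not part of the original file):
 * the inline wrappers declared in qemu/stats64.h are expected to keep
 * retrying these slow-path helpers until the write lock is obtained,
 * along the lines of:
 *
 *     static inline void example_stat64_add(Stat64 *s, uint64_t value)
 *     {
 *         while (!stat64_add32_carry(s, (uint32_t)value, value >> 32)) {
 *             // the helper already called cpu_relax(); just retry
 *         }
 *     }
 *
 * "example_stat64_add" is a hypothetical name used only for this sketch.
 */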
#endif