/*
 * Atomic operations on 64-bit quantities.
 *
 * Copyright (C) 2017 Red Hat, Inc.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef QEMU_STATS64_H
#define QEMU_STATS64_H

#include "qemu/atomic.h"

/* This provides atomic operations on a 64-bit type, using a reader-writer
 * spinlock on architectures that do not have 64-bit atomic accesses.  Even
 * on those architectures, it tries hard not to take the lock.
 */
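
/* A minimal usage sketch (hypothetical names, not part of this header);
 * updates avoid the lock whenever possible, and readers may run
 * concurrently from any thread:
 *
 *     static Stat64 rx_bytes;
 *
 *     void account_rx(uint64_t len)
 *     {
 *         stat64_add(&rx_bytes, len);
 *     }
 *
 *     uint64_t rx_bytes_snapshot(void)
 *     {
 *         return stat64_get(&rx_bytes);
 *     }
 */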

typedef struct Stat64 {
#ifdef CONFIG_ATOMIC64
    aligned_uint64_t value;
#else
    uint32_t low, high;
    uint32_t lock;
#endif
} Stat64;
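
/* On hosts without 64-bit atomics the value is split into two 32-bit
 * halves, and "lock" is the spinlock that serializes updates which
 * cannot be done with 32-bit atomics alone (see the slow paths below). */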

#ifdef CONFIG_ATOMIC64
static inline void stat64_init(Stat64 *s, uint64_t value)
{
    /* This is not guaranteed to be atomic! */
    *s = (Stat64) { value };
}

static inline uint64_t stat64_get(const Stat64 *s)
{
    return qatomic_read__nocheck(&s->value);
}

static inline void stat64_set(Stat64 *s, uint64_t value)
{
    qatomic_set__nocheck(&s->value, value);
}

static inline void stat64_add(Stat64 *s, uint64_t value)
{
    qatomic_add(&s->value, value);
}

static inline void stat64_min(Stat64 *s, uint64_t value)
{
    uint64_t orig = qatomic_read__nocheck(&s->value);
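    /* qatomic_cmpxchg__nocheck() returns the value found in memory, so a
     * failed exchange reloads "orig" and the loop exits as soon as the
     * stored value is already <= value. */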
    while (orig > value) {
        orig = qatomic_cmpxchg__nocheck(&s->value, orig, value);
    }
}

static inline void stat64_max(Stat64 *s, uint64_t value)
{
    uint64_t orig = qatomic_read__nocheck(&s->value);
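    /* Same retry scheme as stat64_min above, with the comparison
     * reversed. */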
    while (orig < value) {
        orig = qatomic_cmpxchg__nocheck(&s->value, orig, value);
    }
}
#else
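/* Out-of-line slow paths, which take the spinlock.  Their contract, as
 * used by the callers below: the *_slow and *_carry helpers return false
 * when the update could not be completed (e.g. the lock was busy), and
 * the caller then retries its lock-free fast path. */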
uint64_t stat64_get(const Stat64 *s);
void stat64_set(Stat64 *s, uint64_t value);
bool stat64_min_slow(Stat64 *s, uint64_t value);
bool stat64_max_slow(Stat64 *s, uint64_t value);
bool stat64_add32_carry(Stat64 *s, uint32_t low, uint32_t high);

static inline void stat64_init(Stat64 *s, uint64_t value)
{
    /* This is not guaranteed to be atomic! */
    *s = (Stat64) { .low = value, .high = value >> 32, .lock = 0 };
}

static inline void stat64_add(Stat64 *s, uint64_t value)
{
    uint32_t low, high;
    high = value >> 32;
    low = (uint32_t) value;
    if (!low) {
        if (high) {
            qatomic_add(&s->high, high);
        }
        return;
    }

    for (;;) {
        uint32_t orig = s->low;
        uint32_t result = orig + low;
        uint32_t old;

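        /* "result < low" detects unsigned wraparound of the 32-bit add,
         * i.e. a carry out of the low word: adding 1 when s->low is
         * 0xffffffff gives result == 0, which is < low. */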
        if (result < low || high) {
            /* If the high part is affected, take the lock.  */
            if (stat64_add32_carry(s, low, high)) {
                return;
            }
            continue;
        }

        /* No carry, try with a 32-bit cmpxchg.  The result is independent of
         * the high 32 bits, so it can race just fine with stat64_add32_carry
         * and even stat64_get!
         */
        old = qatomic_cmpxchg(&s->low, orig, result);
        if (orig == old) {
            return;
        }
    }
}

static inline void stat64_min(Stat64 *s, uint64_t value)
{
    uint32_t low, high;
    uint32_t orig_low, orig_high;

    high = value >> 32;
    low = (uint32_t) value;
    do {
        orig_high = qatomic_read(&s->high);
        if (orig_high < high) {
            return;
        }

        if (orig_high == high) {
            /* High 32 bits are equal.  Read low after high, otherwise we
             * can get a false positive (e.g. 0x1235,0x0000 changes to
             * 0x1234,0x8000 and we read it as 0x1234,0x0000). Pairs with
             * the write barrier in stat64_min_slow.
             */
            smp_rmb();
            orig_low = qatomic_read(&s->low);
            if (orig_low <= low) {
                return;
            }

            /* See if we were lucky and a writer raced against us.  The
             * barrier is theoretically unnecessary, but if we remove it
             * we may miss being lucky.
             */
            smp_rmb();
            orig_high = qatomic_read(&s->high);
            if (orig_high < high) {
                return;
            }
        }

        /* If the value changes in any way, we have to take the lock.  */
    } while (!stat64_min_slow(s, value));
}

static inline void stat64_max(Stat64 *s, uint64_t value)
{
    uint32_t low, high;
    uint32_t orig_low, orig_high;

    high = value >> 32;
    low = (uint32_t) value;
    do {
        orig_high = qatomic_read(&s->high);
        if (orig_high > high) {
            return;
        }

        if (orig_high == high) {
            /* High 32 bits are equal.  Read low after high, otherwise we
             * can get a false positive (e.g. 0x1234,0x8000 changes to
             * 0x1235,0x0000 and we read it as 0x1235,0x8000). Pairs with
             * the write barrier in stat64_max_slow.
             */
            smp_rmb();
            orig_low = qatomic_read(&s->low);
            if (orig_low >= low) {
                return;
            }

            /* See if we were lucky and a writer raced against us.  The
             * barrier is theoretically unnecessary, but if we remove it
             * we may miss being lucky.
             */
            smp_rmb();
            orig_high = qatomic_read(&s->high);
            if (orig_high > high) {
                return;
            }
        }

        /* If the value changes in any way, we have to take the lock.  */
    } while (!stat64_max_slow(s, value));
}

#endif

#endif