/*
 * Atomic operations on 64-bit quantities.
 *
 * Copyright (C) 2017 Red Hat, Inc.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/atomic.h"
#include "qemu/stats64.h"
#include "qemu/processor.h"

#ifndef CONFIG_ATOMIC64
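/*
 * Fallback for hosts without 64-bit atomic operations: the counter is
 * kept as two 32-bit halves (s->low, s->high) protected by a small
 * spinlock embedded in s->lock.  Bit 0 of the lock is set while a
 * writer holds it; readers add 2, so the upper bits count concurrent
 * readers and keep writers out while a read is in flight.
 */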
static inline void stat64_rdlock(Stat64 *s)
{
    /* Keep out incoming writers to avoid them starving us. */
    atomic_add(&s->lock, 2);

    /* If there is a concurrent writer, wait for it.  */
    while (atomic_read(&s->lock) & 1) {
        cpu_relax();
    }
}

static inline void stat64_rdunlock(Stat64 *s)
{
    atomic_sub(&s->lock, 2);
}

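/* A writer can take the lock only while it is completely idle: the
 * cmpxchg fails if either a writer (bit 0) or any reader (a nonzero
 * reader count) holds it.  On failure, the slow-path helpers below
 * return false so that their caller can retry.
 */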
static inline bool stat64_wrtrylock(Stat64 *s)
{
    return atomic_cmpxchg(&s->lock, 0, 1) == 0;
}

static inline void stat64_wrunlock(Stat64 *s)
{
    atomic_dec(&s->lock);
}

uint64_t stat64_get(const Stat64 *s)
{
    uint32_t high, low;

    stat64_rdlock((Stat64 *)s);

    /* 64-bit writes always take the lock, so we can read in
     * any order.
     */
    high = atomic_read(&s->high);
    low = atomic_read(&s->low);
    stat64_rdunlock((Stat64 *)s);

    return ((uint64_t)high << 32) | low;
}

bool stat64_add32_carry(Stat64 *s, uint32_t low, uint32_t high)
{
    uint32_t old;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    /* 64-bit reads always take the lock, so they don't care about the
     * order of our update.  By updating s->low first, we can check
     * whether we have to carry into s->high.
     */
    old = atomic_fetch_add(&s->low, low);
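    /* Unsigned wraparound of the 32-bit sum means a carry into s->high.  */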
    high += (old + low) < old;
    atomic_add(&s->high, high);
    stat64_wrunlock(s);
    return true;
}

bool stat64_min_slow(Stat64 *s, uint64_t value)
{
    uint32_t high, low;
    uint64_t orig;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    high = atomic_read(&s->high);
    low = atomic_read(&s->low);

    orig = ((uint64_t)high << 32) | low;
    if (value < orig) {
        /* We have to set low before high, just like stat64_min reads
         * high before low.  The value may become higher temporarily, but
         * stat64_get does not notice (it takes the lock) and the only ill
         * effect on stat64_min is that the slow path may be triggered
         * unnecessarily.
         */
        atomic_set(&s->low, (uint32_t)value);
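        /* Order the store to s->low before the store to s->high.  */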
        smp_wmb();
        atomic_set(&s->high, value >> 32);
    }
    stat64_wrunlock(s);
    return true;
}

bool stat64_max_slow(Stat64 *s, uint64_t value)
{
    uint32_t high, low;
    uint64_t orig;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    high = atomic_read(&s->high);
    low = atomic_read(&s->low);

    orig = ((uint64_t)high << 32) | low;
    if (value > orig) {
        /* We have to set low before high, just like stat64_max reads
         * high before low.  The value may become lower temporarily, but
         * stat64_get does not notice (it takes the lock) and the only ill
         * effect on stat64_max is that the slow path may be triggered
         * unnecessarily.
         */
        atomic_set(&s->low, (uint32_t)value);
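        /* Order the store to s->low before the store to s->high.  */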
        smp_wmb();
        atomic_set(&s->high, value >> 32);
    }
    stat64_wrunlock(s);
    return true;
}
#endif
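
/*
 * Illustrative usage, not part of this file: the public wrappers in
 * qemu/stats64.h -- stat64_add(), stat64_min(), stat64_max() and
 * stat64_get() -- use native 64-bit atomics when CONFIG_ATOMIC64 is
 * defined and fall back to the lock-based helpers above otherwise.
 * The counter and function names below are hypothetical:
 *
 *     static Stat64 bytes_written;
 *
 *     void account_write(size_t len)
 *     {
 *         stat64_add(&bytes_written, len);
 *     }
 *
 *     uint64_t query_bytes_written(void)
 *     {
 *         return stat64_get(&bytes_written);
 *     }
 */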