/*
 * Atomic operations on 64-bit quantities.
 *
 * Copyright (C) 2017 Red Hat, Inc.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/atomic.h"
#include "qemu/stats64.h"
#include "qemu/processor.h"

#ifndef CONFIG_ATOMIC64
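/*
 * Without native 64-bit atomics, the two 32-bit halves of a Stat64 are
 * protected by a tiny lock in s->lock: bit 0 is set while a writer holds
 * the lock, and each reader adds 2, so the upper bits count the readers
 * currently between stat64_rdlock() and stat64_rdunlock().
 */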
static inline void stat64_rdlock(Stat64 *s)
{
    /* Keep out incoming writers to avoid them starving us. */
    qatomic_add(&s->lock, 2);

    /* If there is a concurrent writer, wait for it.  */
    while (qatomic_read(&s->lock) & 1) {
        cpu_relax();
    }
}

static inline void stat64_rdunlock(Stat64 *s)
{
    qatomic_sub(&s->lock, 2);
}

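/*
 * A writer acquires the lock by moving s->lock from 0 to 1, so the
 * cmpxchg fails whenever another writer (bit 0) or any reader (a count
 * of 2 or more) is present; the caller is expected to back off and retry.
 */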
static inline bool stat64_wrtrylock(Stat64 *s)
{
    return qatomic_cmpxchg(&s->lock, 0, 1) == 0;
}

static inline void stat64_wrunlock(Stat64 *s)
{
    qatomic_dec(&s->lock);
}

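/*
 * Read both halves under the read lock: 64-bit updates below only happen
 * with the write lock held, so the pair cannot be observed half-written.
 */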
uint64_t stat64_get(const Stat64 *s)
{
    uint32_t high, low;

    stat64_rdlock((Stat64 *)s);

    /* 64-bit writes always take the lock, so we can read in
     * any order.
     */
    high = qatomic_read(&s->high);
    low = qatomic_read(&s->low);
    stat64_rdunlock((Stat64 *)s);

    return ((uint64_t)high << 32) | low;
}

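/*
 * Locked fallback for 64-bit addition (the fast path lives in
 * "qemu/stats64.h"): add @low to s->low and fold the resulting carry,
 * together with @high, into s->high.  Returns false without updating
 * the counter when the lock is contended, so that the caller can retry.
 */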
bool stat64_add32_carry(Stat64 *s, uint32_t low, uint32_t high)
{
    uint32_t old;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    /* 64-bit reads always take the lock, so they don't care about the
     * order of our update.  By updating s->low first, we can check
     * whether we have to carry into s->high.
     */
    old = qatomic_fetch_add(&s->low, low);
    high += (old + low) < old;
    qatomic_add(&s->high, high);
    stat64_wrunlock(s);
    return true;
}

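/*
 * Locked fallback for stat64_min(): store @value only if it is smaller
 * than the current contents.  Returns false when the lock is contended,
 * in which case nothing is updated and the caller retries.
 */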
bool stat64_min_slow(Stat64 *s, uint64_t value)
{
    uint32_t high, low;
    uint64_t orig;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    high = qatomic_read(&s->high);
    low = qatomic_read(&s->low);

    orig = ((uint64_t)high << 32) | low;
    if (value < orig) {
        /* We have to set low before high, just like stat64_min reads
         * high before low.  The value may become higher temporarily, but
         * stat64_get does not notice (it takes the lock) and the only ill
         * effect on stat64_min is that the slow path may be triggered
         * unnecessarily.
         */
        qatomic_set(&s->low, (uint32_t)value);
        smp_wmb();
        qatomic_set(&s->high, value >> 32);
    }
    stat64_wrunlock(s);
    return true;
}

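/*
 * Locked fallback for stat64_max(): the mirror image of stat64_min_slow(),
 * storing @value only if it is larger than the current contents.
 */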
bool stat64_max_slow(Stat64 *s, uint64_t value)
{
    uint32_t high, low;
    uint64_t orig;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    high = qatomic_read(&s->high);
    low = qatomic_read(&s->low);

    orig = ((uint64_t)high << 32) | low;
    if (value > orig) {
        /* We have to set low before high, just like stat64_max reads
         * high before low.  The value may become lower temporarily, but
         * stat64_get does not notice (it takes the lock) and the only ill
         * effect on stat64_max is that the slow path may be triggered
         * unnecessarily.
         */
        qatomic_set(&s->low, (uint32_t)value);
        smp_wmb();
        qatomic_set(&s->high, value >> 32);
    }
    stat64_wrunlock(s);
    return true;
}
#endif