xref: /openbmc/qemu/util/stats64.c (revision 7757b55e)
/*
 * Atomic operations on 64-bit quantities.
 *
 * Copyright (C) 2017 Red Hat, Inc.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/atomic.h"
#include "qemu/stats64.h"
#include "qemu/processor.h"

#ifndef CONFIG_ATOMIC64
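/*
 * Without native 64-bit atomics, Stat64 falls back to a tiny spinlock
 * encoded in the 32-bit s->lock word: a writer holds bit 0, and each
 * reader adds 2, so a writer can only take the lock while the whole
 * word is zero (no readers, no other writer).
 */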
static inline void stat64_rdlock(Stat64 *s)
{
    /* Keep out incoming writers to avoid them starving us. */
    qatomic_add(&s->lock, 2);

    /* If there is a concurrent writer, wait for it.  */
    while (qatomic_read(&s->lock) & 1) {
        cpu_relax();
    }
}

static inline void stat64_rdunlock(Stat64 *s)
{
    /* Drop our reader count; a writer can enter once it reaches zero. */
    qatomic_sub(&s->lock, 2);
}

static inline bool stat64_wrtrylock(Stat64 *s)
{
    /* Succeed only if there is no reader and no other writer. */
    return qatomic_cmpxchg(&s->lock, 0, 1) == 0;
}

static inline void stat64_wrunlock(Stat64 *s)
{
    /* Clear the writer bit; readers that arrived meanwhile keep their count. */
    qatomic_dec(&s->lock);
}

uint64_t stat64_get(const Stat64 *s)
{
    uint32_t high, low;

    /* Cast away const: taking the read lock modifies s->lock. */
    stat64_rdlock((Stat64 *)s);

    /* 64-bit writes always take the lock, so we can read in
     * any order.
     */
    high = qatomic_read(&s->high);
    low = qatomic_read(&s->low);
    stat64_rdunlock((Stat64 *)s);

    return ((uint64_t)high << 32) | low;
}

void stat64_set(Stat64 *s, uint64_t val)
{
    while (!stat64_wrtrylock(s)) {
        cpu_relax();
    }

    /* The write lock excludes readers too, so the two halves can be
     * stored in either order.
     */
    qatomic_set(&s->high, val >> 32);
    qatomic_set(&s->low, val);
    stat64_wrunlock(s);
}
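
/*
 * Illustrative usage sketch, not part of the original file: stat64_get()
 * and stat64_set() behave as a single atomic 64-bit load and store even
 * on hosts without CONFIG_ATOMIC64, so a reader never sees a torn value
 * made of one old and one new half.  The function name below is
 * hypothetical.
 */
static inline void stat64_example_reset(Stat64 *s)
{
    /* Each call below is individually atomic; the sequence as a whole
     * is not, which is acceptable for resetting a statistics counter.
     */
    if (stat64_get(s) != 0) {
        stat64_set(s, 0);
    }
}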

bool stat64_add32_carry(Stat64 *s, uint32_t low, uint32_t high)
{
    uint32_t old;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    /* 64-bit reads always take the lock, so they don't care about the
     * order of our update.  By updating s->low first, we can check
     * whether we have to carry into s->high.
     */
    old = qatomic_fetch_add(&s->low, low);
    high += (old + low) < old;
    qatomic_add(&s->high, high);
    stat64_wrunlock(s);
    return true;
}
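
/*
 * Illustrative sketch, not part of the original file: without
 * CONFIG_ATOMIC64 the fast path in "qemu/stats64.h" is expected to split
 * the 64-bit addend into 32-bit halves and retry stat64_add32_carry()
 * until the write lock is obtained.  The helper name below is
 * hypothetical.
 */
static inline void stat64_add_example(Stat64 *s, uint64_t value)
{
    while (!stat64_add32_carry(s, (uint32_t)value, value >> 32)) {
        /* Lock was busy; stat64_add32_carry() already called cpu_relax(). */
    }
}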

bool stat64_min_slow(Stat64 *s, uint64_t value)
{
    uint32_t high, low;
    uint64_t orig;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    high = qatomic_read(&s->high);
    low = qatomic_read(&s->low);

    orig = ((uint64_t)high << 32) | low;
    if (value < orig) {
        /* We have to set low before high, just like stat64_min reads
         * high before low.  The value may become higher temporarily, but
         * stat64_get does not notice (it takes the lock) and the only ill
         * effect on stat64_min is that the slow path may be triggered
         * unnecessarily.
         */
        qatomic_set(&s->low, (uint32_t)value);
        smp_wmb();
        qatomic_set(&s->high, value >> 32);
    }
    stat64_wrunlock(s);
    return true;
}

bool stat64_max_slow(Stat64 *s, uint64_t value)
{
    uint32_t high, low;
    uint64_t orig;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    high = qatomic_read(&s->high);
    low = qatomic_read(&s->low);

    orig = ((uint64_t)high << 32) | low;
    if (value > orig) {
        /* We have to set low before high, just like stat64_max reads
         * high before low.  The value may become lower temporarily, but
         * stat64_get does not notice (it takes the lock) and the only ill
         * effect on stat64_max is that the slow path may be triggered
         * unnecessarily.
         */
        qatomic_set(&s->low, (uint32_t)value);
        smp_wmb();
        qatomic_set(&s->high, value >> 32);
    }
    stat64_wrunlock(s);
    return true;
}
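
/*
 * Illustrative sketch, not part of the original file: stat64_min_slow()
 * and stat64_max_slow() return false when the write lock is busy, so the
 * fast paths in "qemu/stats64.h" are expected to retry until the slow
 * path reports success.  The helper names below are hypothetical.
 */
static inline void stat64_min_example(Stat64 *s, uint64_t value)
{
    while (!stat64_min_slow(s, value)) {
        /* Lock was busy; stat64_min_slow() already called cpu_relax(). */
    }
}

static inline void stat64_max_example(Stat64 *s, uint64_t value)
{
    while (!stat64_max_slow(s, value)) {
        /* Lock was busy; stat64_max_slow() already called cpu_relax(). */
    }
}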
#endif