stats64.c: diff between commit 62955e101e4bdc113e3205174567c9c8e12ec1b4 (old) and commit d73415a315471ac0b127ed3fad45c8ec5d711de1 (new)
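Every change in this diff is the same mechanical substitution: QEMU's atomic_*() wrappers become their qatomic_*() equivalents, the tree-wide rename done to keep QEMU's macros from colliding with C11 <stdatomic.h> names. The locking protocol, barrier placement, and control flow are untouched. Short notes and hedged sketches follow each function below.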
 /*
  * Atomic operations on 64-bit quantities.
  *
  * Copyright (C) 2017 Red Hat, Inc.
  *
  * Author: Paolo Bonzini <pbonzini@redhat.com>
  *
  * This work is licensed under the terms of the GNU GPL, version 2 or later.

 [... 4 unchanged lines hidden ...]

 #include "qemu/atomic.h"
 #include "qemu/stats64.h"
 #include "qemu/processor.h"

 #ifndef CONFIG_ATOMIC64
 static inline void stat64_rdlock(Stat64 *s)
 {
     /* Keep out incoming writers to avoid them starving us. */
-    atomic_add(&s->lock, 2);
+    qatomic_add(&s->lock, 2);

     /* If there is a concurrent writer, wait for it. */
-    while (atomic_read(&s->lock) & 1) {
+    while (qatomic_read(&s->lock) & 1) {
         cpu_relax();
     }
 }

 static inline void stat64_rdunlock(Stat64 *s)
 {
-    atomic_sub(&s->lock, 2);
+    qatomic_sub(&s->lock, 2);
 }

 static inline bool stat64_wrtrylock(Stat64 *s)
 {
-    return atomic_cmpxchg(&s->lock, 0, 1) == 0;
+    return qatomic_cmpxchg(&s->lock, 0, 1) == 0;
 }

 static inline void stat64_wrunlock(Stat64 *s)
 {
-    atomic_dec(&s->lock);
+    qatomic_dec(&s->lock);
 }
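The lock word encodes both roles at once: bit 0 is the writer flag claimed by the cmpxchg in stat64_wrtrylock(), and each reader adds 2, so the try-lock can succeed only while the word is exactly 0 (no readers, no writer). A reader bumps the count before waiting out an in-flight writer; once its +2 is visible, no new writer can win the cmpxchg, which is what the "avoid them starving us" comment is about. Below is a minimal stand-alone model of the same protocol in C11 atomics; the SeqRWLock type and function names are illustrative only, not QEMU API, and the default sequentially consistent C11 ordering used here is at least as strong as what the QEMU wrappers guarantee:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef struct {
        _Atomic uint32_t lock;   /* bit 0: writer; bits 1..31: reader count */
    } SeqRWLock;

    static void rdlock(SeqRWLock *l)
    {
        atomic_fetch_add(&l->lock, 2);        /* announce the reader first */
        while (atomic_load(&l->lock) & 1) {   /* then wait out any writer */
            /* spin; QEMU calls cpu_relax() here */
        }
    }

    static void rdunlock(SeqRWLock *l)
    {
        atomic_fetch_sub(&l->lock, 2);
    }

    static bool wrtrylock(SeqRWLock *l)
    {
        uint32_t expected = 0;
        /* Succeeds only when no reader and no writer holds the lock. */
        return atomic_compare_exchange_strong(&l->lock, &expected, 1);
    }

    static void wrunlock(SeqRWLock *l)
    {
        atomic_fetch_sub(&l->lock, 1);        /* clear the writer bit */
    }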

 uint64_t stat64_get(const Stat64 *s)
 {
     uint32_t high, low;

     stat64_rdlock((Stat64 *)s);

     /* 64-bit writes always take the lock, so we can read in
      * any order.
      */
-    high = atomic_read(&s->high);
-    low = atomic_read(&s->low);
+    high = qatomic_read(&s->high);
+    low = qatomic_read(&s->low);
     stat64_rdunlock((Stat64 *)s);

     return ((uint64_t)high << 32) | low;
 }
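The cast that discards const is needed only because even a pure read must write the lock word. The lock is what rules out torn reads: without it, a counter stepping from 0x00000000FFFFFFFF to 0x0000000100000000 could be observed by a concurrent reader as 0x00000001FFFFFFFF (new high word, old low word), a value that was never stored. Since any 64-bit writer holds the write lock while both halves are updated, a reader inside rdlock/rdunlock can load high and low in either order.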

 bool stat64_add32_carry(Stat64 *s, uint32_t low, uint32_t high)
 {
     uint32_t old;

     if (!stat64_wrtrylock(s)) {
         cpu_relax();
         return false;
     }

     /* 64-bit reads always take the lock, so they don't care about the
      * order of our update. By updating s->low first, we can check
      * whether we have to carry into s->high.
      */
-    old = atomic_fetch_add(&s->low, low);
+    old = qatomic_fetch_add(&s->low, low);
     high += (old + low) < old;
-    atomic_add(&s->high, high);
+    qatomic_add(&s->high, high);
     stat64_wrunlock(s);
     return true;
 }
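The expression (old + low) < old is the standard unsigned overflow test: uint32_t arithmetic wraps modulo 2^32, so the sum compares below the old value exactly when the low-word addition carried. For example, old = 0xFFFFFFFF and low = 1 wrap to 0, so high is incremented by one extra. A false return means the write lock was busy; the caller (the inline fast path in qemu/stats64.h) is expected to retry until the slow path reports success. A tiny self-contained check of the carry test (illustrative only):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t old = UINT32_MAX, low = 1;
        /* sum wraps to 0, which is < old: carry into the high word */
        assert((uint32_t)(old + low) < old);

        old = 10; low = 20;
        /* 30 is not < 10: no carry */
        assert(!((uint32_t)(old + low) < old));
        return 0;
    }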

 bool stat64_min_slow(Stat64 *s, uint64_t value)
 {
     uint32_t high, low;
     uint64_t orig;

     if (!stat64_wrtrylock(s)) {
         cpu_relax();
         return false;
     }

-    high = atomic_read(&s->high);
-    low = atomic_read(&s->low);
+    high = qatomic_read(&s->high);
+    low = qatomic_read(&s->low);

     orig = ((uint64_t)high << 32) | low;
     if (value < orig) {
         /* We have to set low before high, just like stat64_min reads
          * high before low. The value may become higher temporarily, but
          * stat64_get does not notice (it takes the lock) and the only ill
          * effect on stat64_min is that the slow path may be triggered
          * unnecessarily.
          */
-        atomic_set(&s->low, (uint32_t)value);
+        qatomic_set(&s->low, (uint32_t)value);
         smp_wmb();
-        atomic_set(&s->high, value >> 32);
+        qatomic_set(&s->high, value >> 32);
     }
     stat64_wrunlock(s);
     return true;
 }
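The low-before-high store order pairs with the reader order named in the comment: the lockless stat64_min fast path reads high before low, so an unlocked reader sees either the old value, the new value, or a transient mix that is higher than the final result. Concretely, lowering a minimum from 0x0000000200000005 to 0x0000000100000009 briefly exposes 0x0000000200000009 after the low store; a fast-path reader that sees it merely takes the slow path once more, where the lock forces a consistent re-check. The smp_wmb() keeps the two stores from being reordered by the compiler or CPU, presumably paired with a read barrier between the two loads on the fast-path side.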

 bool stat64_max_slow(Stat64 *s, uint64_t value)
 {
     uint32_t high, low;
     uint64_t orig;

     if (!stat64_wrtrylock(s)) {
         cpu_relax();
         return false;
     }

-    high = atomic_read(&s->high);
-    low = atomic_read(&s->low);
+    high = qatomic_read(&s->high);
+    low = qatomic_read(&s->low);

     orig = ((uint64_t)high << 32) | low;
     if (value > orig) {
         /* We have to set low before high, just like stat64_max reads
          * high before low. The value may become lower temporarily, but
          * stat64_get does not notice (it takes the lock) and the only ill
          * effect on stat64_max is that the slow path may be triggered
          * unnecessarily.
          */
-        atomic_set(&s->low, (uint32_t)value);
+        qatomic_set(&s->low, (uint32_t)value);
         smp_wmb();
-        atomic_set(&s->high, value >> 32);
+        qatomic_set(&s->high, value >> 32);
     }
     stat64_wrunlock(s);
     return true;
 }
 #endif
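stat64_max_slow() mirrors the min case with the comparison flipped. All of the above is compiled only when CONFIG_ATOMIC64 is unset; on hosts with native 64-bit atomics a Stat64 holds a single 64-bit value and none of this locking exists. A hedged sketch of how a caller uses the public API from qemu/stats64.h (stat64_add() and stat64_get() are the real entry points whose 32-bit slow paths are the functions above; the DeviceStats type and function names are made up for illustration):

    #include "qemu/osdep.h"
    #include "qemu/stats64.h"

    typedef struct DeviceStats {
        Stat64 rx_bytes;              /* updated from several threads */
    } DeviceStats;

    static void account_rx(DeviceStats *st, uint64_t nbytes)
    {
        /* Lock-free on 64-bit hosts; on 32-bit hosts this may briefly
         * spin through stat64_add32_carry() above. */
        stat64_add(&st->rx_bytes, nbytes);
    }

    static uint64_t query_rx(DeviceStats *st)
    {
        /* Always a consistent 64-bit snapshot, never a torn value. */
        return stat64_get(&st->rx_bytes);
    }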