/*
 * Simple C functions to supplement the C library
 *
 * Copyright (c) 2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/bswap.h"
#include "host/cpuinfo.h"

static bool
buffer_zero_int(const void *buf, size_t len)
{
    if (unlikely(len < 8)) {
        /* For a very small buffer, simply accumulate all the bytes.  */
        const unsigned char *p = buf;
        const unsigned char *e = buf + len;
        unsigned char t = 0;

        do {
            t |= *p++;
        } while (p < e);

        return t == 0;
    } else {
        /* Otherwise, use the unaligned memory access functions to
           handle the beginning and end of the buffer, with a couple
           of loops handling the middle aligned section.  */
        uint64_t t = ldq_he_p(buf);
        const uint64_t *p = (uint64_t *)(((uintptr_t)buf + 8) & -8);
        const uint64_t *e = (uint64_t *)(((uintptr_t)buf + len) & -8);

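        /*
         * "p" rounds buf up to an 8-byte boundary and "e" rounds
         * buf + len down, so the two unaligned ldq_he_p() loads of
         * the head and tail overlap the aligned middle section.
         * Re-reading bytes is harmless when they are only OR-ed
         * together.  Note that the check of "t" inside the loop
         * trails the accumulation by one iteration, so it overlaps
         * with the prefetch of the next block.
         */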
        for (; p + 8 <= e; p += 8) {
            __builtin_prefetch(p + 8);
            if (t) {
                return false;
            }
            t = p[0] | p[1] | p[2] | p[3] | p[4] | p[5] | p[6] | p[7];
        }
        while (p < e) {
            t |= *p++;
        }
        t |= ldq_he_p(buf + len - 8);

        return t == 0;
    }
}

#if defined(CONFIG_AVX2_OPT) || defined(__SSE2__)
#include <immintrin.h>

/* Note that each of these vectorized functions requires len >= 64.  */

static bool __attribute__((target("sse2")))
buffer_zero_sse2(const void *buf, size_t len)
{
    __m128i t = _mm_loadu_si128(buf);
    __m128i *p = (__m128i *)(((uintptr_t)buf + 5 * 16) & -16);
    __m128i *e = (__m128i *)(((uintptr_t)buf + len) & -16);
    __m128i zero = _mm_setzero_si128();

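    /*
     * "buf + 5 * 16" rounded down to a 16-byte boundary places the
     * first aligned block of 64, p[-4] .. p[-1], directly after (or
     * overlapping) the bytes covered by the unaligned load of "t"
     * above; overlap is harmless since the bytes are only OR-ed.
     * As in buffer_zero_int, the check of "t" trails the
     * accumulation by one iteration.
     */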
    /* Loop over 16-byte aligned blocks of 64.  */
    while (likely(p <= e)) {
        __builtin_prefetch(p);
        t = _mm_cmpeq_epi8(t, zero);
        if (unlikely(_mm_movemask_epi8(t) != 0xFFFF)) {
            return false;
        }
        t = p[-4] | p[-3] | p[-2] | p[-1];
        p += 4;
    }

    /* Finish the aligned tail.  */
    t |= e[-3];
    t |= e[-2];
    t |= e[-1];

    /* Finish the unaligned tail.  */
    t |= _mm_loadu_si128(buf + len - 16);

    return _mm_movemask_epi8(_mm_cmpeq_epi8(t, zero)) == 0xFFFF;
}

#ifdef CONFIG_AVX2_OPT
static bool __attribute__((target("avx2")))
buffer_zero_avx2(const void *buf, size_t len)
{
    /* Begin with an unaligned head of 32 bytes.  */
    __m256i t = _mm256_loadu_si256(buf);
    __m256i *p = (__m256i *)(((uintptr_t)buf + 5 * 32) & -32);
    __m256i *e = (__m256i *)(((uintptr_t)buf + len) & -32);

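    /*
     * _mm256_testz_si256(t, t) tests (t & t) == 0, i.e. it returns
     * nonzero exactly when every bit of "t" is zero, replacing the
     * compare-and-movemask sequence of the SSE2 version.
     */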
    /* Loop over 32-byte aligned blocks of 128.  */
    while (p <= e) {
        __builtin_prefetch(p);
        if (unlikely(!_mm256_testz_si256(t, t))) {
            return false;
        }
        t = p[-4] | p[-3] | p[-2] | p[-1];
        p += 4;
    }

    /* Finish the last block of 128 unaligned.  */
    t |= _mm256_loadu_si256(buf + len - 4 * 32);
    t |= _mm256_loadu_si256(buf + len - 3 * 32);
    t |= _mm256_loadu_si256(buf + len - 2 * 32);
    t |= _mm256_loadu_si256(buf + len - 1 * 32);

    return _mm256_testz_si256(t, t);
}
#endif /* CONFIG_AVX2_OPT */

/*
 * Make sure that these variables are appropriately initialized when
 * SSE2 is enabled on the compiler command-line, but the compiler is
 * too old to support CONFIG_AVX2_OPT.
 */
#if defined(CONFIG_AVX2_OPT)
# define INIT_USED     0
# define INIT_LENGTH   0
# define INIT_ACCEL    buffer_zero_int
#else
# ifndef __SSE2__
#  error "ISA selection confusion"
# endif
# define INIT_USED     CPUINFO_SSE2
# define INIT_LENGTH   64
# define INIT_ACCEL    buffer_zero_sse2
#endif

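/*
 * Runtime dispatch state.  With CONFIG_AVX2_OPT these begin at the
 * integer fallback and are upgraded by the constructor below once
 * the CPU has been probed; with a bare __SSE2__ baseline no probing
 * is needed and SSE2 is selected statically.
 */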
static unsigned used_accel = INIT_USED;
static unsigned length_to_accel = INIT_LENGTH;
static bool (*buffer_accel)(const void *, size_t) = INIT_ACCEL;

static unsigned __attribute__((noinline))
select_accel_cpuinfo(unsigned info)
{
    /* Array is sorted in order of algorithm preference. */
    static const struct {
        unsigned bit;
        unsigned len;
        bool (*fn)(const void *, size_t);
    } all[] = {
#ifdef CONFIG_AVX2_OPT
        { CPUINFO_AVX2,    128, buffer_zero_avx2 },
#endif
        { CPUINFO_SSE2,     64, buffer_zero_sse2 },
        { CPUINFO_ALWAYS,    0, buffer_zero_int },
    };

    for (unsigned i = 0; i < ARRAY_SIZE(all); ++i) {
        if (info & all[i].bit) {
            length_to_accel = all[i].len;
            buffer_accel = all[i].fn;
            return all[i].bit;
        }
    }
    return 0;
}

#if defined(CONFIG_AVX2_OPT)
static void __attribute__((constructor)) init_accel(void)
{
    used_accel = select_accel_cpuinfo(cpuinfo_init());
}
#endif /* CONFIG_AVX2_OPT */

bool test_buffer_is_zero_next_accel(void)
{
    /*
     * Accumulate the accelerators that we've already tested, and
     * remove them from the set to test this round.  We'll get back
     * a zero from select_accel_cpuinfo when there are no more.
     */
    unsigned used = select_accel_cpuinfo(cpuinfo & ~used_accel);
    used_accel |= used;
    return used;
}
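
/*
 * A caller (e.g. a unit test) can exercise every available
 * implementation by re-running its checks until this returns false.
 * A minimal sketch, assuming buf/len/expected come from the test:
 *
 *     do {
 *         g_assert(buffer_is_zero(buf, len) == expected);
 *     } while (test_buffer_is_zero_next_accel());
 */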
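
/*
 * Dispatch helper: the vectorized routines require at least
 * length_to_accel bytes, so shorter buffers always take the
 * integer path.
 */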
static bool select_accel_fn(const void *buf, size_t len)
{
    if (likely(len >= length_to_accel)) {
        return buffer_accel(buf, len);
    }
    return buffer_zero_int(buf, len);
}

#else
#define select_accel_fn  buffer_zero_int
bool test_buffer_is_zero_next_accel(void)
{
    return false;
}
#endif

/*
 * Checks if a buffer is all zeroes
 */
bool buffer_is_zero(const void *buf, size_t len)
{
    if (unlikely(len == 0)) {
        return true;
    }

    /* Fetch the beginning of the buffer while we select the accelerator.  */
    __builtin_prefetch(buf);

    /* Use an optimized zero check if possible.  The fallback,
       buffer_zero_int, is an unrolled loop over 64-bit integers.  */
    return select_accel_fn(buf, len);
}
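
/*
 * Typical use is to elide work for all-zero regions; a sketch, where
 * write_cluster() and its arguments are hypothetical:
 *
 *     if (!buffer_is_zero(cluster_buf, cluster_size)) {
 *         write_cluster(out, cluster_buf, cluster_size);
 *     }
 */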