/*
 * Simple C functions to supplement the C library
 *
 * Copyright (c) 2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/bswap.h"

static bool
buffer_zero_int(const void *buf, size_t len)
{
    if (unlikely(len < 8)) {
        /* For a very small buffer, simply accumulate all the bytes.  */
        const unsigned char *p = buf;
        const unsigned char *e = buf + len;
        unsigned char t = 0;

        do {
            t |= *p++;
        } while (p < e);

        return t == 0;
    } else {
        /* Otherwise, use the unaligned memory access functions to
           handle the beginning and end of the buffer, with a couple
           of loops handling the middle aligned section.  */
        uint64_t t = ldq_he_p(buf);
        const uint64_t *p = (uint64_t *)(((uintptr_t)buf + 8) & -8);
        const uint64_t *e = (uint64_t *)(((uintptr_t)buf + len) & -8);

        for (; p + 8 <= e; p += 8) {
            __builtin_prefetch(p + 8);
            if (t) {
                return false;
            }
            t = p[0] | p[1] | p[2] | p[3] | p[4] | p[5] | p[6] | p[7];
        }
        while (p < e) {
            t |= *p++;
        }
        t |= ldq_he_p(buf + len - 8);

        return t == 0;
    }
}
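
/*
 * The unaligned head and tail loads above may overlap the aligned
 * middle section, which is harmless: bytes are only accumulated with
 * OR and then compared against zero, so reading a byte twice cannot
 * change the result.  For example, with an 8-byte-aligned buf and
 * len == 13, ldq_he_p(buf) covers bytes 0-7 and ldq_he_p(buf + len - 8)
 * covers bytes 5-12, so every byte is examined at least once.
 */
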
#if defined(CONFIG_AVX512F_OPT) || defined(CONFIG_AVX2_OPT) || defined(__SSE2__)
/* Do not use push_options pragmas unnecessarily, because clang
 * does not support them.
 */
#if defined(CONFIG_AVX512F_OPT) || defined(CONFIG_AVX2_OPT)
#pragma GCC push_options
#pragma GCC target("sse2")
#endif
#include <emmintrin.h>

/* Note that each of these vectorized functions requires len >= 64.  */

static bool
buffer_zero_sse2(const void *buf, size_t len)
{
    __m128i t = _mm_loadu_si128(buf);
    __m128i *p = (__m128i *)(((uintptr_t)buf + 5 * 16) & -16);
    __m128i *e = (__m128i *)(((uintptr_t)buf + len) & -16);
    __m128i zero = _mm_setzero_si128();

    /* Loop over 16-byte aligned blocks of 64.  */
    while (likely(p <= e)) {
        __builtin_prefetch(p);
        t = _mm_cmpeq_epi8(t, zero);
        if (unlikely(_mm_movemask_epi8(t) != 0xFFFF)) {
            return false;
        }
        t = p[-4] | p[-3] | p[-2] | p[-1];
        p += 4;
    }

    /* Finish the aligned tail.  */
    t |= e[-3];
    t |= e[-2];
    t |= e[-1];

    /* Finish the unaligned tail.  */
    t |= _mm_loadu_si128(buf + len - 16);

    return _mm_movemask_epi8(_mm_cmpeq_epi8(t, zero)) == 0xFFFF;
}
#if defined(CONFIG_AVX512F_OPT) || defined(CONFIG_AVX2_OPT)
#pragma GCC pop_options
#endif

#ifdef CONFIG_AVX2_OPT
/* Note that due to restrictions/bugs wrt __builtin functions in gcc <= 4.8,
 * the includes have to be within the corresponding push_options region, and
 * therefore the regions themselves have to be ordered with increasing ISA.
 */
#pragma GCC push_options
#pragma GCC target("sse4")
#include <smmintrin.h>

static bool
buffer_zero_sse4(const void *buf, size_t len)
{
    __m128i t = _mm_loadu_si128(buf);
    __m128i *p = (__m128i *)(((uintptr_t)buf + 5 * 16) & -16);
    __m128i *e = (__m128i *)(((uintptr_t)buf + len) & -16);

    /* Loop over 16-byte aligned blocks of 64.  */
    while (likely(p <= e)) {
        __builtin_prefetch(p);
        if (unlikely(!_mm_testz_si128(t, t))) {
            return false;
        }
        t = p[-4] | p[-3] | p[-2] | p[-1];
        p += 4;
    }

    /* Finish the aligned tail.  */
    t |= e[-3];
    t |= e[-2];
    t |= e[-1];

    /* Finish the unaligned tail.  */
    t |= _mm_loadu_si128(buf + len - 16);

    return _mm_testz_si128(t, t);
}

#pragma GCC pop_options
#pragma GCC push_options
#pragma GCC target("avx2")
#include <immintrin.h>

static bool
buffer_zero_avx2(const void *buf, size_t len)
{
    /* Begin with an unaligned head of 32 bytes.  */
    __m256i t = _mm256_loadu_si256(buf);
    __m256i *p = (__m256i *)(((uintptr_t)buf + 5 * 32) & -32);
    __m256i *e = (__m256i *)(((uintptr_t)buf + len) & -32);

    if (likely(p <= e)) {
        /* Loop over 32-byte aligned blocks of 128.  */
        do {
            __builtin_prefetch(p);
            if (unlikely(!_mm256_testz_si256(t, t))) {
                return false;
            }
            t = p[-4] | p[-3] | p[-2] | p[-1];
            p += 4;
        } while (p <= e);
    } else {
        t |= _mm256_loadu_si256(buf + 32);
        if (len <= 128) {
            goto last2;
        }
    }

    /* Finish the last block of 128 unaligned.  */
    t |= _mm256_loadu_si256(buf + len - 4 * 32);
    t |= _mm256_loadu_si256(buf + len - 3 * 32);
 last2:
    t |= _mm256_loadu_si256(buf + len - 2 * 32);
    t |= _mm256_loadu_si256(buf + len - 1 * 32);

    return _mm256_testz_si256(t, t);
}
#pragma GCC pop_options
#endif /* CONFIG_AVX2_OPT */

#ifdef CONFIG_AVX512F_OPT
#pragma GCC push_options
#pragma GCC target("avx512f")
#include <immintrin.h>

static bool
buffer_zero_avx512(const void *buf, size_t len)
{
    /* Begin with an unaligned head of 64 bytes.  */
    __m512i t = _mm512_loadu_si512(buf);
    __m512i *p = (__m512i *)(((uintptr_t)buf + 5 * 64) & -64);
    __m512i *e = (__m512i *)(((uintptr_t)buf + len) & -64);

    /* Loop over 64-byte aligned blocks of 256.  */
    while (p <= e) {
        __builtin_prefetch(p);
        if (unlikely(_mm512_test_epi64_mask(t, t))) {
            return false;
        }
        t = p[-4] | p[-3] | p[-2] | p[-1];
        p += 4;
    }

    t |= _mm512_loadu_si512(buf + len - 4 * 64);
    t |= _mm512_loadu_si512(buf + len - 3 * 64);
    t |= _mm512_loadu_si512(buf + len - 2 * 64);
    t |= _mm512_loadu_si512(buf + len - 1 * 64);

    return !_mm512_test_epi64_mask(t, t);
}
#pragma GCC pop_options
#endif
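
/*
 * Each of the vector implementations above follows the same pattern:
 * an unaligned load covers the head of the buffer, the main loop ORs
 * aligned blocks of four vectors while testing the accumulator one
 * iteration late (so the test overlaps the prefetch and loads of the
 * next block), and the tail is finished with loads that may re-read
 * bytes already accumulated.  Starting the loop pointer at
 * ((uintptr_t)buf + 5 * sizeof(vector)) rounded down to vector
 * alignment guarantees that the first block p[-4] begins at buf + 1
 * or later even for a maximally misaligned buffer, so no load ever
 * touches memory before buf.
 */
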
/* Note that for test_buffer_is_zero_next_accel, the most preferred
 * ISA must have the least significant bit.
 */
#define CACHE_AVX512F 1
#define CACHE_AVX2    2
#define CACHE_SSE4    4
#define CACHE_SSE2    8

/* Make sure that these variables are appropriately initialized when
 * SSE2 is enabled on the compiler command-line, but the compiler is
 * too old to support CONFIG_AVX2_OPT.
 */
#if defined(CONFIG_AVX512F_OPT) || defined(CONFIG_AVX2_OPT)
# define INIT_CACHE 0
# define INIT_ACCEL buffer_zero_int
#else
# ifndef __SSE2__
# error "ISA selection confusion"
# endif
# define INIT_CACHE CACHE_SSE2
# define INIT_ACCEL buffer_zero_sse2
#endif

static unsigned cpuid_cache = INIT_CACHE;
static bool (*buffer_accel)(const void *, size_t) = INIT_ACCEL;
static int length_to_accel = 64;

static void init_accel(unsigned cache)
{
    bool (*fn)(const void *, size_t) = buffer_zero_int;
    if (cache & CACHE_SSE2) {
        fn = buffer_zero_sse2;
    }
#ifdef CONFIG_AVX2_OPT
    if (cache & CACHE_SSE4) {
        fn = buffer_zero_sse4;
    }
    if (cache & CACHE_AVX2) {
        fn = buffer_zero_avx2;
    }
#endif
#ifdef CONFIG_AVX512F_OPT
    if (cache & CACHE_AVX512F) {
        fn = buffer_zero_avx512;
        length_to_accel = 256;
    }
#endif
    buffer_accel = fn;
}

#if defined(CONFIG_AVX512F_OPT) || defined(CONFIG_AVX2_OPT)
#include "qemu/cpuid.h"

static void __attribute__((constructor)) init_cpuid_cache(void)
{
    int max = __get_cpuid_max(0, NULL);
    int a, b, c, d;
    unsigned cache = 0;

    if (max >= 1) {
        __cpuid(1, a, b, c, d);
        if (d & bit_SSE2) {
            cache |= CACHE_SSE2;
        }
        if (c & bit_SSE4_1) {
            cache |= CACHE_SSE4;
        }

        /* We must check that AVX is not just available, but usable.  */
        if ((c & bit_OSXSAVE) && (c & bit_AVX) && max >= 7) {
            int bv;
            __asm("xgetbv" : "=a"(bv), "=d"(d) : "c"(0));
            __cpuid_count(7, 0, a, b, c, d);
            if ((bv & 0x6) == 0x6 && (b & bit_AVX2)) {
                cache |= CACHE_AVX2;
            }
            /* 0xe6:
             *  XCR0[7:5] = 111b (OPMASK state, upper 256-bit of ZMM0-ZMM15
             *                    and ZMM16-ZMM31 state are enabled by OS)
             *  XCR0[2:1] =  11b (XMM state and YMM state are enabled by OS)
             */
            if ((bv & 0xe6) == 0xe6 && (b & bit_AVX512F)) {
                cache |= CACHE_AVX512F;
            }
        }
    }
    cpuid_cache = cache;
    init_accel(cache);
}
#endif /* CONFIG_AVX512F_OPT || CONFIG_AVX2_OPT */

bool test_buffer_is_zero_next_accel(void)
{
    /* If no bits set, we just tested buffer_zero_int, and there
       are no more acceleration options to test.  */
    if (cpuid_cache == 0) {
        return false;
    }
    /* Disable the accelerator we used before and select a new one.  */
    cpuid_cache &= cpuid_cache - 1;
    init_accel(cpuid_cache);
    return true;
}

static bool select_accel_fn(const void *buf, size_t len)
{
    if (likely(len >= length_to_accel)) {
        return buffer_accel(buf, len);
    }
    return buffer_zero_int(buf, len);
}

#else
#define select_accel_fn buffer_zero_int
bool test_buffer_is_zero_next_accel(void)
{
    return false;
}
#endif
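
/*
 * The accelerator cycling above relies on a standard bit trick:
 * cpuid_cache &= cpuid_cache - 1 clears the least significant set
 * bit, and the CACHE_* values assign the most preferred ISA the
 * lowest bit.  For example, starting from
 * CACHE_AVX2 | CACHE_SSE4 | CACHE_SSE2 (binary 1110), one call
 * clears CACHE_AVX2, leaving binary 1100, and init_accel() then
 * selects buffer_zero_sse4 as the best remaining implementation.
 */
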
/*
 * Checks if a buffer is all zeroes
 */
bool buffer_is_zero(const void *buf, size_t len)
{
    if (unlikely(len == 0)) {
        return true;
    }

    /* Fetch the beginning of the buffer while we select the accelerator.  */
    __builtin_prefetch(buf);

    /* Use an optimized zero check if possible.  Note that this also
       includes a check for an unrolled loop over 64-bit integers.  */
    return select_accel_fn(buf, len);
}
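
/*
 * Typical use (illustrative): migration's zero-page detection passes
 * each guest page here and sends a compact marker instead of the data
 * when the page is entirely zero:
 *
 *     if (buffer_is_zero(page, page_size)) {
 *         ... skip sending the page contents ...
 *     }
 */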