/*
 * SPDX-License-Identifier: GPL-2.0-or-later
 * buffer_is_zero acceleration, aarch64 version.
 */

#ifdef __ARM_NEON
#include <arm_neon.h>

/*
 * Helper for preventing the compiler from reassociating
 * chains of binary vector operations.
 */
#define REASSOC_BARRIER(vec0, vec1) asm("" : "+w"(vec0), "+w"(vec1))

static bool buffer_is_zero_simd(const void *buf, size_t len)
{
    uint32x4_t t0, t1, t2, t3;

    /* Align head/tail to 16-byte boundaries.  */
    const uint32x4_t *p = QEMU_ALIGN_PTR_DOWN(buf + 16, 16);
    const uint32x4_t *e = QEMU_ALIGN_PTR_DOWN(buf + len - 1, 16);

    /* Unaligned loads at head/tail.  */
    t0 = vld1q_u32(buf) | vld1q_u32(buf + len - 16);

    /* Collect a partial block at tail end.  */
    t1 = e[-7] | e[-6];
    t2 = e[-5] | e[-4];
    t3 = e[-3] | e[-2];
    t0 |= e[-1];
    REASSOC_BARRIER(t0, t1);
    REASSOC_BARRIER(t2, t3);
    t0 |= t1;
    t2 |= t3;
    REASSOC_BARRIER(t0, t2);
    t0 |= t2;

    /*
     * Loop over complete 128-byte blocks.
     * With the head and tail removed, e - p >= 14, so the loop
     * must iterate at least once.
     */
    do {
        /*
         * Reduce via UMAXV.  Whatever the actual result,
         * it will only be zero if all input bytes are zero.
         */
        if (unlikely(vmaxvq_u32(t0) != 0)) {
            return false;
        }

        t0 = p[0] | p[1];
        t1 = p[2] | p[3];
        t2 = p[4] | p[5];
        t3 = p[6] | p[7];
        REASSOC_BARRIER(t0, t1);
        REASSOC_BARRIER(t2, t3);
        t0 |= t1;
        t2 |= t3;
        REASSOC_BARRIER(t0, t2);
        t0 |= t2;
        p += 8;
    } while (p < e - 7);

    return vmaxvq_u32(t0) == 0;
}

static biz_accel_fn const accel_table[] = {
    buffer_is_zero_int_ge256,
    buffer_is_zero_simd,
};

#define best_accel() 1
#else
# include "host/include/generic/host/bufferiszero.c.inc"
#endif