/*
 * Carry-less multiply operations.
 * SPDX-License-Identifier: GPL-2.0-or-later
 *
 * Copyright (C) 2023 Linaro, Ltd.
 */

#ifndef CRYPTO_CLMUL_H
#define CRYPTO_CLMUL_H

#include "qemu/int128.h"
#include "host/crypto/clmul.h"

/**
 * clmul_8x8_low:
 *
 * Perform eight 8x8->8 carry-less multiplies.
 */
uint64_t clmul_8x8_low(uint64_t, uint64_t);

/**
 * clmul_8x4_even:
 *
 * Perform four 8x8->16 carry-less multiplies.
 * The odd bytes of the inputs are ignored.
 */
uint64_t clmul_8x4_even(uint64_t, uint64_t);

/**
 * clmul_8x4_odd:
 *
 * Perform four 8x8->16 carry-less multiplies.
 * The even bytes of the inputs are ignored.
 */
uint64_t clmul_8x4_odd(uint64_t, uint64_t);

/**
 * clmul_8x4_packed:
 *
 * Perform four 8x8->16 carry-less multiplies.
 */
uint64_t clmul_8x4_packed(uint32_t, uint32_t);

/**
 * clmul_16x2_even:
 *
 * Perform two 16x16->32 carry-less multiplies.
 * The odd words of the inputs are ignored.
 */
uint64_t clmul_16x2_even(uint64_t, uint64_t);

/**
 * clmul_16x2_odd:
 *
 * Perform two 16x16->32 carry-less multiplies.
 * The even words of the inputs are ignored.
 */
uint64_t clmul_16x2_odd(uint64_t, uint64_t);

/**
 * clmul_32:
 *
 * Perform a 32x32->64 carry-less multiply.
 */
uint64_t clmul_32(uint32_t, uint32_t);

/**
 * clmul_64:
 *
 * Perform a 64x64->128 carry-less multiply.
 * clmul_64_gen is the generic implementation; the clmul_64 wrapper
 * below selects the host-accelerated version when one is available.
 */
Int128 clmul_64_gen(uint64_t, uint64_t);

static inline Int128 clmul_64(uint64_t a, uint64_t b)
{
    if (HAVE_CLMUL_ACCEL) {
        /* Use the host's carry-less multiply instruction. */
        return clmul_64_accel(a, b);
    } else {
        /* Fall back to the portable implementation. */
        return clmul_64_gen(a, b);
    }
}

#endif /* CRYPTO_CLMUL_H */
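
/*
 * Illustrative appendix (editor's sketch, not part of the original
 * header): "carry-less" multiplication is polynomial multiplication
 * over GF(2) -- partial products are combined with XOR, so no carries
 * propagate between bit positions.  The self-test below checks that
 * definition for the 32x32->64 case.  clmul_32_ref, CLMUL_EXAMPLE_MAIN
 * and the test values are hypothetical names chosen for this example;
 * build it standalone with e.g. `cc -DCLMUL_EXAMPLE_MAIN example.c`.
 */
#ifdef CLMUL_EXAMPLE_MAIN
#include <stdint.h>
#include <assert.h>
#include <stdio.h>

/*
 * Bit-by-bit reference: XOR-accumulate a shifted copy of 'a' for every
 * set bit of 'b'.  The widths mirror clmul_32's 32x32->64 contract.
 */
static uint64_t clmul_32_ref(uint32_t a, uint32_t b)
{
    uint64_t r = 0;

    for (int i = 0; i < 32; i++) {
        if (b & (1u << i)) {
            r ^= (uint64_t)a << i;  /* XOR, so no carry between bits */
        }
    }
    return r;
}

int main(void)
{
    /*
     * (x + 1) * (x + 1) = x^2 + 1 over GF(2), i.e. 0x3 clmul 0x3 == 0x5,
     * whereas ordinary integer multiplication would give 9.
     */
    assert(clmul_32_ref(0x3, 0x3) == 0x5);
    /* Multiplying by x (i.e. 2) is a plain left shift. */
    assert(clmul_32_ref(0xffffffffu, 2) == 0x1fffffffeULL);
    printf("clmul_32 reference checks passed\n");
    return 0;
}
#endif /* CLMUL_EXAMPLE_MAIN */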