/*
 * Cast6 Cipher 8-way parallel algorithm (AVX/x86_64)
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *	<Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * Copyright © 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 *
 */

#include <linux/linkage.h>
#include "glue_helper-asm-avx.S"

.file "cast6-avx-x86_64-asm_64.S"

.extern cast_s1
.extern cast_s2
.extern cast_s3
.extern cast_s4

/* structure of crypto context */
#define km	0
#define kr	(12*4*4)

/* s-boxes */
#define s1	cast_s1
#define s2	cast_s2
#define s3	cast_s3
#define s4	cast_s4

/**********************************************************************
  8-way AVX cast6
 **********************************************************************/
#define CTX %rdi

#define RA1 %xmm0
#define RB1 %xmm1
#define RC1 %xmm2
#define RD1 %xmm3

#define RA2 %xmm4
#define RB2 %xmm5
#define RC2 %xmm6
#define RD2 %xmm7

#define RX %xmm8

#define RKM  %xmm9
#define RKR  %xmm10
#define RKRF %xmm11
#define RKRR %xmm12
#define R32  %xmm13
#define R1ST %xmm14

#define RTMP %xmm15

#define RID1  %rbp
#define RID1d %ebp
#define RID2  %rsi
#define RID2d %esi

#define RGI1   %rdx
#define RGI1bl %dl
#define RGI1bh %dh
#define RGI2   %rcx
#define RGI2bl %cl
#define RGI2bh %ch

#define RGI3   %rax
#define RGI3bl %al
#define RGI3bh %ah
#define RGI4   %rbx
#define RGI4bl %bl
#define RGI4bh %bh

#define RFS1  %r8
#define RFS1d %r8d
#define RFS2  %r9
#define RFS2d %r9d
#define RFS3  %r10
#define RFS3d %r10d


/*
 * Four byte-indexed S-box lookups on one 32-bit word, combined with
 * op1..op3.  interleave_op(il_reg) is a scheduling hook: it performs
 * the extra shift that moves the next 32-bit half of the source into
 * place, interleaved with the current lookups.
 */
#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
	movzbl src ## bh, RID1d; \
	movzbl src ## bl, RID2d; \
	shrq $16, src; \
	movl s1(, RID1, 4), dst ## d; \
	op1 s2(, RID2, 4), dst ## d; \
	movzbl src ## bh, RID1d; \
	movzbl src ## bl, RID2d; \
	interleave_op(il_reg); \
	op2 s3(, RID1, 4), dst ## d; \
	op3 s4(, RID2, 4), dst ## d;

#define dummy(d) /* do nothing */

#define shr_next(reg) \
	shrq $16, reg;

/* key mixing (op0) and the key-dependent rotate, done in SIMD */
#define F_head(a, x, gi1, gi2, op0) \
	op0 a, RKM, x; \
	vpslld RKRF, x, RTMP; \
	vpsrld RKRR, x, x; \
	vpor RTMP, x, x; \
	\
	vmovq x, gi1; \
	vpextrq $1, x, gi2;

/* S-box lookups in general-purpose registers, results merged back into x */
#define F_tail(a, x, gi1, gi2, op1, op2, op3) \
	lookup_32bit(##gi1, RFS1, op1, op2, op3, shr_next, ##gi1); \
	lookup_32bit(##gi2, RFS3, op1, op2, op3, shr_next, ##gi2); \
	\
	lookup_32bit(##gi1, RFS2, op1, op2, op3, dummy, none); \
	shlq $32, RFS2; \
	orq RFS1, RFS2; \
	lookup_32bit(##gi2, RFS1, op1, op2, op3, dummy, none); \
	shlq $32, RFS1; \
	orq RFS1, RFS3; \
	\
	vmovq RFS2, x; \
	vpinsrq $1, RFS3, x, x;
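
/*
 * Per RFC 2612 the three CAST-256 round functions share one shape and
 * differ only in the key-mixing op and in how the four S-box lookups
 * are combined:
 *
 *	f1: I = ((Km + D) <<< Kr); f1 = ((S1[Ia] ^ S2[Ib]) - S3[Ic]) + S4[Id]
 *	f2: I = ((Km ^ D) <<< Kr); f2 = ((S1[Ia] - S2[Ib]) + S3[Ic]) ^ S4[Id]
 *	f3: I = ((Km - D) <<< Kr); f3 = ((S1[Ia] + S2[Ib]) ^ S3[Ic]) - S4[Id]
 *
 * F_2 below evaluates one such function for both four-block halves of
 * the 8-way state: op0 is the key-mixing op, op1..op3 the combining
 * ops, and the result is XORed into the target words (a1, a2).
 */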
#define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \
	F_head(b1, RX, RGI1, RGI2, op0); \
	F_head(b2, RX, RGI3, RGI4, op0); \
	\
	F_tail(b1, RX, RGI1, RGI2, op1, op2, op3); \
	F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3); \
	\
	vpxor a1, RX, a1; \
	vpxor a2, RTMP, a2;

#define F1_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
#define F2_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
#define F3_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)

#define qop(in, out, f) \
	F ## f ## _2(out ## 1, in ## 1, out ## 2, in ## 2);

/*
 * Broadcast Km[nn] and split the next Kr byte (low 5 bits of RKR's
 * first byte) into a left-shift count (RKRF) and a right-shift count
 * (RKRR = 32 - Kr), since AVX has no variable-rotate instruction;
 * then advance RKR to the following rotation byte.
 */
#define get_round_keys(nn) \
	vbroadcastss (km+(4*(nn)))(CTX), RKM; \
	vpand R1ST, RKR, RKRF; \
	vpsubq RKRF, R32, RKRR; \
	vpsrldq $1, RKR, RKR;

/* one forward quadround: C ^= f1(D); B ^= f2(C); A ^= f3(B); D ^= f1(A) */
#define Q(n) \
	get_round_keys(4*n+0); \
	qop(RD, RC, 1); \
	\
	get_round_keys(4*n+1); \
	qop(RC, RB, 2); \
	\
	get_round_keys(4*n+2); \
	qop(RB, RA, 3); \
	\
	get_round_keys(4*n+3); \
	qop(RA, RD, 1);

/* reverse quadround: the rounds of Q, applied and keyed in reverse order */
#define QBAR(n) \
	get_round_keys(4*n+3); \
	qop(RA, RD, 1); \
	\
	get_round_keys(4*n+2); \
	qop(RB, RA, 3); \
	\
	get_round_keys(4*n+1); \
	qop(RC, RB, 2); \
	\
	get_round_keys(4*n+0); \
	qop(RD, RC, 1);

#define shuffle(mask) \
	vpshufb mask, RKR, RKR;

/*
 * Load the 16 Kr bytes for quadrounds 4n..4n+3 and add 16 to each
 * rotation count (XOR suffices, as only bit 4 changes mod 32).  The
 * extra <<< 16 compensates for lookup_32bit pulling the S-box index
 * bytes out of the low half of the rotated word first.
 */
#define preload_rkr(n, do_mask, mask) \
	vbroadcastss .L16_mask, RKR; \
	/* add 16-bit rotation to key rotations (mod 32) */ \
	vpxor (kr+n*16)(CTX), RKR, RKR; \
	do_mask(mask);

#define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
	vpunpckldq x1, x0, t0; \
	vpunpckhdq x1, x0, t2; \
	vpunpckldq x3, x2, t1; \
	vpunpckhdq x3, x2, x3; \
	\
	vpunpcklqdq t1, t0, x0; \
	vpunpckhqdq t1, t0, x1; \
	vpunpcklqdq x3, t2, x2; \
	vpunpckhqdq x3, t2, x3;

/*
 * Byte-swap each 32-bit word (CAST-256 is big-endian) and transpose
 * four blocks so that each register holds the same word of all four.
 */
#define inpack_blocks(x0, x1, x2, x3, t0, t1, t2, rmask) \
	vpshufb rmask, x0, x0; \
	vpshufb rmask, x1, x1; \
	vpshufb rmask, x2, x2; \
	vpshufb rmask, x3, x3; \
	\
	transpose_4x4(x0, x1, x2, x3, t0, t1, t2)

#define outunpack_blocks(x0, x1, x2, x3, t0, t1, t2, rmask) \
	transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
	\
	vpshufb rmask, x0, x0; \
	vpshufb rmask, x1, x1; \
	vpshufb rmask, x2, x2; \
	vpshufb rmask, x3, x3;

.data

.align 16
.Lbswap_mask:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lbswap128_mask:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
.Lrkr_enc_Q_Q_QBAR_QBAR:
	.byte 0, 1, 2, 3, 4, 5, 6, 7, 11, 10, 9, 8, 15, 14, 13, 12
.Lrkr_enc_QBAR_QBAR_QBAR_QBAR:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lrkr_dec_Q_Q_Q_Q:
	.byte 12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3
.Lrkr_dec_Q_Q_QBAR_QBAR:
	.byte 12, 13, 14, 15, 8, 9, 10, 11, 7, 6, 5, 4, 3, 2, 1, 0
.Lrkr_dec_QBAR_QBAR_QBAR_QBAR:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
.L16_mask:
	.byte 16, 16, 16, 16
.L32_mask:
	.byte 32, 0, 0, 0
.Lfirst_mask:
	.byte 0x1f, 0, 0, 0
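
/*
 * CAST-256 encrypts with six forward quadrounds (Q) followed by six
 * reverse quadrounds (QBAR); since rounds are XORed in, QBAR(n) is
 * the exact inverse of Q(n), so decryption below runs the sequence
 * backwards.  Q consumes a quadround's four Kr bytes front to back
 * and QBAR back to front, which is what the .Lrkr_{enc,dec}_*
 * shuffle masks above arrange for.
 */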
.text

.align 8
__cast6_enc_blk8:
	/* input:
	 *	%rdi: ctx, CTX
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks
	 * output:
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
	 */

	pushq %rbp;
	pushq %rbx;

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;

	inpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	inpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	preload_rkr(0, dummy, none);
	Q(0);
	Q(1);
	Q(2);
	Q(3);
	preload_rkr(1, shuffle, .Lrkr_enc_Q_Q_QBAR_QBAR);
	Q(4);
	Q(5);
	QBAR(6);
	QBAR(7);
	preload_rkr(2, shuffle, .Lrkr_enc_QBAR_QBAR_QBAR_QBAR);
	QBAR(8);
	QBAR(9);
	QBAR(10);
	QBAR(11);

	popq %rbx;
	popq %rbp;

	vmovdqa .Lbswap_mask, RKM;

	outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	ret;
ENDPROC(__cast6_enc_blk8)

.align 8
__cast6_dec_blk8:
	/* input:
	 *	%rdi: ctx, CTX
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
	 * output:
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: decrypted blocks
	 */

	pushq %rbp;
	pushq %rbx;

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;

	inpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	inpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	preload_rkr(2, shuffle, .Lrkr_dec_Q_Q_Q_Q);
	Q(11);
	Q(10);
	Q(9);
	Q(8);
	preload_rkr(1, shuffle, .Lrkr_dec_Q_Q_QBAR_QBAR);
	Q(7);
	Q(6);
	QBAR(5);
	QBAR(4);
	preload_rkr(0, shuffle, .Lrkr_dec_QBAR_QBAR_QBAR_QBAR);
	QBAR(3);
	QBAR(2);
	QBAR(1);
	QBAR(0);

	popq %rbx;
	popq %rbp;

	vmovdqa .Lbswap_mask, RKM;
	outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	ret;
ENDPROC(__cast6_dec_blk8)

ENTRY(cast6_ecb_enc_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */

	/* the rounds clobber %rsi (RID2) and %rdx (RGI1); keep dst in %r11 */
	movq %rsi, %r11;

	load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	call __cast6_enc_blk8;

	store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	ret;
ENDPROC(cast6_ecb_enc_8way)

ENTRY(cast6_ecb_dec_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */

	movq %rsi, %r11;

	load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	call __cast6_dec_blk8;

	store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	ret;
ENDPROC(cast6_ecb_dec_8way)

ENTRY(cast6_cbc_dec_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */

	pushq %r12;

	movq %rsi, %r11;
	movq %rdx, %r12;

	load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	call __cast6_dec_blk8;

	store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	popq %r12;

	ret;
ENDPROC(cast6_cbc_dec_8way)

ENTRY(cast6_ctr_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (little endian, 128bit)
	 */

	pushq %r12;

	movq %rsi, %r11;
	movq %rdx, %r12;

	load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
		      RD2, RX, RKR, RKM);

	call __cast6_enc_blk8;

	store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	popq %r12;

	ret;
ENDPROC(cast6_ctr_8way)