/*
 * Cast5 Cipher 16-way parallel algorithm (AVX/x86_64)
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *     <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * Copyright © 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 *
 */

.file "cast5-avx-x86_64-asm_64.S"

.extern cast5_s1
.extern cast5_s2
.extern cast5_s3
.extern cast5_s4

/* structure of crypto context */
#define km	0
#define kr	(16*4)
#define rr	((16*4)+16)
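
/*
 * These offsets are assumed to correspond to the C-side context used by the
 * generic CAST5 implementation (struct cast5_ctx), roughly:
 *
 *	struct cast5_ctx {
 *		u32 Km[16];	- masking subkeys    (km, offset 0)
 *		u8 Kr[16];	- rotation subkeys   (kr, offset 64)
 *		int rr;		- reduced-round flag (rr, offset 80)
 *	};
 */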

/* s-boxes */
#define s1	cast5_s1
#define s2	cast5_s2
#define s3	cast5_s3
#define s4	cast5_s4
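
/*
 * cast5_s1..cast5_s4 are the four 256-entry, 32-bit CAST5 s-box tables,
 * assumed to be provided by the generic C implementation; only the scalar
 * table lookups below reference them.
 */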

/**********************************************************************
  16-way AVX cast5
 **********************************************************************/
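/*
 * Overview: 16 blocks are processed per call.  The 64-bit blocks are split
 * into 32-bit halves and spread across four xmm register pairs
 * (RL1/RR1 .. RL4/RR4), four blocks per pair.  The key mixing and the
 * key-dependent rotation of each round are done with vector instructions,
 * while the four s-box lookups are done with scalar loads after moving the
 * rotated words into general-purpose registers.
 */
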
#define CTX %rdi

#define RL1 %xmm0
#define RR1 %xmm1
#define RL2 %xmm2
#define RR2 %xmm3
#define RL3 %xmm4
#define RR3 %xmm5
#define RL4 %xmm6
#define RR4 %xmm7

#define RX %xmm8

#define RKM  %xmm9
#define RKR  %xmm10
#define RKRF %xmm11
#define RKRR %xmm12

#define R32  %xmm13
#define R1ST %xmm14

#define RTMP %xmm15

#define RID1  %rbp
#define RID1d %ebp
#define RID2  %rsi
#define RID2d %esi

#define RGI1   %rdx
#define RGI1bl %dl
#define RGI1bh %dh
#define RGI2   %rcx
#define RGI2bl %cl
#define RGI2bh %ch

#define RGI3   %rax
#define RGI3bl %al
#define RGI3bh %ah
#define RGI4   %rbx
#define RGI4bl %bl
#define RGI4bh %bh

#define RFS1  %r8
#define RFS1d %r8d
#define RFS2  %r9
#define RFS2d %r9d
#define RFS3  %r10
#define RFS3d %r10d


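/*
 * lookup_32bit(): do the four s-box lookups for one 32-bit word held in a
 * general-purpose register.  The four bytes of src are used as indices (two
 * before and two after the "shrq $16"); the s1 entry is loaded into dst and
 * the s2/s3/s4 entries are folded in with op1/op2/op3.  interleave_op lets
 * the caller slip in an extra operation (here shr_next on the other half)
 * between the loads.
 */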
#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
	movzbl		src ## bh,     RID1d;    \
	movzbl		src ## bl,     RID2d;    \
	shrq $16,	src;                     \
	movl		s1(, RID1, 4), dst ## d; \
	op1		s2(, RID2, 4), dst ## d; \
	movzbl		src ## bh,     RID1d;    \
	movzbl		src ## bl,     RID2d;    \
	interleave_op(il_reg);			 \
	op2		s3(, RID1, 4), dst ## d; \
	op3		s4(, RID2, 4), dst ## d;

#define dummy(d) /* do nothing */

#define shr_next(reg) \
	shrq $16,	reg;

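/*
 * F_head()/F_tail(): first and second half of the CAST5 round function F.
 * F_head combines the data word with the masking key RKM (op0 depends on
 * the round type), rotates left by the rotation key (a left shift by RKRF
 * or'd with a right shift by RKRR = 32 - RKRF), and moves the two 64-bit
 * lanes into general-purpose registers.  F_tail then runs lookup_32bit() on
 * the four 32-bit words and reassembles the 128-bit result in x.
 */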
#define F_head(a, x, gi1, gi2, op0) \
	op0	a,	RKM,  x;                 \
	vpslld	RKRF,	x,    RTMP;              \
	vpsrld	RKRR,	x,    x;                 \
	vpor	RTMP,	x,    x;                 \
	\
	vmovq		x,    gi1;               \
	vpextrq $1,	x,    gi2;

#define F_tail(a, x, gi1, gi2, op1, op2, op3) \
	lookup_32bit(##gi1, RFS1, op1, op2, op3, shr_next, ##gi1); \
	lookup_32bit(##gi2, RFS3, op1, op2, op3, shr_next, ##gi2); \
	\
	lookup_32bit(##gi1, RFS2, op1, op2, op3, dummy, none);     \
	shlq $32,	RFS2;                                      \
	orq		RFS1, RFS2;                                \
	lookup_32bit(##gi2, RFS1, op1, op2, op3, dummy, none);     \
	shlq $32,	RFS1;                                      \
	orq		RFS1, RFS3;                                \
	\
	vmovq		RFS2, x;                                   \
	vpinsrq $1,	RFS3, x, x;

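/*
 * F_2(): apply F to two register pairs at once, i.e. to eight blocks:
 * a1 ^= F(b1) and a2 ^= F(b2).  The second result is kept in RTMP so both
 * xors can be issued back to back.
 */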
#define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \
	F_head(b1, RX, RGI1, RGI2, op0);              \
	F_head(b2, RX, RGI3, RGI4, op0);              \
	\
	F_tail(b1, RX, RGI1, RGI2, op1, op2, op3);    \
	F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3);  \
	\
	vpxor		a1, RX,   a1;                 \
	vpxor		a2, RTMP, a2;

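/*
 * F1_2/F2_2/F3_2 implement the three CAST5 round function types (RFC 2144),
 * here in the parallel a ^= F(b) form used above:
 *
 *   type 1:  I = ((Km + b) <<< Kr);  F = ((s1[Ia] ^ s2[Ib]) - s3[Ic]) + s4[Id]
 *   type 2:  I = ((Km ^ b) <<< Kr);  F = ((s1[Ia] - s2[Ib]) + s3[Ic]) ^ s4[Id]
 *   type 3:  I = ((Km - b) <<< Kr);  F = ((s1[Ia] + s2[Ib]) ^ s3[Ic]) - s4[Id]
 *
 * op0 is the vector key-mix operation and op1..op3 are the scalar combine
 * operations passed down to lookup_32bit().
 */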
#define F1_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
#define F2_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
#define F3_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)

#define subround(a1, b1, a2, b2, f) \
	F ## f ## _2(a1, b1, a2, b2);

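/*
 * round(): one CAST5 round over all 16 blocks.  Km[n] is broadcast into RKM,
 * the low five bits of the current rotation byte in RKR are masked into RKRF
 * (.Lfirst_mask), RKRR is set to 32 - RKRF for the shift-based rotate, and
 * RKR is shifted down one byte so the next round finds its rotation amount
 * in the low byte.
 */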
#define round(l, r, n, f) \
	vbroadcastss	(km+(4*n))(CTX), RKM;        \
	vpand		R1ST,            RKR,  RKRF; \
	vpsubq		RKRF,            R32,  RKRR; \
	vpsrldq $1,	RKR,             RKR;        \
	subround(l ## 1, r ## 1, l ## 2, r ## 2, f); \
	subround(l ## 3, r ## 3, l ## 4, r ## 4, f);

#define enc_preload_rkr() \
	vbroadcastss	.L16_mask,                RKR;      \
	/* add 16-bit rotation to key rotations (mod 32) */ \
	vpxor		kr(CTX),                  RKR, RKR;

#define dec_preload_rkr() \
	vbroadcastss	.L16_mask,                RKR;      \
	/* add 16-bit rotation to key rotations (mod 32) */ \
	vpxor		kr(CTX),                  RKR, RKR; \
	/* reverse kr bytes so rotations come out in decryption round order */ \
	vpshufb		.Lbswap128_mask,          RKR, RKR;

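/*
 * Block loading/storing: each inpack_blocks() call handles four 8-byte
 * blocks (one 32-byte stride).  The 32-bit words are byte-swapped to the
 * big-endian order CAST5 expects (rmask = .Lbswap_mask), and transpose_2x4()
 * regroups them so that x0 holds the four left halves and x1 the four right
 * halves (in an internally permuted order that the same transpose undoes on
 * output).  outunpack_blocks() / outunpack_xor_blocks() reverse the
 * transform; the xor variant additionally xors with the data already at the
 * destination, for callers that need xor-style output.
 */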
#define transpose_2x4(x0, x1, t0, t1) \
	vpunpckldq		x1, x0, t0; \
	vpunpckhdq		x1, x0, t1; \
	\
	vpunpcklqdq		t1, t0, x0; \
	vpunpckhqdq		t1, t0, x1;

#define inpack_blocks(in, x0, x1, t0, t1, rmask) \
	vmovdqu (0*4*4)(in),	x0; \
	vmovdqu (1*4*4)(in),	x1; \
	vpshufb rmask,	x0,	x0; \
	vpshufb rmask,	x1,	x1; \
	\
	transpose_2x4(x0, x1, t0, t1)

#define outunpack_blocks(out, x0, x1, t0, t1, rmask) \
	transpose_2x4(x0, x1, t0, t1) \
	\
	vpshufb rmask,	x0, x0;           \
	vpshufb rmask,	x1, x1;           \
	vmovdqu		x0, (0*4*4)(out); \
	vmovdqu		x1, (1*4*4)(out);

#define outunpack_xor_blocks(out, x0, x1, t0, t1, rmask) \
	transpose_2x4(x0, x1, t0, t1) \
	\
	vpshufb rmask,	x0, x0;               \
	vpshufb rmask,	x1, x1;               \
	vpxor		(0*4*4)(out), x0, x0; \
	vmovdqu		x0, (0*4*4)(out);     \
	vpxor		(1*4*4)(out), x1, x1; \
	vmovdqu		x1, (1*4*4)(out);

.data

.align 16
/* byte-swap each 32-bit word (endianness fix on load/store) */
.Lbswap_mask:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
/* reverse all 16 bytes (byte-reverses RKR for decryption) */
.Lbswap128_mask:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
/* xored into the rotation bytes by enc_preload_rkr()/dec_preload_rkr() */
.L16_mask:
	.byte 16, 16, 16, 16
/* the constant 32, for computing RKRR = 32 - RKRF */
.L32_mask:
	.byte 32, 0, 0, 0
/* masks the low five bits of the current rotation byte */
.Lfirst_mask:
	.byte 0x1f, 0, 0, 0

.text

.align 16
.global __cast5_enc_blk_16way
.type   __cast5_enc_blk_16way,@function;

__cast5_enc_blk_16way:
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: bool, if true: xor output
	 */
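	/* Encrypts 16 blocks per call: 16 rounds, or only 12 when ctx->rr is
	 * set (the reduced-round variant CAST5 uses for short keys). */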

	pushq %rbp;
	pushq %rbx;
	pushq %rcx;

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;
	enc_preload_rkr();

	leaq 1*(2*4*4)(%rdx), %rax;
	inpack_blocks(%rdx, RL1, RR1, RTMP, RX, RKM);
	inpack_blocks(%rax, RL2, RR2, RTMP, RX, RKM);
	leaq 2*(2*4*4)(%rdx), %rax;
	inpack_blocks(%rax, RL3, RR3, RTMP, RX, RKM);
	leaq 3*(2*4*4)(%rdx), %rax;
	inpack_blocks(%rax, RL4, RR4, RTMP, RX, RKM);

	movq %rsi, %r11;

	round(RL, RR, 0, 1);
	round(RR, RL, 1, 2);
	round(RL, RR, 2, 3);
	round(RR, RL, 3, 1);
	round(RL, RR, 4, 2);
	round(RR, RL, 5, 3);
	round(RL, RR, 6, 1);
	round(RR, RL, 7, 2);
	round(RL, RR, 8, 3);
	round(RR, RL, 9, 1);
	round(RL, RR, 10, 2);
	round(RR, RL, 11, 3);

	movzbl rr(CTX), %eax;
	testl %eax, %eax;
	jnz __skip_enc;

	round(RL, RR, 12, 1);
	round(RR, RL, 13, 2);
	round(RL, RR, 14, 3);
	round(RR, RL, 15, 1);

__skip_enc:
	popq %rcx;
	popq %rbx;
	popq %rbp;

	vmovdqa .Lbswap_mask, RKM;
	leaq 1*(2*4*4)(%r11), %rax;

	testb %cl, %cl;
	jnz __enc_xor16;

	outunpack_blocks(%r11, RR1, RL1, RTMP, RX, RKM);
	outunpack_blocks(%rax, RR2, RL2, RTMP, RX, RKM);
	leaq 2*(2*4*4)(%r11), %rax;
	outunpack_blocks(%rax, RR3, RL3, RTMP, RX, RKM);
	leaq 3*(2*4*4)(%r11), %rax;
	outunpack_blocks(%rax, RR4, RL4, RTMP, RX, RKM);

	ret;

__enc_xor16:
	outunpack_xor_blocks(%r11, RR1, RL1, RTMP, RX, RKM);
	outunpack_xor_blocks(%rax, RR2, RL2, RTMP, RX, RKM);
	leaq 2*(2*4*4)(%r11), %rax;
	outunpack_xor_blocks(%rax, RR3, RL3, RTMP, RX, RKM);
	leaq 3*(2*4*4)(%r11), %rax;
	outunpack_xor_blocks(%rax, RR4, RL4, RTMP, RX, RKM);

	ret;

.align 16
.global cast5_dec_blk_16way
.type   cast5_dec_blk_16way,@function;

cast5_dec_blk_16way:
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */
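	/* Decryption runs the rounds in reverse order (15 down to 0).  When
	 * ctx->rr is set, rounds 15..12 are skipped and their rotation bytes
	 * are dropped at __skip_dec. */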

	pushq %rbp;
	pushq %rbx;

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;
	dec_preload_rkr();

	leaq 1*(2*4*4)(%rdx), %rax;
	inpack_blocks(%rdx, RL1, RR1, RTMP, RX, RKM);
	inpack_blocks(%rax, RL2, RR2, RTMP, RX, RKM);
	leaq 2*(2*4*4)(%rdx), %rax;
	inpack_blocks(%rax, RL3, RR3, RTMP, RX, RKM);
	leaq 3*(2*4*4)(%rdx), %rax;
	inpack_blocks(%rax, RL4, RR4, RTMP, RX, RKM);

	movq %rsi, %r11;

	movzbl rr(CTX), %eax;
	testl %eax, %eax;
	jnz __skip_dec;

	round(RL, RR, 15, 1);
	round(RR, RL, 14, 3);
	round(RL, RR, 13, 2);
	round(RR, RL, 12, 1);

__dec_tail:
	round(RL, RR, 11, 3);
	round(RR, RL, 10, 2);
	round(RL, RR, 9, 1);
	round(RR, RL, 8, 3);
	round(RL, RR, 7, 2);
	round(RR, RL, 6, 1);
	round(RL, RR, 5, 3);
	round(RR, RL, 4, 2);
	round(RL, RR, 3, 1);
	round(RR, RL, 2, 3);
	round(RL, RR, 1, 2);
	round(RR, RL, 0, 1);

	vmovdqa .Lbswap_mask, RKM;
	popq %rbx;
	popq %rbp;

	leaq 1*(2*4*4)(%r11), %rax;
	outunpack_blocks(%r11, RR1, RL1, RTMP, RX, RKM);
	outunpack_blocks(%rax, RR2, RL2, RTMP, RX, RKM);
	leaq 2*(2*4*4)(%r11), %rax;
	outunpack_blocks(%rax, RR3, RL3, RTMP, RX, RKM);
	leaq 3*(2*4*4)(%r11), %rax;
	outunpack_blocks(%rax, RR4, RL4, RTMP, RX, RKM);

	ret;

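/*
 * Reduced-round (12-round) decryption: drop the four rotation bytes that
 * belong to the skipped rounds 15..12, then continue with the common
 * 12-round tail.
 */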
__skip_dec:
	vpsrldq $4, RKR, RKR;
	jmp __dec_tail;
