/*
 * Cast6 Cipher 8-way parallel algorithm (AVX/x86_64)
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *     <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * Copyright © 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 *
 */

.file "cast6-avx-x86_64-asm_64.S"

.extern cast6_s1
.extern cast6_s2
.extern cast6_s3
.extern cast6_s4

/* structure of crypto context */
#define km	0
#define kr	(12*4*4)
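
/*
 * km: 12 quad-rounds x 4 masking keys (one 32-bit word each, 12*4*4 = 192
 * bytes); the packed 5-bit rotation keys (one per byte) follow at offset kr.
 */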

/* s-boxes */
#define s1	cast6_s1
#define s2	cast6_s2
#define s3	cast6_s3
#define s4	cast6_s4
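
/*
 * The cast6_sN tables declared .extern above are the four 8x32-bit CAST
 * S-boxes; this code only reads them, with 4-byte scaled indexing.
 */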

/**********************************************************************
  8-way AVX cast6
 **********************************************************************/
#define CTX %rdi

#define RA1 %xmm0
#define RB1 %xmm1
#define RC1 %xmm2
#define RD1 %xmm3

#define RA2 %xmm4
#define RB2 %xmm5
#define RC2 %xmm6
#define RD2 %xmm7

#define RX  %xmm8

#define RKM  %xmm9
#define RKR  %xmm10
#define RKRF %xmm11
#define RKRR %xmm12
#define R32  %xmm13
#define R1ST %xmm14

#define RTMP %xmm15

#define RID1  %rbp
#define RID1d %ebp
#define RID2  %rsi
#define RID2d %esi

#define RGI1   %rdx
#define RGI1bl %dl
#define RGI1bh %dh
#define RGI2   %rcx
#define RGI2bl %cl
#define RGI2bh %ch

#define RGI3   %rax
#define RGI3bl %al
#define RGI3bh %ah
#define RGI4   %rbx
#define RGI4bl %bl
#define RGI4bh %bh

#define RFS1  %r8
#define RFS1d %r8d
#define RFS2  %r9
#define RFS2d %r9d
#define RFS3  %r10
#define RFS3d %r10d
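
/*
 * RA..RD{1,2} hold the four 32-bit words of two four-block groups in
 * word-sliced form (see transpose_4x4); RX and RTMP are f-function
 * temporaries.  RKM is the broadcast masking key, RKR the packed rotation
 * keys, RKRF/RKRR the derived left/right shift counts, and R32/R1ST the
 * constants they are computed from.  The general purpose registers carry
 * S-box indices (RID*), f-function inputs (RGI*) and partial results (RFS*).
 */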
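/*
 * lookup_32bit(): four CAST S-box lookups for one 32-bit word held in a
 * GPR: bytes bh/bl of the low half first, then the high half after the
 * shift.  The partial results are merged into dst with the round-type
 * specific ops op1..op3; interleave_op(il_reg) is issued mid-sequence so
 * the caller can overlap the shift that exposes its next input word.
 */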
#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
	movzbl		src ## bh,     RID1d;    \
	movzbl		src ## bl,     RID2d;    \
	shrq $16,	src;                     \
	movl		s1(, RID1, 4), dst ## d; \
	op1		s2(, RID2, 4), dst ## d; \
	movzbl		src ## bh,     RID1d;    \
	movzbl		src ## bl,     RID2d;    \
	interleave_op(il_reg);			 \
	op2		s3(, RID1, 4), dst ## d; \
	op3		s4(, RID2, 4), dst ## d;

#define dummy(d) /* do nothing */

#define shr_next(reg) \
	shrq $16,	reg;

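/*
 * F_head(): combine the masking key with the input words (op0 is vpaddd,
 * vpxor or vpsubd depending on the round type), rotate left by the current
 * rotation key (left shift by RKRF OR'ed with right shift by RKRR = 32 -
 * RKRF), then move the result out to two GPRs for the S-box lookups.
 */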
#define F_head(a, x, gi1, gi2, op0) \
	op0	a,	RKM,  x;                 \
	vpslld	RKRF,	x,    RTMP;              \
	vpsrld	RKRR,	x,    x;                 \
	vpor	RTMP,	x,    x;                 \
	\
	vmovq		x,    gi1;               \
	vpextrq $1,	x,    gi2;

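/*
 * F_tail(): run lookup_32bit() on all four 32-bit words and reassemble the
 * results into x, using RFS1..RFS3 as 64-bit staging registers for the
 * vmovq/vpinsrq merge.
 */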
#define F_tail(a, x, gi1, gi2, op1, op2, op3) \
	lookup_32bit(##gi1, RFS1, op1, op2, op3, shr_next, ##gi1); \
	lookup_32bit(##gi2, RFS3, op1, op2, op3, shr_next, ##gi2); \
	\
	lookup_32bit(##gi1, RFS2, op1, op2, op3, dummy, none);     \
	shlq $32,	RFS2;                                      \
	orq		RFS1, RFS2;                                \
	lookup_32bit(##gi2, RFS1, op1, op2, op3, dummy, none);     \
	shlq $32,	RFS1;                                      \
	orq		RFS1, RFS3;                                \
	\
	vmovq		RFS2, x;                                   \
	vpinsrq $1,	RFS3, x, x;

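/*
 * F_2(): full f-function on both four-block groups: b1/b2 are the inputs
 * and the results are XOR'ed into a1/a2.  The second group's value travels
 * through RTMP so the two groups can share the GPR-based lookup code.
 */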
#define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \
	F_head(b1, RX, RGI1, RGI2, op0);              \
	F_head(b2, RX, RGI3, RGI4, op0);              \
	\
	F_tail(b1, RX, RGI1, RGI2, op1, op2, op3);    \
	F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3);  \
	\
	vpxor		a1, RX,   a1;                 \
	vpxor		a2, RTMP, a2;

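/*
 * The three CAST-256 round function types (RFC 2612):
 *	f1: I = ((Km + D) <<< Kr); f = ((S1[Ia] ^ S2[Ib]) - S3[Ic]) + S4[Id]
 *	f2: I = ((Km ^ D) <<< Kr); f = ((S1[Ia] - S2[Ib]) + S3[Ic]) ^ S4[Id]
 *	f3: I = ((Km - D) <<< Kr); f = ((S1[Ia] + S2[Ib]) ^ S3[Ic]) - S4[Id]
 */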
#define F1_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
#define F2_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
#define F3_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)

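/* one quad-round step: out ^= f<type>(in), applied to both four-block groups */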
#define qop(in, out, f) \
	F ## f ## _2(out ## 1, in ## 1, out ## 2, in ## 2);

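/*
 * get_round_keys(): broadcast masking key km[nn] into RKM; take the lowest
 * byte of RKR as the current rotation key (masked to 5 bits by R1ST) into
 * RKRF, derive RKRR = 32 - RKRF for the right-shift half of the rotate,
 * and shift RKR down a byte to expose the next rotation key.
 */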
#define get_round_keys(nn) \
	vbroadcastss	(km+(4*(nn)))(CTX), RKM;        \
	vpand		R1ST,               RKR,  RKRF; \
	vpsubq		RKRF,               R32,  RKRR; \
	vpsrldq $1,	RKR,                RKR;

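/*
 * Q(): forward quad-round n; QBAR(): inverse quad-round n.  CAST-256
 * encrypts with six forward quad-rounds followed by six inverse ones;
 * decryption runs the same schedule backwards.
 */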
#define Q(n) \
	get_round_keys(4*n+0); \
	qop(RD, RC, 1);        \
	\
	get_round_keys(4*n+1); \
	qop(RC, RB, 2);        \
	\
	get_round_keys(4*n+2); \
	qop(RB, RA, 3);        \
	\
	get_round_keys(4*n+3); \
	qop(RA, RD, 1);

#define QBAR(n) \
	get_round_keys(4*n+3); \
	qop(RA, RD, 1);        \
	\
	get_round_keys(4*n+2); \
	qop(RB, RA, 3);        \
	\
	get_round_keys(4*n+1); \
	qop(RC, RB, 2);        \
	\
	get_round_keys(4*n+0); \
	qop(RD, RC, 1);

#define shuffle(mask) \
	vpshufb		mask,            RKR, RKR;

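/*
 * preload_rkr(): load the 16 rotation keys of quad-rounds 4n..4n+3 into
 * RKR.  XORing every key byte with 16 adds 16 to each rotation (mod 32);
 * that extra half-word rotate compensates for lookup_32bit() consuming the
 * low 16 bits of each word first.  do_mask/mask optionally reorder the key
 * bytes so get_round_keys() can always consume them from the low end (see
 * the .Lrkr_* tables below).
 */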
#define preload_rkr(n, do_mask, mask) \
	vbroadcastss	.L16_mask,                RKR;      \
	/* add 16-bit rotation to key rotations (mod 32) */ \
	vpxor		(kr+n*16)(CTX),           RKR, RKR; \
	do_mask(mask);

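/*
 * transpose_4x4(): 4x4 32-bit matrix transpose, switching four blocks
 * between block-major (one block per register) and word-sliced layout.
 */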
#define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
	vpunpckldq		x1, x0, t0; \
	vpunpckhdq		x1, x0, t2; \
	vpunpckldq		x3, x2, t1; \
	vpunpckhdq		x3, x2, x3; \
	\
	vpunpcklqdq		t1, t0, x0; \
	vpunpckhqdq		t1, t0, x1; \
	vpunpcklqdq		x3, t2, x2; \
	vpunpckhqdq		x3, t2, x3;

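/*
 * inpack_blocks(): load four 16-byte blocks, byte-swap each 32-bit word
 * into big-endian cipher order (rmask = .Lbswap_mask) and transpose them
 * into word-sliced form.  outunpack_blocks() reverses this; the _xor_
 * variant additionally XORs in the previous contents of the destination,
 * for callers that want dst ^= E(src).
 */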
#define inpack_blocks(in, x0, x1, x2, x3, t0, t1, t2, rmask) \
	vmovdqu (0*4*4)(in),	x0; \
	vmovdqu (1*4*4)(in),	x1; \
	vmovdqu (2*4*4)(in),	x2; \
	vmovdqu (3*4*4)(in),	x3; \
	vpshufb rmask, x0,	x0; \
	vpshufb rmask, x1,	x1; \
	vpshufb rmask, x2,	x2; \
	vpshufb rmask, x3,	x3; \
	\
	transpose_4x4(x0, x1, x2, x3, t0, t1, t2)

#define outunpack_blocks(out, x0, x1, x2, x3, t0, t1, t2, rmask) \
	transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
	\
	vpshufb rmask,		x0, x0;       \
	vpshufb rmask,		x1, x1;       \
	vpshufb rmask,		x2, x2;       \
	vpshufb rmask,		x3, x3;       \
	vmovdqu x0,		(0*4*4)(out); \
	vmovdqu	x1,		(1*4*4)(out); \
	vmovdqu	x2,		(2*4*4)(out); \
	vmovdqu	x3,		(3*4*4)(out);

#define outunpack_xor_blocks(out, x0, x1, x2, x3, t0, t1, t2, rmask) \
	transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
	\
	vpshufb rmask,		x0, x0;       \
	vpshufb rmask,		x1, x1;       \
	vpshufb rmask,		x2, x2;       \
	vpshufb rmask,		x3, x3;       \
	vpxor (0*4*4)(out),	x0, x0;       \
	vmovdqu	x0,		(0*4*4)(out); \
	vpxor (1*4*4)(out),	x1, x1;       \
	vmovdqu	x1,		(1*4*4)(out); \
	vpxor (2*4*4)(out),	x2, x2;       \
	vmovdqu x2,		(2*4*4)(out); \
	vpxor (3*4*4)(out),	x3, x3;       \
	vmovdqu x3,		(3*4*4)(out);

.data

.align 16
.Lbswap_mask:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
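/*
 * RKR shuffle masks, named after the quad-round pattern of the group they
 * feed: Q quad-rounds consume their four rotation keys in ascending order,
 * QBAR quad-rounds in descending order, and decryption additionally walks
 * the quad-rounds of a group backwards.
 */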
.Lrkr_enc_Q_Q_QBAR_QBAR:
	.byte 0, 1, 2, 3, 4, 5, 6, 7, 11, 10, 9, 8, 15, 14, 13, 12
.Lrkr_enc_QBAR_QBAR_QBAR_QBAR:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lrkr_dec_Q_Q_Q_Q:
	.byte 12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3
.Lrkr_dec_Q_Q_QBAR_QBAR:
	.byte 12, 13, 14, 15, 8, 9, 10, 11, 7, 6, 5, 4, 3, 2, 1, 0
.Lrkr_dec_QBAR_QBAR_QBAR_QBAR:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
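/*
 * Dword constants: .L16_mask is XOR'ed into the rotation keys (see
 * preload_rkr()); .L32_mask and .Lfirst_mask supply the 32 constant and
 * the 5-bit mask used by get_round_keys().
 */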
.L16_mask:
	.byte 16, 16, 16, 16
.L32_mask:
	.byte 32, 0, 0, 0
.Lfirst_mask:
	.byte 0x1f, 0, 0, 0

.text

.align 16
.global __cast6_enc_blk_8way
.type   __cast6_enc_blk_8way,@function;

__cast6_enc_blk_8way:
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: bool, if true: xor output
	 */
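	/*
	 * Assumed glue-side C prototype (sketch, not part of this file):
	 *	asmlinkage void __cast6_enc_blk_8way(struct cast6_ctx *ctx,
	 *					     u8 *dst, const u8 *src,
	 *					     bool xor);
	 *
	 * %rbp, %rbx and %rcx double as RID1, RGI4 and RGI2 in the lookups,
	 * so all three (including the xor flag) are saved below.
	 */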

	pushq %rbp;
	pushq %rbx;
	pushq %rcx;

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;

	leaq (4*4*4)(%rdx), %rax;
	inpack_blocks(%rdx, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	inpack_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

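	/* %rsi (dst) doubles as RID2 in the lookups; keep dst in %r11 */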
	movq %rsi, %r11;

	preload_rkr(0, dummy, none);
	Q(0);
	Q(1);
	Q(2);
	Q(3);
	preload_rkr(1, shuffle, .Lrkr_enc_Q_Q_QBAR_QBAR);
	Q(4);
	Q(5);
	QBAR(6);
	QBAR(7);
	preload_rkr(2, shuffle, .Lrkr_enc_QBAR_QBAR_QBAR_QBAR);
	QBAR(8);
	QBAR(9);
	QBAR(10);
	QBAR(11);

	popq %rcx;
	popq %rbx;
	popq %rbp;

	vmovdqa .Lbswap_mask, RKM;
	leaq (4*4*4)(%r11), %rax;

	testb %cl, %cl;
	jnz __enc_xor8;

	outunpack_blocks(%r11, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	outunpack_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	ret;

__enc_xor8:
	outunpack_xor_blocks(%r11, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	outunpack_xor_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	ret;

.align 16
.global cast6_dec_blk_8way
.type   cast6_dec_blk_8way,@function;

cast6_dec_blk_8way:
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */
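	/*
	 * Assumed glue-side C prototype (sketch):
	 *	asmlinkage void cast6_dec_blk_8way(struct cast6_ctx *ctx,
	 *					   u8 *dst, const u8 *src);
	 */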

	pushq %rbp;
	pushq %rbx;

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;

	leaq (4*4*4)(%rdx), %rax;
	inpack_blocks(%rdx, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	inpack_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	movq %rsi, %r11;

	preload_rkr(2, shuffle, .Lrkr_dec_Q_Q_Q_Q);
	Q(11);
	Q(10);
	Q(9);
	Q(8);
	preload_rkr(1, shuffle, .Lrkr_dec_Q_Q_QBAR_QBAR);
	Q(7);
	Q(6);
	QBAR(5);
	QBAR(4);
	preload_rkr(0, shuffle, .Lrkr_dec_QBAR_QBAR_QBAR_QBAR);
	QBAR(3);
	QBAR(2);
	QBAR(1);
	QBAR(0);

	popq %rbx;
	popq %rbp;

	vmovdqa .Lbswap_mask, RKM;
	leaq (4*4*4)(%r11), %rax;
	outunpack_blocks(%r11, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	outunpack_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	ret;