xref: /openbmc/linux/lib/raid6/recov_avx2.c (revision 0e96cf7f)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Intel Corporation
 * Author: Jim Kukunas <james.t.kukunas@linux.intel.com>
 */

#ifdef CONFIG_AS_AVX2

#include <linux/raid/pq.h>
#include "x86.h"

static int raid6_has_avx2(void)
{
	return boot_cpu_has(X86_FEATURE_AVX2) &&
		boot_cpu_has(X86_FEATURE_AVX);
}
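
/*
 * The AVX feature bit is checked alongside AVX2 because the kernel clears
 * it when OS-level XSAVE support for the ymm registers is unavailable;
 * presumably the intent is to make sure the ymm state used below is
 * actually usable, not just advertised by CPUID.
 */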

static void raid6_2data_recov_avx2(int disks, size_t bytes, int faila,
		int failb, void **ptrs)
{
	u8 *p, *q, *dp, *dq;
	const u8 *pbmul;	/* P multiplier table for B data */
	const u8 *qmul;		/* Q multiplier table (for both) */
	const u8 x0f = 0x0f;

	p = (u8 *)ptrs[disks-2];
	q = (u8 *)ptrs[disks-1];

	/* Compute syndrome with zero for the missing data pages
	   Use the dead data pages as temporary storage for
	   delta p and delta q */
	dp = (u8 *)ptrs[faila];
	ptrs[faila] = (void *)raid6_empty_zero_page;
	ptrs[disks-2] = dp;
	dq = (u8 *)ptrs[failb];
	ptrs[failb] = (void *)raid6_empty_zero_page;
	ptrs[disks-1] = dq;

	raid6_call.gen_syndrome(disks, bytes, ptrs);
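
	/*
	 * dp and dq now hold P' and Q', the syndromes computed as if both
	 * failed blocks were zero, so p ^ dp = Da ^ Db and
	 * q ^ dq = g^faila*Da ^ g^failb*Db in GF(2^8); that is what the
	 * recovery loop below consumes.
	 */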

	/* Restore pointer table */
	ptrs[faila]   = dp;
	ptrs[failb]   = dq;
	ptrs[disks-2] = p;
	ptrs[disks-1] = q;

	/* Now, pick the proper data tables */
	pbmul = raid6_vgfmul[raid6_gfexi[failb-faila]];
	qmul  = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^
		raid6_gfexp[failb]]];
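
	/*
	 * pbmul multiplies by 1/(g^(failb-faila) + 1) and qmul by
	 * 1/(g^faila + g^failb).  The per-byte recovery that the loop below
	 * vectorises is roughly (cf. the generic C version in
	 * lib/raid6/recov.c):
	 *
	 *	px = p[i] ^ dp[i];
	 *	qx = qmul[q[i] ^ dq[i]];
	 *	dq[i] = db = pbmul[px] ^ qx;	(reconstructed Db)
	 *	dp[i] = db ^ px;		(reconstructed Da)
	 *
	 * with 256-entry scalar multiply tables in place of the nibble-split
	 * vector tables used here.
	 */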

	kernel_fpu_begin();

	/* ymm7 = x0f[32] (low-nibble mask) */
	asm volatile("vpbroadcastb %0, %%ymm7" : : "m" (x0f));
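
	/*
	 * GF(2^8) multiplication by a constant is done with two vpshufb
	 * lookups: raid6_vgfmul[c] stores c * {0x00..0x0f} in its first 16
	 * bytes and c * {0x00, 0x10, ..., 0xf0} in the next 16, so the
	 * product of c and a byte b is lo_tbl[b & 0x0f] ^ hi_tbl[b >> 4].
	 * vbroadcasti128 copies each 16-byte table into both 128-bit lanes
	 * because vpshufb only shuffles within a lane.
	 */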

	while (bytes) {
#ifdef CONFIG_X86_64
		asm volatile("vmovdqa %0, %%ymm1" : : "m" (q[0]));
		asm volatile("vmovdqa %0, %%ymm9" : : "m" (q[32]));
		asm volatile("vmovdqa %0, %%ymm0" : : "m" (p[0]));
		asm volatile("vmovdqa %0, %%ymm8" : : "m" (p[32]));
		asm volatile("vpxor %0, %%ymm1, %%ymm1" : : "m" (dq[0]));
		asm volatile("vpxor %0, %%ymm9, %%ymm9" : : "m" (dq[32]));
		asm volatile("vpxor %0, %%ymm0, %%ymm0" : : "m" (dp[0]));
		asm volatile("vpxor %0, %%ymm8, %%ymm8" : : "m" (dp[32]));

		/*
		 * 1 = dq[0]  ^ q[0]
		 * 9 = dq[32] ^ q[32]
		 * 0 = dp[0]  ^ p[0]
		 * 8 = dp[32] ^ p[32]
		 */

		asm volatile("vbroadcasti128 %0, %%ymm4" : : "m" (qmul[0]));
		asm volatile("vbroadcasti128 %0, %%ymm5" : : "m" (qmul[16]));

		asm volatile("vpsraw $4, %ymm1, %ymm3");
		asm volatile("vpsraw $4, %ymm9, %ymm12");
		asm volatile("vpand %ymm7, %ymm1, %ymm1");
		asm volatile("vpand %ymm7, %ymm9, %ymm9");
		asm volatile("vpand %ymm7, %ymm3, %ymm3");
		asm volatile("vpand %ymm7, %ymm12, %ymm12");
		asm volatile("vpshufb %ymm9, %ymm4, %ymm14");
		asm volatile("vpshufb %ymm1, %ymm4, %ymm4");
		asm volatile("vpshufb %ymm12, %ymm5, %ymm15");
		asm volatile("vpshufb %ymm3, %ymm5, %ymm5");
		asm volatile("vpxor %ymm14, %ymm15, %ymm15");
		asm volatile("vpxor %ymm4, %ymm5, %ymm5");

		/*
		 * 5 = qx[0]
		 * 15 = qx[32]
		 */

		asm volatile("vbroadcasti128 %0, %%ymm4" : : "m" (pbmul[0]));
		asm volatile("vbroadcasti128 %0, %%ymm1" : : "m" (pbmul[16]));
		asm volatile("vpsraw $4, %ymm0, %ymm2");
		asm volatile("vpsraw $4, %ymm8, %ymm6");
		asm volatile("vpand %ymm7, %ymm0, %ymm3");
		asm volatile("vpand %ymm7, %ymm8, %ymm14");
		asm volatile("vpand %ymm7, %ymm2, %ymm2");
		asm volatile("vpand %ymm7, %ymm6, %ymm6");
		asm volatile("vpshufb %ymm14, %ymm4, %ymm12");
		asm volatile("vpshufb %ymm3, %ymm4, %ymm4");
		asm volatile("vpshufb %ymm6, %ymm1, %ymm13");
		asm volatile("vpshufb %ymm2, %ymm1, %ymm1");
		asm volatile("vpxor %ymm4, %ymm1, %ymm1");
		asm volatile("vpxor %ymm12, %ymm13, %ymm13");

		/*
		 * 1  = pbmul[px[0]]
		 * 13 = pbmul[px[32]]
		 */
		asm volatile("vpxor %ymm5, %ymm1, %ymm1");
		asm volatile("vpxor %ymm15, %ymm13, %ymm13");

		/*
		 * 1 = db = DQ
		 * 13 = db[32] = DQ[32]
		 */
		asm volatile("vmovdqa %%ymm1, %0" : "=m" (dq[0]));
		asm volatile("vmovdqa %%ymm13, %0" : "=m" (dq[32]));
		asm volatile("vpxor %ymm1, %ymm0, %ymm0");
		asm volatile("vpxor %ymm13, %ymm8, %ymm8");
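
		/*
		 * 0 = da = DP
		 * 8 = da[32] = DP[32]
		 */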

		asm volatile("vmovdqa %%ymm0, %0" : "=m" (dp[0]));
		asm volatile("vmovdqa %%ymm8, %0" : "=m" (dp[32]));

		bytes -= 64;
		p += 64;
		q += 64;
		dp += 64;
		dq += 64;
#else
		asm volatile("vmovdqa %0, %%ymm1" : : "m" (*q));
		asm volatile("vmovdqa %0, %%ymm0" : : "m" (*p));
		asm volatile("vpxor %0, %%ymm1, %%ymm1" : : "m" (*dq));
		asm volatile("vpxor %0, %%ymm0, %%ymm0" : : "m" (*dp));

		/* 1 = dq ^ q;  0 = dp ^ p */

		asm volatile("vbroadcasti128 %0, %%ymm4" : : "m" (qmul[0]));
		asm volatile("vbroadcasti128 %0, %%ymm5" : : "m" (qmul[16]));

		/*
		 * 1 = dq ^ q
		 * 3 = (dq ^ q) >> 4
		 */
		asm volatile("vpsraw $4, %ymm1, %ymm3");
		asm volatile("vpand %ymm7, %ymm1, %ymm1");
		asm volatile("vpand %ymm7, %ymm3, %ymm3");
		asm volatile("vpshufb %ymm1, %ymm4, %ymm4");
		asm volatile("vpshufb %ymm3, %ymm5, %ymm5");
		asm volatile("vpxor %ymm4, %ymm5, %ymm5");

		/* 5 = qx */

		asm volatile("vbroadcasti128 %0, %%ymm4" : : "m" (pbmul[0]));
		asm volatile("vbroadcasti128 %0, %%ymm1" : : "m" (pbmul[16]));

		asm volatile("vpsraw $4, %ymm0, %ymm2");
		asm volatile("vpand %ymm7, %ymm0, %ymm3");
		asm volatile("vpand %ymm7, %ymm2, %ymm2");
		asm volatile("vpshufb %ymm3, %ymm4, %ymm4");
		asm volatile("vpshufb %ymm2, %ymm1, %ymm1");
		asm volatile("vpxor %ymm4, %ymm1, %ymm1");

		/* 1 = pbmul[px] */
		asm volatile("vpxor %ymm5, %ymm1, %ymm1");
		/* 1 = db = DQ */
		asm volatile("vmovdqa %%ymm1, %0" : "=m" (dq[0]));

		asm volatile("vpxor %ymm1, %ymm0, %ymm0");
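		/* 0 = da = DP */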
		asm volatile("vmovdqa %%ymm0, %0" : "=m" (dp[0]));

		bytes -= 32;
		p += 32;
		q += 32;
		dp += 32;
		dq += 32;
#endif
	}

	kernel_fpu_end();
}

static void raid6_datap_recov_avx2(int disks, size_t bytes, int faila,
		void **ptrs)
{
	u8 *p, *q, *dq;
	const u8 *qmul;		/* Q multiplier table */
	const u8 x0f = 0x0f;

	p = (u8 *)ptrs[disks-2];
	q = (u8 *)ptrs[disks-1];

	/* Compute syndrome with zero for the missing data page
	   Use the dead data page as temporary storage for delta q */
	dq = (u8 *)ptrs[faila];
	ptrs[faila] = (void *)raid6_empty_zero_page;
	ptrs[disks-1] = dq;

	raid6_call.gen_syndrome(disks, bytes, ptrs);
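
	/*
	 * gen_syndrome() has rewritten the P buffer with P' = P ^ Da
	 * (computed with the missing block zeroed) and dq with
	 * Q' = Q ^ g^faila*Da, so q ^ dq isolates g^faila*Da.
	 */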

	/* Restore pointer table */
	ptrs[faila]   = dq;
	ptrs[disks-1] = q;

	/* Now, pick the proper data tables */
	qmul  = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];
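
	/*
	 * qmul multiplies by 1/g^faila, so the per-byte recovery is simply
	 * (cf. the generic C version in lib/raid6/recov.c):
	 *
	 *	dq[i] = qmul[q[i] ^ dq[i]];	(reconstructed Da)
	 *	p[i] ^= dq[i];			(restores the lost P)
	 */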

	kernel_fpu_begin();

	asm volatile("vpbroadcastb %0, %%ymm7" : : "m" (x0f));

	while (bytes) {
#ifdef CONFIG_X86_64
		asm volatile("vmovdqa %0, %%ymm3" : : "m" (dq[0]));
		asm volatile("vmovdqa %0, %%ymm8" : : "m" (dq[32]));
		asm volatile("vpxor %0, %%ymm3, %%ymm3" : : "m" (q[0]));
		asm volatile("vpxor %0, %%ymm8, %%ymm8" : : "m" (q[32]));

		/*
		 * 3 = q[0] ^ dq[0]
		 * 8 = q[32] ^ dq[32]
		 */
		asm volatile("vbroadcasti128 %0, %%ymm0" : : "m" (qmul[0]));
		asm volatile("vmovapd %ymm0, %ymm13");
		asm volatile("vbroadcasti128 %0, %%ymm1" : : "m" (qmul[16]));
		asm volatile("vmovapd %ymm1, %ymm14");
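
		/*
		 * ymm13/ymm14 are working copies of the two table halves:
		 * the vpshufb results below are written over the table
		 * registers, and the same tables are needed again for the
		 * second 32-byte chunk.
		 */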

		asm volatile("vpsraw $4, %ymm3, %ymm6");
		asm volatile("vpsraw $4, %ymm8, %ymm12");
		asm volatile("vpand %ymm7, %ymm3, %ymm3");
		asm volatile("vpand %ymm7, %ymm8, %ymm8");
		asm volatile("vpand %ymm7, %ymm6, %ymm6");
		asm volatile("vpand %ymm7, %ymm12, %ymm12");
		asm volatile("vpshufb %ymm3, %ymm0, %ymm0");
		asm volatile("vpshufb %ymm8, %ymm13, %ymm13");
		asm volatile("vpshufb %ymm6, %ymm1, %ymm1");
		asm volatile("vpshufb %ymm12, %ymm14, %ymm14");
		asm volatile("vpxor %ymm0, %ymm1, %ymm1");
		asm volatile("vpxor %ymm13, %ymm14, %ymm14");

		/*
		 * 1  = qmul[q[0]  ^ dq[0]]
		 * 14 = qmul[q[32] ^ dq[32]]
		 */
		asm volatile("vmovdqa %0, %%ymm2" : : "m" (p[0]));
		asm volatile("vmovdqa %0, %%ymm12" : : "m" (p[32]));
		asm volatile("vpxor %ymm1, %ymm2, %ymm2");
		asm volatile("vpxor %ymm14, %ymm12, %ymm12");

		/*
		 * 2  = p[0]  ^ qmul[q[0]  ^ dq[0]]
		 * 12 = p[32] ^ qmul[q[32] ^ dq[32]]
		 */

		asm volatile("vmovdqa %%ymm1, %0" : "=m" (dq[0]));
		asm volatile("vmovdqa %%ymm14, %0" : "=m" (dq[32]));
		asm volatile("vmovdqa %%ymm2, %0" : "=m" (p[0]));
		asm volatile("vmovdqa %%ymm12, %0" : "=m" (p[32]));

		bytes -= 64;
		p += 64;
		q += 64;
		dq += 64;
#else
		asm volatile("vmovdqa %0, %%ymm3" : : "m" (dq[0]));
		asm volatile("vpxor %0, %%ymm3, %%ymm3" : : "m" (q[0]));

		/* 3 = q ^ dq */

		asm volatile("vbroadcasti128 %0, %%ymm0" : : "m" (qmul[0]));
		asm volatile("vbroadcasti128 %0, %%ymm1" : : "m" (qmul[16]));

		asm volatile("vpsraw $4, %ymm3, %ymm6");
		asm volatile("vpand %ymm7, %ymm3, %ymm3");
		asm volatile("vpand %ymm7, %ymm6, %ymm6");
		asm volatile("vpshufb %ymm3, %ymm0, %ymm0");
		asm volatile("vpshufb %ymm6, %ymm1, %ymm1");
		asm volatile("vpxor %ymm0, %ymm1, %ymm1");

		/* 1 = qmul[q ^ dq] */

		asm volatile("vmovdqa %0, %%ymm2" : : "m" (p[0]));
		asm volatile("vpxor %ymm1, %ymm2, %ymm2");

		/* 2 = p ^ qmul[q ^ dq] */

		asm volatile("vmovdqa %%ymm1, %0" : "=m" (dq[0]));
		asm volatile("vmovdqa %%ymm2, %0" : "=m" (p[0]));

		bytes -= 32;
		p += 32;
		q += 32;
		dq += 32;
#endif
	}

	kernel_fpu_end();
}

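/*
 * This descriptor is listed in raid6_recov_algos[] (lib/raid6/algos.c); at
 * init time the valid entry with the highest priority wins, which places
 * AVX2 above the SSSE3 recovery routines.  The name records how many
 * 32-byte chunks each loop iteration handles: two on x86-64, one on 32-bit
 * builds where only ymm0-ymm7 are available.
 */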
const struct raid6_recov_calls raid6_recov_avx2 = {
	.data2 = raid6_2data_recov_avx2,
	.datap = raid6_datap_recov_avx2,
	.valid = raid6_has_avx2,
#ifdef CONFIG_X86_64
	.name = "avx2x2",
#else
	.name = "avx2x1",
#endif
	.priority = 2,
};

#else
#warning "your version of binutils lacks AVX2 support"
#endif