// SPDX-License-Identifier: GPL-2.0-only
/*
 * asynchronous raid6 recovery self test
 * Copyright (c) 2009, Intel Corporation.
 *
 * based on drivers/md/raid6test/test.c:
 * 	Copyright 2002-2007 H. Peter Anvin
 */
#include <linux/async_tx.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/module.h>

#undef pr
#define pr(fmt, args...) pr_info("raid6test: " fmt, ##args)

#define NDISKS 64 /* Including P and Q */

static struct page *dataptrs[NDISKS];
static addr_conv_t addr_conv[NDISKS];
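/*
 * Backing pages: data[0..NDISKS-1] hold the data, P and Q blocks under
 * test; the three extra pages are used as the two recovery targets and
 * as a scratch page for syndrome validation (assigned in test()).
 */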
static struct page *data[NDISKS+3];
static struct page *spare;
static struct page *recovi;
static struct page *recovj;

static void callback(void *param)
{
	struct completion *cmp = param;

	complete(cmp);
}

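/*
 * Fill each data page with pseudo-random bytes and point dataptrs at
 * the pristine copies.
 */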
static void makedata(int disks)
{
	int i;

	for (i = 0; i < disks; i++) {
		prandom_bytes(page_address(data[i]), PAGE_SIZE);
		dataptrs[i] = data[i];
	}
}

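/*
 * Map a disk index to its role: the last two slots are P and Q, the
 * rest hold data.
 */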
static char disk_type(int d, int disks)
{
	if (d == disks - 2)
		return 'P';
	else if (d == disks - 1)
		return 'Q';
	else
		return 'D';
}

/* Recover two failed blocks. */
static void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, struct page **ptrs)
{
	struct async_submit_ctl submit;
	struct completion cmp;
	struct dma_async_tx_descriptor *tx = NULL;
	enum sum_check_flags result = ~0;

	if (faila > failb)
		swap(faila, failb);

	if (failb == disks-1) {
		if (faila == disks-2) {
			/* P+Q failure.  Just rebuild the syndrome. */
			init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
			tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit);
		} else {
			struct page *blocks[NDISKS];
			struct page *dest;
			int count = 0;
			int i;

			BUG_ON(disks > NDISKS);

			/* data+Q failure.  Reconstruct data from P,
			 * then rebuild syndrome
			 */
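			/*
			 * P is the XOR of all data blocks, so XOR-ing the
			 * surviving data blocks together with P regenerates
			 * the missing data block.
			 */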
			for (i = disks; i-- ; ) {
				if (i == faila || i == failb)
					continue;
				blocks[count++] = ptrs[i];
			}
			dest = ptrs[faila];
			init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL,
					  NULL, NULL, addr_conv);
			tx = async_xor(dest, blocks, 0, count, bytes, &submit);

			init_async_submit(&submit, 0, tx, NULL, NULL, addr_conv);
			tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit);
		}
	} else {
		if (failb == disks-2) {
			/* data+P failure. */
			init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
			tx = async_raid6_datap_recov(disks, bytes, faila, ptrs, &submit);
		} else {
			/* data+data failure. */
			init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
			tx = async_raid6_2data_recov(disks, bytes, faila, failb, ptrs, &submit);
		}
	}
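	/*
	 * Verify the recovery: recompute the syndrome from the repaired
	 * blocks and check it against P and Q, using the spare page as
	 * scratch space.
	 */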
	init_completion(&cmp);
	init_async_submit(&submit, ASYNC_TX_ACK, tx, callback, &cmp, addr_conv);
	tx = async_syndrome_val(ptrs, 0, disks, bytes, &result, spare, &submit);
	async_tx_issue_pending(tx);

	if (wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)) == 0)
		pr("%s: timeout! (faila: %d failb: %d disks: %d)\n",
		   __func__, faila, failb, disks);

	if (result != 0)
		pr("%s: validation failure! faila: %d failb: %d sum_check_flags: %x\n",
		   __func__, faila, failb, result);
}

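/*
 * Fail disks i and j, recover them into the scratch pages and compare
 * the results with the original data.
 */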
static int test_disks(int i, int j, int disks)
{
	int erra, errb;

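	/*
	 * Poison the recovery pages so stale contents cannot pass as a
	 * successful rebuild.
	 */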
	memset(page_address(recovi), 0xf0, PAGE_SIZE);
	memset(page_address(recovj), 0xba, PAGE_SIZE);

	dataptrs[i] = recovi;
	dataptrs[j] = recovj;

	raid6_dual_recov(disks, PAGE_SIZE, i, j, dataptrs);

	erra = memcmp(page_address(data[i]), page_address(recovi), PAGE_SIZE);
	errb = memcmp(page_address(data[j]), page_address(recovj), PAGE_SIZE);

	pr("%s(%d, %d): faila=%3d(%c)  failb=%3d(%c)  %s\n",
	   __func__, i, j, i, disk_type(i, disks), j, disk_type(j, disks),
	   (!erra && !errb) ? "OK" : !erra ? "ERRB" : !errb ? "ERRA" : "ERRAB");

	dataptrs[i] = data[i];
	dataptrs[j] = data[j];

	return erra || errb;
}

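/*
 * Run the pairwise recovery test for a disks-wide array, counting each
 * case in *tests and returning the number of miscompares.
 */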
static int test(int disks, int *tests)
{
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	struct completion cmp;
	int err = 0;
	int i, j;

	recovi = data[disks];
	recovj = data[disks+1];
	spare  = data[disks+2];

	makedata(disks);

	/* Nuke syndromes */
	memset(page_address(data[disks-2]), 0xee, PAGE_SIZE);
	memset(page_address(data[disks-1]), 0xee, PAGE_SIZE);

	/* Generate assumed good syndrome */
	init_completion(&cmp);
	init_async_submit(&submit, ASYNC_TX_ACK, NULL, callback, &cmp, addr_conv);
	tx = async_gen_syndrome(dataptrs, 0, disks, PAGE_SIZE, &submit);
	async_tx_issue_pending(tx);

	if (wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)) == 0) {
		pr("error: initial gen_syndrome(%d) timed out\n", disks);
		return 1;
	}

	pr("testing the %d-disk case...\n", disks);
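	/* Exercise every distinct pair of failed disks, including P and Q. */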
	for (i = 0; i < disks-1; i++)
		for (j = i+1; j < disks; j++) {
			(*tests)++;
			err += test_disks(i, j, disks);
		}

	return err;
}

static int raid6_test(void)
{
	int err = 0;
	int tests = 0;
	int i;

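	/* Allocate NDISKS test pages plus two recovery pages and one spare. */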
	for (i = 0; i < NDISKS+3; i++) {
		data[i] = alloc_page(GFP_KERNEL);
		if (!data[i]) {
			while (i--)
				put_page(data[i]);
			return -ENOMEM;
		}
	}

	/* the 4-disk and 5-disk cases are special for the recovery code */
	if (NDISKS > 4)
		err += test(4, &tests);
	if (NDISKS > 5)
		err += test(5, &tests);
	/* the 11 and 12 disk cases are special for ioatdma (p-disabled
	 * q-continuation without extended descriptor)
	 */
	if (NDISKS > 12) {
		err += test(11, &tests);
		err += test(12, &tests);
	}

	/* the 24 disk case is special for ioatdma as it is the boundary point
	 * at which it needs to switch from 8-source ops to 16-source
	 * ops for continuation (assumes DMA_HAS_PQ_CONTINUE is not set)
	 */
	if (NDISKS > 24)
		err += test(24, &tests);

	err += test(NDISKS, &tests);

	pr("\n");
	pr("complete (%d tests, %d failure%s)\n",
	   tests, err, err == 1 ? "" : "s");

	for (i = 0; i < NDISKS+3; i++)
		put_page(data[i]);

	return 0;
}

static void raid6_test_exit(void)
{
}

/* when compiled-in wait for drivers to load first (assumes dma drivers
 * are also compiled-in)
 */
late_initcall(raid6_test);
module_exit(raid6_test_exit);
MODULE_AUTHOR("Dan Williams <dan.j.williams@intel.com>");
MODULE_DESCRIPTION("asynchronous RAID-6 recovery self tests");
MODULE_LICENSE("GPL");