/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
 * Test interface for Jitter RNG.
 *
 * Copyright (C) 2023, Stephan Mueller <smueller@chronox.de>
 */

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#include "jitterentropy.h"

#define JENT_TEST_RINGBUFFER_SIZE	(1<<10)
#define JENT_TEST_RINGBUFFER_MASK	(JENT_TEST_RINGBUFFER_SIZE - 1)
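/*
 * Note: JENT_TEST_RINGBUFFER_SIZE must remain a power of two so that the
 * mask above correctly wraps ring buffer indices.
 */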

struct jent_testing {
	u32 jent_testing_rb[JENT_TEST_RINGBUFFER_SIZE];	/* sample ring buffer */
	u32 rb_reader;			/* read position (unmasked counter) */
	atomic_t rb_writer;		/* write position (unmasked counter) */
	atomic_t jent_testing_enabled;	/* runtime data collection enabled? */
	spinlock_t lock;		/* protects ring buffer and counters */
	wait_queue_head_t read_wait;	/* readers waiting for new samples */
};

static struct dentry *jent_raw_debugfs_root;

/*************************** Generic Data Handling ****************************/

/*
 * boot variable:
 * 0 ==> No boot test, gathering of runtime data allowed
 * 1 ==> Boot test enabled and ready for collecting data, gathering runtime
 *	 data is disabled
 * 2 ==> Boot test completed and disabled, gathering of runtime data is
 *	 disabled
 */
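
/*
 * Typical boot-test workflow (illustrative, derived from the code below):
 * load the module with the boot test parameter set to 1, let the Jitter RNG
 * fill the ring buffer with its first samples, then read them out via the
 * debugfs file created in jent_testing_init(). Draining the boot data clears
 * the boot flag so that subsequent reads collect runtime data instead.
 */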

static void jent_testing_reset(struct jent_testing *data)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	data->rb_reader = 0;
	atomic_set(&data->rb_writer, 0);
	spin_unlock_irqrestore(&data->lock, flags);
}

static void jent_testing_data_init(struct jent_testing *data, u32 boot)
{
	/*
	 * The boot time testing implies we have a running test. If the
	 * caller wants to clear it, the boot_test flag has to be unset
	 * at runtime via sysfs to enable regular runtime testing.
	 */
	if (boot)
		return;

	jent_testing_reset(data);
	atomic_set(&data->jent_testing_enabled, 1);
	pr_warn("Enabling data collection\n");
}

static void jent_testing_fini(struct jent_testing *data, u32 boot)
{
	/* If we have boot data, we do not reset yet to allow data to be read */
	if (boot)
		return;

	atomic_set(&data->jent_testing_enabled, 0);
	jent_testing_reset(data);
	pr_warn("Disabling data collection\n");
}

static bool jent_testing_store(struct jent_testing *data, u32 value,
			       u32 *boot)
{
	unsigned long flags;

	if (!atomic_read(&data->jent_testing_enabled) && (*boot != 1))
		return false;

	spin_lock_irqsave(&data->lock, flags);

	/*
	 * Disable entropy testing for boot time testing after ring buffer
	 * is filled.
	 */
	if (*boot) {
		if (((u32)atomic_read(&data->rb_writer)) >=
		    JENT_TEST_RINGBUFFER_SIZE) {
			*boot = 2;
			pr_warn_once("One time data collection test disabled\n");
			spin_unlock_irqrestore(&data->lock, flags);
			return false;
		}

		if (atomic_read(&data->rb_writer) == 1)
			pr_warn("One time data collection test enabled\n");
	}

	data->jent_testing_rb[((u32)atomic_read(&data->rb_writer)) &
			      JENT_TEST_RINGBUFFER_MASK] = value;
	atomic_inc(&data->rb_writer);

	spin_unlock_irqrestore(&data->lock, flags);

	if (wq_has_sleeper(&data->read_wait))
		wake_up_interruptible(&data->read_wait);

	return true;
}

static bool jent_testing_have_data(struct jent_testing *data)
{
	return ((((u32)atomic_read(&data->rb_writer)) &
		 JENT_TEST_RINGBUFFER_MASK) !=
		 (data->rb_reader & JENT_TEST_RINGBUFFER_MASK));
}

static int jent_testing_reader(struct jent_testing *data, u32 *boot,
			       u8 *outbuf, u32 outbuflen)
{
	unsigned long flags;
	int collected_data = 0;

	jent_testing_data_init(data, *boot);

	while (outbuflen) {
		u32 writer = (u32)atomic_read(&data->rb_writer);

		spin_lock_irqsave(&data->lock, flags);

		/* We have no data or reached the writer. */
		if (!writer || (writer == data->rb_reader)) {

			spin_unlock_irqrestore(&data->lock, flags);

			/*
			 * All boot data has been gathered; clear the boot
			 * flag so that regular data collection can be
			 * enabled.
			 */
			if (*boot) {
				*boot = 0;
				goto out;
			}

			wait_event_interruptible(data->read_wait,
						 jent_testing_have_data(data));
			if (signal_pending(current)) {
				collected_data = -ERESTARTSYS;
				goto out;
			}

			continue;
		}

		/* We copy out word-wise */
		if (outbuflen < sizeof(u32)) {
			spin_unlock_irqrestore(&data->lock, flags);
			goto out;
		}

		memcpy(outbuf, &data->jent_testing_rb[data->rb_reader],
		       sizeof(u32));
		data->rb_reader++;

		spin_unlock_irqrestore(&data->lock, flags);

		outbuf += sizeof(u32);
		outbuflen -= sizeof(u32);
		collected_data += sizeof(u32);
	}

out:
	jent_testing_fini(data, *boot);
	return collected_data;
}

static int jent_testing_extract_user(struct file *file, char __user *buf,
				     size_t nbytes, loff_t *ppos,
				     int (*reader)(u8 *outbuf, u32 outbuflen))
{
	u8 *tmp, *tmp_aligned;
	int ret = 0, large_request = (nbytes > 256);

	if (!nbytes)
		return 0;

	/*
	 * The intention of this interface is for collecting at least
	 * 1000 samples due to the SP800-90B requirements. So, we make no
	 * effort to avoid allocating more memory than is actually needed
	 * by the user. Hence, we allocate sufficient memory to always hold
	 * that amount of data.
	 */
	tmp = kmalloc(JENT_TEST_RINGBUFFER_SIZE + sizeof(u32), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	tmp_aligned = PTR_ALIGN(tmp, sizeof(u32));

	while (nbytes) {
		int i;

		if (large_request && need_resched()) {
			if (signal_pending(current)) {
				if (ret == 0)
					ret = -ERESTARTSYS;
				break;
			}
			schedule();
		}

		i = min_t(int, nbytes, JENT_TEST_RINGBUFFER_SIZE);
		i = reader(tmp_aligned, i);
		if (i <= 0) {
			if (i < 0)
				ret = i;
			break;
		}
		if (copy_to_user(buf, tmp_aligned, i)) {
			ret = -EFAULT;
			break;
		}

		nbytes -= i;
		buf += i;
		ret += i;
	}

	kfree_sensitive(tmp);

	if (ret > 0)
		*ppos += ret;

	return ret;
}

/************** Raw High-Resolution Timer Entropy Data Handling **************/

static u32 boot_raw_hires_test;
module_param(boot_raw_hires_test, uint, 0644);
MODULE_PARM_DESC(boot_raw_hires_test,
		 "Enable gathering boot time high resolution timer entropy of the first Jitter RNG entropy events");

static struct jent_testing jent_raw_hires = {
	.rb_reader = 0,
	.rb_writer = ATOMIC_INIT(0),
	.lock      = __SPIN_LOCK_UNLOCKED(jent_raw_hires.lock),
	.read_wait = __WAIT_QUEUE_HEAD_INITIALIZER(jent_raw_hires.read_wait)
};

/* Store one raw high-resolution timer sample from the Jitter RNG noise source. */
int jent_raw_hires_entropy_store(__u32 value)
{
	return jent_testing_store(&jent_raw_hires, value, &boot_raw_hires_test);
}
EXPORT_SYMBOL(jent_raw_hires_entropy_store);

static int jent_raw_hires_entropy_reader(u8 *outbuf, u32 outbuflen)
{
	return jent_testing_reader(&jent_raw_hires, &boot_raw_hires_test,
				   outbuf, outbuflen);
}

static ssize_t jent_raw_hires_read(struct file *file, char __user *to,
				   size_t count, loff_t *ppos)
{
	return jent_testing_extract_user(file, to, count, ppos,
					 jent_raw_hires_entropy_reader);
}

static const struct file_operations jent_raw_hires_fops = {
	.owner = THIS_MODULE,
	.read = jent_raw_hires_read,
};

/******************************* Initialization *******************************/

void jent_testing_init(void)
{
	jent_raw_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);

	debugfs_create_file_unsafe("jent_raw_hires", 0400,
				   jent_raw_debugfs_root, NULL,
				   &jent_raw_hires_fops);
}
EXPORT_SYMBOL(jent_testing_init);
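
/*
 * Illustrative usage (not part of the kernel code itself): with debugfs
 * mounted at /sys/kernel/debug, the raw samples are exposed under a
 * directory named after KBUILD_MODNAME of the module this file is built
 * into. For example, 1000 32-bit samples could be collected with a command
 * along the lines of:
 *
 *   dd if=/sys/kernel/debug/<module name>/jent_raw_hires of=raw.bin bs=4 count=1000
 */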

void jent_testing_exit(void)
{
	debugfs_remove_recursive(jent_raw_debugfs_root);
}
EXPORT_SYMBOL(jent_testing_exit);