/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
 * Test interface for Jitter RNG.
 *
 * Copyright (C) 2023, Stephan Mueller <smueller@chronox.de>
 */

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#include "jitterentropy.h"

#define JENT_TEST_RINGBUFFER_SIZE	(1<<10)
#define JENT_TEST_RINGBUFFER_MASK	(JENT_TEST_RINGBUFFER_SIZE - 1)

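/*
 * State of one raw entropy collection interface: jent_testing_store() places
 * samples at the rb_writer index, jent_testing_reader() consumes them at
 * rb_reader, both while holding ->lock. ->read_wait lets a reader sleep until
 * the writer signals new data, and jent_testing_enabled gates runtime
 * (non-boot) data collection.
 */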
struct jent_testing {
	u64 jent_testing_rb[JENT_TEST_RINGBUFFER_SIZE];
	u32 rb_reader;
	atomic_t rb_writer;
	atomic_t jent_testing_enabled;
	spinlock_t lock;
	wait_queue_head_t read_wait;
};

static struct dentry *jent_raw_debugfs_root = NULL;

/*************************** Generic Data Handling ****************************/

/*
 * boot variable:
 * 0 ==> No boot test, gathering of runtime data allowed
 * 1 ==> Boot test enabled and ready for collecting data, gathering runtime
 *	 data is disabled
 * 2 ==> Boot test completed and disabled, gathering of runtime data is
 *	 disabled
 */
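
/*
 * The boot variable is wired up below as the boot_raw_hires_test module
 * parameter. As an illustration (the module and sysfs paths depend on how
 * this file is built, so treat them as assumptions), boot time collection
 * could be requested with
 *
 *   modprobe jitterentropy_rng boot_raw_hires_test=1
 *
 * and cleared again at runtime to re-enable regular data collection with
 *
 *   echo 0 > /sys/module/jitterentropy_rng/parameters/boot_raw_hires_test
 */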

static void jent_testing_reset(struct jent_testing *data)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	data->rb_reader = 0;
	atomic_set(&data->rb_writer, 0);
	spin_unlock_irqrestore(&data->lock, flags);
}

static void jent_testing_data_init(struct jent_testing *data, u32 boot)
{
	/*
	 * The boot time testing implies we have a running test. If the
	 * caller wants to clear it, he has to unset the boot_test flag
	 * at runtime via sysfs to enable regular runtime testing
	 */
	if (boot)
		return;

	jent_testing_reset(data);
	atomic_set(&data->jent_testing_enabled, 1);
	pr_warn("Enabling data collection\n");
}

static void jent_testing_fini(struct jent_testing *data, u32 boot)
{
	/* If we have boot data, we do not reset yet to allow data to be read */
	if (boot)
		return;

	atomic_set(&data->jent_testing_enabled, 0);
	jent_testing_reset(data);
	pr_warn("Disabling data collection\n");
}

static bool jent_testing_store(struct jent_testing *data, u64 value,
			       u32 *boot)
{
	unsigned long flags;

	if (!atomic_read(&data->jent_testing_enabled) && (*boot != 1))
		return false;

	spin_lock_irqsave(&data->lock, flags);

	/*
	 * Disable entropy testing for boot time testing after ring buffer
	 * is filled.
	 */
	if (*boot) {
		if (((u32)atomic_read(&data->rb_writer)) >
		     JENT_TEST_RINGBUFFER_SIZE) {
			*boot = 2;
			pr_warn_once("One time data collection test disabled\n");
			spin_unlock_irqrestore(&data->lock, flags);
			return false;
		}

		if (atomic_read(&data->rb_writer) == 1)
			pr_warn("One time data collection test enabled\n");
	}

	data->jent_testing_rb[((u32)atomic_read(&data->rb_writer)) &
			      JENT_TEST_RINGBUFFER_MASK] = value;
	atomic_inc(&data->rb_writer);

	spin_unlock_irqrestore(&data->lock, flags);

	if (wq_has_sleeper(&data->read_wait))
		wake_up_interruptible(&data->read_wait);

	return true;
}

static bool jent_testing_have_data(struct jent_testing *data)
{
	return ((((u32)atomic_read(&data->rb_writer)) &
		 JENT_TEST_RINGBUFFER_MASK) !=
		 (data->rb_reader & JENT_TEST_RINGBUFFER_MASK));
}

static int jent_testing_reader(struct jent_testing *data, u32 *boot,
			       u8 *outbuf, u32 outbuflen)
{
	unsigned long flags;
	int collected_data = 0;

	jent_testing_data_init(data, *boot);

	while (outbuflen) {
		u32 writer = (u32)atomic_read(&data->rb_writer);

		spin_lock_irqsave(&data->lock, flags);

		/* We have no data or reached the writer. */
		if (!writer || (writer == data->rb_reader)) {

			spin_unlock_irqrestore(&data->lock, flags);

			/*
			 * Now we gathered all boot data, enable regular data
			 * collection.
			 */
			if (*boot) {
				*boot = 0;
				goto out;
			}

			wait_event_interruptible(data->read_wait,
						 jent_testing_have_data(data));
			if (signal_pending(current)) {
				collected_data = -ERESTARTSYS;
				goto out;
			}

			continue;
		}

		/* We copy out word-wise */
		if (outbuflen < sizeof(u64)) {
			spin_unlock_irqrestore(&data->lock, flags);
			goto out;
		}

		memcpy(outbuf, &data->jent_testing_rb[data->rb_reader],
		       sizeof(u64));
		data->rb_reader++;

		spin_unlock_irqrestore(&data->lock, flags);

		outbuf += sizeof(u64);
		outbuflen -= sizeof(u64);
		collected_data += sizeof(u64);
	}

out:
	jent_testing_fini(data, *boot);
	return collected_data;
}

static int jent_testing_extract_user(struct file *file, char __user *buf,
				     size_t nbytes, loff_t *ppos,
				     int (*reader)(u8 *outbuf, u32 outbuflen))
{
	u8 *tmp, *tmp_aligned;
	int ret = 0, large_request = (nbytes > 256);

	if (!nbytes)
		return 0;

	/*
	 * The intention of this interface is for collecting at least
	 * 1000 samples due to the SP800-90B requirements. However, due to
	 * memory and performance constraints, it is not desirable to allocate
	 * 8000 bytes of memory. Instead, we allocate space for only 125
	 * samples, which will allow the user to collect all 1000 samples using
	 * 8 calls to this interface. See the usage example after this
	 * function.
	 */
	tmp = kmalloc(125 * sizeof(u64) + sizeof(u64), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	tmp_aligned = PTR_ALIGN(tmp, sizeof(u64));

	while (nbytes) {
		int i;

		if (large_request && need_resched()) {
			if (signal_pending(current)) {
				if (ret == 0)
					ret = -ERESTARTSYS;
				break;
			}
			schedule();
		}

		i = min_t(int, nbytes, 125 * sizeof(u64));
		i = reader(tmp_aligned, i);
		if (i <= 0) {
			if (i < 0)
				ret = i;
			break;
		}
		if (copy_to_user(buf, tmp_aligned, i)) {
			ret = -EFAULT;
			break;
		}

		nbytes -= i;
		buf += i;
		ret += i;
	}

	kfree_sensitive(tmp);

	if (ret > 0)
		*ppos += ret;

	return ret;
}
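
/*
 * Usage sketch: the debugfs directory created in jent_testing_init() is named
 * after KBUILD_MODNAME, so the path below is an assumption (it reads
 * jitterentropy_rng when this file is linked into the jitterentropy_rng
 * module). With that, collecting the 1000 raw samples for an SP800-90B
 * analysis typically amounts to a single 8000-byte read as root:
 *
 *   dd if=/sys/kernel/debug/jitterentropy_rng/jent_raw_hires \
 *      of=raw_hires.bin bs=8000 count=1
 */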

/************** Raw High-Resolution Timer Entropy Data Handling **************/

static u32 boot_raw_hires_test = 0;
module_param(boot_raw_hires_test, uint, 0644);
MODULE_PARM_DESC(boot_raw_hires_test,
		 "Enable gathering boot time high resolution timer entropy of the first Jitter RNG entropy events");

static struct jent_testing jent_raw_hires = {
	.rb_reader = 0,
	.rb_writer = ATOMIC_INIT(0),
	.lock      = __SPIN_LOCK_UNLOCKED(jent_raw_hires.lock),
	.read_wait = __WAIT_QUEUE_HEAD_INITIALIZER(jent_raw_hires.read_wait)
};

int jent_raw_hires_entropy_store(__u64 value)
{
	return jent_testing_store(&jent_raw_hires, value, &boot_raw_hires_test);
}
EXPORT_SYMBOL(jent_raw_hires_entropy_store);

static int jent_raw_hires_entropy_reader(u8 *outbuf, u32 outbuflen)
{
	return jent_testing_reader(&jent_raw_hires, &boot_raw_hires_test,
				   outbuf, outbuflen);
}

static ssize_t jent_raw_hires_read(struct file *file, char __user *to,
				   size_t count, loff_t *ppos)
{
	return jent_testing_extract_user(file, to, count, ppos,
					 jent_raw_hires_entropy_reader);
}

static const struct file_operations jent_raw_hires_fops = {
	.owner = THIS_MODULE,
	.read = jent_raw_hires_read,
};

/******************************* Initialization *******************************/

void jent_testing_init(void)
{
	jent_raw_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);

	debugfs_create_file_unsafe("jent_raw_hires", 0400,
				   jent_raw_debugfs_root, NULL,
				   &jent_raw_hires_fops);
}
EXPORT_SYMBOL(jent_testing_init);

void jent_testing_exit(void)
{
	debugfs_remove_recursive(jent_raw_debugfs_root);
}
EXPORT_SYMBOL(jent_testing_exit);
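
/*
 * Standalone userspace example (a sketch, not part of the kernel module):
 * collect the 1000 raw high-resolution timer samples recommended for an
 * SP800-90B analysis and print them in decimal. The debugfs path is an
 * assumption (see the note above the dd example) and the program must run
 * as root because the file is created with mode 0400.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define JENT_RAW_SAMPLES 1000

int main(void)
{
	uint64_t sample[JENT_RAW_SAMPLES];
	size_t collected = 0;
	FILE *f = fopen("/sys/kernel/debug/jitterentropy_rng/jent_raw_hires",
			"rb");

	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}

	/*
	 * The kernel hands out at most 125 samples (1000 bytes) per internal
	 * reader call, so keep reading until all samples are gathered.
	 */
	while (collected < JENT_RAW_SAMPLES) {
		size_t n = fread(&sample[collected], sizeof(uint64_t),
				 JENT_RAW_SAMPLES - collected, f);

		if (!n)
			break;
		collected += n;
	}
	fclose(f);

	/* One decimal sample per line, ready for further analysis. */
	for (size_t i = 0; i < collected; i++)
		printf("%llu\n", (unsigned long long)sample[i]);

	return collected == JENT_RAW_SAMPLES ? EXIT_SUCCESS : EXIT_FAILURE;
}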