/*
 * hw_random/core.c: HWRNG core API
 *
 * Copyright 2006 Michael Buesch <m@bues.ch>
 * Copyright 2005 (c) MontaVista Software, Inc.
 *
 * Please read Documentation/admin-guide/hw_random.rst for details on use.
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hw_random.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

#define RNG_MODULE_NAME		"hw_random"

#define RNG_BUFFER_SIZE (SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES)

static struct hwrng *current_rng;
/* the current rng has been explicitly chosen by user via sysfs */
static int cur_rng_set_by_user;
static struct task_struct *hwrng_fill;
/* list of registered rngs */
static LIST_HEAD(rng_list);
/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
static DEFINE_MUTEX(reading_mutex);
static int data_avail;
static u8 *rng_buffer, *rng_fillbuf;
static unsigned short current_quality;
static unsigned short default_quality = 1024; /* default to maximum */

module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
		 "current hwrng entropy estimation per 1024 bits of input -- obsolete, use rng_quality instead");
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
		 "default maximum entropy content of hwrng per 1024 bits of input");

static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
static int hwrng_fillfn(void *unused);

static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait);

static size_t rng_buffer_size(void)
{
	return RNG_BUFFER_SIZE;
}

static inline void cleanup_rng(struct kref *kref)
{
	struct hwrng *rng = container_of(kref, struct hwrng, ref);

	if (rng->cleanup)
		rng->cleanup(rng);

	complete(&rng->cleanup_done);
}

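/*
 * Make @rng the new current RNG: initialize it, drop the reference held on
 * the previous current RNG, and make sure the hwrng_fillfn kthread is
 * running. Caller must hold rng_mutex.
 */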
static int set_current_rng(struct hwrng *rng)
{
	int err;

	BUG_ON(!mutex_is_locked(&rng_mutex));

	err = hwrng_init(rng);
	if (err)
		return err;

	drop_current_rng();
	current_rng = rng;

	/* if necessary, start hwrng thread */
	if (!hwrng_fill) {
		hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
		if (IS_ERR(hwrng_fill)) {
			pr_err("hwrng_fill thread creation failed\n");
			hwrng_fill = NULL;
		}
	}

	return 0;
}

static void drop_current_rng(void)
{
	BUG_ON(!mutex_is_locked(&rng_mutex));
	if (!current_rng)
		return;

	/* decrease last reference for triggering the cleanup */
	kref_put(&current_rng->ref, cleanup_rng);
	current_rng = NULL;
}

/* Returns ERR_PTR(), NULL or refcounted hwrng */
static struct hwrng *get_current_rng_nolock(void)
{
	if (current_rng)
		kref_get(&current_rng->ref);

	return current_rng;
}

static struct hwrng *get_current_rng(void)
{
	struct hwrng *rng;

	if (mutex_lock_interruptible(&rng_mutex))
		return ERR_PTR(-ERESTARTSYS);

	rng = get_current_rng_nolock();

	mutex_unlock(&rng_mutex);
	return rng;
}

static void put_rng(struct hwrng *rng)
{
	/*
	 * Hold rng_mutex here so we serialize in case they set_current_rng
	 * on rng again immediately.
	 */
	mutex_lock(&rng_mutex);
	if (rng)
		kref_put(&rng->ref, cleanup_rng);
	mutex_unlock(&rng_mutex);
}

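/*
 * Take a reference on @rng, lazily calling its ->init() hook only when no
 * reference was held yet (i.e. the device is becoming current for the
 * first time or after a full cleanup).
 */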
static int hwrng_init(struct hwrng *rng)
{
	if (kref_get_unless_zero(&rng->ref))
		goto skip_init;

	if (rng->init) {
		int ret;

		ret = rng->init(rng);
		if (ret)
			return ret;
	}

	kref_init(&rng->ref);
	reinit_completion(&rng->cleanup_done);

skip_init:
	current_quality = rng->quality; /* obsolete */

	return 0;
}

static int rng_dev_open(struct inode *inode, struct file *filp)
{
	/* enforce read-only access to this chrdev */
	if ((filp->f_mode & FMODE_READ) == 0)
		return -EINVAL;
	if (filp->f_mode & FMODE_WRITE)
		return -EINVAL;
	return 0;
}

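/*
 * Pull up to @size bytes from @rng into @buffer, preferring the ->read()
 * hook and falling back to the legacy ->data_present()/->data_read() pair.
 * Caller must hold reading_mutex. Returns the number of bytes read or a
 * negative error code.
 */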
static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait) {
	int present;

	BUG_ON(!mutex_is_locked(&reading_mutex));
	if (rng->read) {
		int err;

		err = rng->read(rng, buffer, size, wait);
		if (WARN_ON_ONCE(err > 0 && err > size))
			err = size;

		return err;
	}

	if (rng->data_present)
		present = rng->data_present(rng, wait);
	else
		present = 1;

	if (present)
		return rng->data_read(rng, (u32 *)buffer);

	return 0;
}

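/*
 * read() handler for /dev/hwrng: refills the shared rng_buffer under
 * reading_mutex when it runs dry, then copies data to userspace through a
 * stack bounce buffer that is wiped before returning.
 */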
static ssize_t rng_dev_read(struct file *filp, char __user *buf,
			    size_t size, loff_t *offp)
{
	u8 buffer[RNG_BUFFER_SIZE];
	ssize_t ret = 0;
	int err = 0;
	int bytes_read, len;
	struct hwrng *rng;

	while (size) {
		rng = get_current_rng();
		if (IS_ERR(rng)) {
			err = PTR_ERR(rng);
			goto out;
		}
		if (!rng) {
			err = -ENODEV;
			goto out;
		}

		if (mutex_lock_interruptible(&reading_mutex)) {
			err = -ERESTARTSYS;
			goto out_put;
		}
		if (!data_avail) {
			bytes_read = rng_get_data(rng, rng_buffer,
						  rng_buffer_size(),
						  !(filp->f_flags & O_NONBLOCK));
			if (bytes_read < 0) {
				err = bytes_read;
				goto out_unlock_reading;
			} else if (bytes_read == 0 &&
				   (filp->f_flags & O_NONBLOCK)) {
				err = -EAGAIN;
				goto out_unlock_reading;
			}

			data_avail = bytes_read;
		}

		len = data_avail;
		if (len) {
			if (len > size)
				len = size;

			data_avail -= len;

			memcpy(buffer, rng_buffer + data_avail, len);
		}
		mutex_unlock(&reading_mutex);
		put_rng(rng);

		if (len) {
			if (copy_to_user(buf + ret, buffer, len)) {
				err = -EFAULT;
				goto out;
			}

			size -= len;
			ret += len;
		}

		if (need_resched())
			schedule_timeout_interruptible(1);

		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out;
		}
	}
out:
	memzero_explicit(buffer, sizeof(buffer));
	return ret ? : err;

out_unlock_reading:
	mutex_unlock(&reading_mutex);
out_put:
	put_rng(rng);
	goto out;
}

static const struct file_operations rng_chrdev_ops = {
	.owner		= THIS_MODULE,
	.open		= rng_dev_open,
	.read		= rng_dev_read,
	.llseek		= noop_llseek,
};

static const struct attribute_group *rng_dev_groups[];

static struct miscdevice rng_miscdev = {
	.minor		= HWRNG_MINOR,
	.name		= RNG_MODULE_NAME,
	.nodename	= "hwrng",
	.fops		= &rng_chrdev_ops,
	.groups		= rng_dev_groups,
};

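/*
 * With no user-selected RNG, switch to the registered RNG with the highest
 * quality, or drop the current one if the list is empty. Caller must hold
 * rng_mutex.
 */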
static int enable_best_rng(void)
{
	struct hwrng *rng, *new_rng = NULL;
	int ret = -ENODEV;

	BUG_ON(!mutex_is_locked(&rng_mutex));

	/* no rng to use? */
	if (list_empty(&rng_list)) {
		drop_current_rng();
		cur_rng_set_by_user = 0;
		return 0;
	}

	/* use the rng which offers the best quality */
	list_for_each_entry(rng, &rng_list, list) {
		if (!new_rng || rng->quality > new_rng->quality)
			new_rng = rng;
	}

	ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
	if (!ret)
		cur_rng_set_by_user = 0;

	return ret;
}

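/*
 * sysfs store for rng_current: select the named RNG and pin that choice
 * against automatic switching, or re-enable automatic selection when an
 * empty string is written.
 */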
static ssize_t rng_current_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t len)
{
	int err;
	struct hwrng *rng, *new_rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;

	if (sysfs_streq(buf, "")) {
		err = enable_best_rng();
	} else {
		list_for_each_entry(rng, &rng_list, list) {
			if (sysfs_streq(rng->name, buf)) {
				err = set_current_rng(rng);
				if (!err)
					cur_rng_set_by_user = 1;
				break;
			}
		}
	}
	new_rng = get_current_rng_nolock();
	mutex_unlock(&rng_mutex);

	if (new_rng)
		put_rng(new_rng);

	return err ? : len;
}

static ssize_t rng_current_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t ret;
	struct hwrng *rng;

	rng = get_current_rng();
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	ret = sysfs_emit(buf, "%s\n", rng ? rng->name : "none");
	put_rng(rng);

	return ret;
}

static ssize_t rng_available_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	int err;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	buf[0] = '\0';
	list_for_each_entry(rng, &rng_list, list) {
		strlcat(buf, rng->name, PAGE_SIZE);
		strlcat(buf, " ", PAGE_SIZE);
	}
	strlcat(buf, "\n", PAGE_SIZE);
	mutex_unlock(&rng_mutex);

	return strlen(buf);
}

static ssize_t rng_selected_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	return sysfs_emit(buf, "%d\n", cur_rng_set_by_user);
}

static ssize_t rng_quality_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t ret;
	struct hwrng *rng;

	rng = get_current_rng();
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	if (!rng) /* no need to put_rng */
		return -ENODEV;

	ret = sysfs_emit(buf, "%hu\n", rng->quality);
	put_rng(rng);

	return ret;
}

static ssize_t rng_quality_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t len)
{
	u16 quality;
	int ret = -EINVAL;

	if (len < 2)
		return -EINVAL;

	ret = mutex_lock_interruptible(&rng_mutex);
	if (ret)
		return -ERESTARTSYS;

	ret = kstrtou16(buf, 0, &quality);
	if (ret || quality > 1024) {
		ret = -EINVAL;
		goto out;
	}

	if (!current_rng) {
		ret = -ENODEV;
		goto out;
	}

	current_rng->quality = quality;
	current_quality = quality; /* obsolete */

	/* the best available RNG may have changed */
	ret = enable_best_rng();

out:
	mutex_unlock(&rng_mutex);
	return ret ? ret : len;
}

static DEVICE_ATTR_RW(rng_current);
static DEVICE_ATTR_RO(rng_available);
static DEVICE_ATTR_RO(rng_selected);
static DEVICE_ATTR_RW(rng_quality);

static struct attribute *rng_dev_attrs[] = {
	&dev_attr_rng_current.attr,
	&dev_attr_rng_available.attr,
	&dev_attr_rng_selected.attr,
	&dev_attr_rng_quality.attr,
	NULL
};

ATTRIBUTE_GROUPS(rng_dev);

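/*
 * Kernel thread that keeps pulling data from the current RNG and feeding it
 * to the input pool, crediting entropy according to the device's quality
 * setting (entropy per 1024 bits of input).
 */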
static int hwrng_fillfn(void *unused)
{
	size_t entropy, entropy_credit = 0; /* in 1/1024 of a bit */
	long rc;

	while (!kthread_should_stop()) {
		unsigned short quality;
		struct hwrng *rng;

		rng = get_current_rng();
		if (IS_ERR(rng) || !rng)
			break;
		mutex_lock(&reading_mutex);
		rc = rng_get_data(rng, rng_fillbuf,
				  rng_buffer_size(), 1);
		if (current_quality != rng->quality)
			rng->quality = current_quality; /* obsolete */
		quality = rng->quality;
		mutex_unlock(&reading_mutex);

		if (rc <= 0)
			hwrng_msleep(rng, 10000);

		put_rng(rng);

		if (rc <= 0)
			continue;

		/* If we cannot credit at least one bit of entropy,
		 * keep track of the remainder for the next iteration
		 */
		entropy = rc * quality * 8 + entropy_credit;
		if ((entropy >> 10) == 0)
			entropy_credit = entropy;

		/* Outside lock, sure, but y'know: randomness. */
		add_hwgenerator_randomness((void *)rng_fillbuf, rc,
					   entropy >> 10, true);
	}
	hwrng_fill = NULL;
	return 0;
}

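/*
 * Register @rng with the core. A driver fills in a struct hwrng with at
 * least a name and either a ->read() or a legacy ->data_read() hook, for
 * example (illustrative sketch only; the "foo" names are hypothetical):
 *
 *	static int foo_rng_read(struct hwrng *rng, void *buf, size_t max,
 *				bool wait)
 *	{
 *		// fill buf; return bytes written or -errno
 *	}
 *
 *	static struct hwrng foo_rng = {
 *		.name    = "foo",
 *		.read    = foo_rng_read,
 *		.quality = 1024,
 *	};
 *
 *	err = hwrng_register(&foo_rng);
 *
 * Returns 0 on success, -EINVAL for a malformed struct hwrng, -EEXIST if
 * an RNG with the same name is already registered, or the error from the
 * device's ->init() if it becomes current and fails to initialize.
 */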
int hwrng_register(struct hwrng *rng)
{
	int err = -EINVAL;
	struct hwrng *tmp;

	if (!rng->name || (!rng->data_read && !rng->read))
		goto out;

	mutex_lock(&rng_mutex);

	/* Must not register two RNGs with the same name. */
	err = -EEXIST;
	list_for_each_entry(tmp, &rng_list, list) {
		if (strcmp(tmp->name, rng->name) == 0)
			goto out_unlock;
	}
	list_add_tail(&rng->list, &rng_list);

	init_completion(&rng->cleanup_done);
	complete(&rng->cleanup_done);
	init_completion(&rng->dying);

	/* Adjust quality field to always have a proper value */
	rng->quality = min_t(u16, min_t(u16, default_quality, 1024), rng->quality ?: 1024);

	if (!current_rng ||
	    (!cur_rng_set_by_user && rng->quality > current_rng->quality)) {
		/*
		 * Set new rng as current as the new rng source
		 * provides better entropy quality and was not
		 * chosen by userspace.
		 */
		err = set_current_rng(rng);
		if (err)
			goto out_unlock;
	}
	mutex_unlock(&rng_mutex);
	return 0;
out_unlock:
	mutex_unlock(&rng_mutex);
out:
	return err;
}
EXPORT_SYMBOL_GPL(hwrng_register);

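/*
 * Remove @rng from the core: wake up any driver callback sleeping in
 * hwrng_msleep()/hwrng_yield(), switch to the next-best RNG if @rng was
 * current, and wait until its cleanup has completed.
 */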
void hwrng_unregister(struct hwrng *rng)
{
	struct hwrng *new_rng;
	int err;

	mutex_lock(&rng_mutex);

	list_del(&rng->list);
	complete_all(&rng->dying);
	if (current_rng == rng) {
		err = enable_best_rng();
		if (err) {
			drop_current_rng();
			cur_rng_set_by_user = 0;
		}
	}

	new_rng = get_current_rng_nolock();
	if (list_empty(&rng_list)) {
		mutex_unlock(&rng_mutex);
		if (hwrng_fill)
			kthread_stop(hwrng_fill);
	} else
		mutex_unlock(&rng_mutex);

	if (new_rng)
		put_rng(new_rng);

	wait_for_completion(&rng->cleanup_done);
}
EXPORT_SYMBOL_GPL(hwrng_unregister);

static void devm_hwrng_release(struct device *dev, void *res)
{
	hwrng_unregister(*(struct hwrng **)res);
}

static int devm_hwrng_match(struct device *dev, void *res, void *data)
{
	struct hwrng **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}

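/*
 * Device-managed variant of hwrng_register(): the RNG is unregistered
 * automatically when @dev is unbound.
 */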
int devm_hwrng_register(struct device *dev, struct hwrng *rng)
{
	struct hwrng **ptr;
	int error;

	ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	error = hwrng_register(rng);
	if (error) {
		devres_free(ptr);
		return error;
	}

	*ptr = rng;
	devres_add(dev, ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_hwrng_register);

void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
{
	devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng);
}
EXPORT_SYMBOL_GPL(devm_hwrng_unregister);

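/*
 * Sleep helpers for driver ->read() callbacks: both return early if the RNG
 * is being unregistered (rng->dying completes), so a slow device cannot
 * stall hwrng_unregister().
 */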
long hwrng_msleep(struct hwrng *rng, unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	return wait_for_completion_interruptible_timeout(&rng->dying, timeout);
}
EXPORT_SYMBOL_GPL(hwrng_msleep);

long hwrng_yield(struct hwrng *rng)
{
	return wait_for_completion_interruptible_timeout(&rng->dying, 1);
}
EXPORT_SYMBOL_GPL(hwrng_yield);

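/*
 * Module init: allocate the shared buffers and register the /dev/hwrng
 * misc device.
 */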
static int __init hwrng_modinit(void)
{
	int ret;

	/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
	rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
	if (!rng_buffer)
		return -ENOMEM;

	rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
	if (!rng_fillbuf) {
		kfree(rng_buffer);
		return -ENOMEM;
	}

	ret = misc_register(&rng_miscdev);
	if (ret) {
		kfree(rng_fillbuf);
		kfree(rng_buffer);
	}

	return ret;
}

static void __exit hwrng_modexit(void)
{
	mutex_lock(&rng_mutex);
	BUG_ON(current_rng);
	kfree(rng_buffer);
	kfree(rng_fillbuf);
	mutex_unlock(&rng_mutex);

	misc_deregister(&rng_miscdev);
}

fs_initcall(hwrng_modinit); /* depends on misc_register() */
module_exit(hwrng_modexit);

MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL");