Skip to content

Commit cc2f39d

Browse files
karin0 authored and herbertx committed
hwrng: core - use RCU and work_struct to fix race condition
Currently, hwrng_fill is not cleared until the hwrng_fillfn() thread exits. Since hwrng_unregister() reads hwrng_fill outside the rng_mutex lock, a concurrent hwrng_unregister() may call kthread_stop() again on the same task. Additionally, if hwrng_unregister() is called immediately after hwrng_register(), the stopped thread may never have been executed. Thus, hwrng_fill remains dirty even after hwrng_unregister() returns. In this case, subsequent calls to hwrng_register() will fail to start new threads, and hwrng_unregister() will call kthread_stop() on the same freed task. In both cases, a use-after-free occurs:

refcount_t: addition on 0; use-after-free.
WARNING: ... at lib/refcount.c:25 refcount_warn_saturate+0xec/0x1c0
Call Trace:
 kthread_stop+0x181/0x360
 hwrng_unregister+0x288/0x380
 virtrng_remove+0xe3/0x200

This patch fixes the race by protecting the global hwrng_fill pointer inside the rng_mutex lock, so that the hwrng_fillfn() thread is stopped only once, and the calls to kthread_run() and kthread_stop() are serialized with the lock held.

To avoid a deadlock in hwrng_fillfn() while it is being stopped with the lock held, we convert current_rng to RCU, so that get_current_rng() can read current_rng without holding the lock. To remove the lock from put_rng(), we also defer the actual cleanup to a work_struct. Since get_current_rng() no longer returns ERR_PTR values, the IS_ERR() checks are removed from its callers.

With hwrng_fill protected by the rng_mutex lock, hwrng_fillfn() can no longer clear hwrng_fill itself. Therefore, if hwrng_fillfn() returned directly after current_rng is dropped, kthread_stop() would later be called on a freed task_struct. To fix this, hwrng_fillfn() now calls schedule() to keep the task alive until it is stopped. The kthread_stop() call is also moved from hwrng_unregister() to drop_current_rng(), ensuring kthread_stop() is called on all paths where current_rng becomes NULL, so that the thread does not wait forever.
Fixes: be4000b ("hwrng: create filler thread") Suggested-by: Herbert Xu <herbert@gondor.apana.org.au> Signed-off-by: Lianjie Wang <karin0.zst@gmail.com> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
1 parent ccb679f commit cc2f39d

2 files changed

Lines changed: 107 additions & 63 deletions

File tree

drivers/char/hw_random/core.c

Lines changed: 105 additions & 63 deletions
Original file line numberDiff line numberDiff line change
@@ -20,23 +20,25 @@
2020
#include <linux/miscdevice.h>
2121
#include <linux/module.h>
2222
#include <linux/random.h>
23+
#include <linux/rcupdate.h>
2324
#include <linux/sched.h>
2425
#include <linux/sched/signal.h>
2526
#include <linux/slab.h>
2627
#include <linux/string.h>
2728
#include <linux/uaccess.h>
29+
#include <linux/workqueue.h>
2830

2931
#define RNG_MODULE_NAME "hw_random"
3032

3133
#define RNG_BUFFER_SIZE (SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES)
3234

33-
static struct hwrng *current_rng;
35+
static struct hwrng __rcu *current_rng;
3436
/* the current rng has been explicitly chosen by user via sysfs */
3537
static int cur_rng_set_by_user;
3638
static struct task_struct *hwrng_fill;
3739
/* list of registered rngs */
3840
static LIST_HEAD(rng_list);
39-
/* Protects rng_list and current_rng */
41+
/* Protects rng_list, hwrng_fill and updating on current_rng */
4042
static DEFINE_MUTEX(rng_mutex);
4143
/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
4244
static DEFINE_MUTEX(reading_mutex);
@@ -64,18 +66,39 @@ static size_t rng_buffer_size(void)
6466
return RNG_BUFFER_SIZE;
6567
}
6668

67-
static inline void cleanup_rng(struct kref *kref)
69+
static void cleanup_rng_work(struct work_struct *work)
6870
{
69-
struct hwrng *rng = container_of(kref, struct hwrng, ref);
71+
struct hwrng *rng = container_of(work, struct hwrng, cleanup_work);
72+
73+
/*
74+
* Hold rng_mutex here so we serialize in case they set_current_rng
75+
* on rng again immediately.
76+
*/
77+
mutex_lock(&rng_mutex);
78+
79+
/* Skip if rng has been reinitialized. */
80+
if (kref_read(&rng->ref)) {
81+
mutex_unlock(&rng_mutex);
82+
return;
83+
}
7084

7185
if (rng->cleanup)
7286
rng->cleanup(rng);
7387

7488
complete(&rng->cleanup_done);
89+
mutex_unlock(&rng_mutex);
90+
}
91+
92+
static inline void cleanup_rng(struct kref *kref)
93+
{
94+
struct hwrng *rng = container_of(kref, struct hwrng, ref);
95+
96+
schedule_work(&rng->cleanup_work);
7597
}
7698

7799
static int set_current_rng(struct hwrng *rng)
78100
{
101+
struct hwrng *old_rng;
79102
int err;
80103

81104
BUG_ON(!mutex_is_locked(&rng_mutex));
@@ -84,8 +107,14 @@ static int set_current_rng(struct hwrng *rng)
84107
if (err)
85108
return err;
86109

87-
drop_current_rng();
88-
current_rng = rng;
110+
old_rng = rcu_dereference_protected(current_rng,
111+
lockdep_is_held(&rng_mutex));
112+
rcu_assign_pointer(current_rng, rng);
113+
114+
if (old_rng) {
115+
synchronize_rcu();
116+
kref_put(&old_rng->ref, cleanup_rng);
117+
}
89118

90119
/* if necessary, start hwrng thread */
91120
if (!hwrng_fill) {
@@ -101,47 +130,56 @@ static int set_current_rng(struct hwrng *rng)
101130

102131
static void drop_current_rng(void)
103132
{
104-
BUG_ON(!mutex_is_locked(&rng_mutex));
105-
if (!current_rng)
133+
struct hwrng *rng;
134+
135+
rng = rcu_dereference_protected(current_rng,
136+
lockdep_is_held(&rng_mutex));
137+
if (!rng)
106138
return;
107139

140+
RCU_INIT_POINTER(current_rng, NULL);
141+
synchronize_rcu();
142+
143+
if (hwrng_fill) {
144+
kthread_stop(hwrng_fill);
145+
hwrng_fill = NULL;
146+
}
147+
108148
/* decrease last reference for triggering the cleanup */
109-
kref_put(&current_rng->ref, cleanup_rng);
110-
current_rng = NULL;
149+
kref_put(&rng->ref, cleanup_rng);
111150
}
112151

113-
/* Returns ERR_PTR(), NULL or refcounted hwrng */
152+
/* Returns NULL or refcounted hwrng */
114153
static struct hwrng *get_current_rng_nolock(void)
115154
{
116-
if (current_rng)
117-
kref_get(&current_rng->ref);
155+
struct hwrng *rng;
156+
157+
rng = rcu_dereference_protected(current_rng,
158+
lockdep_is_held(&rng_mutex));
159+
if (rng)
160+
kref_get(&rng->ref);
118161

119-
return current_rng;
162+
return rng;
120163
}
121164

122165
static struct hwrng *get_current_rng(void)
123166
{
124167
struct hwrng *rng;
125168

126-
if (mutex_lock_interruptible(&rng_mutex))
127-
return ERR_PTR(-ERESTARTSYS);
169+
rcu_read_lock();
170+
rng = rcu_dereference(current_rng);
171+
if (rng)
172+
kref_get(&rng->ref);
128173

129-
rng = get_current_rng_nolock();
174+
rcu_read_unlock();
130175

131-
mutex_unlock(&rng_mutex);
132176
return rng;
133177
}
134178

135179
static void put_rng(struct hwrng *rng)
136180
{
137-
/*
138-
* Hold rng_mutex here so we serialize in case they set_current_rng
139-
* on rng again immediately.
140-
*/
141-
mutex_lock(&rng_mutex);
142181
if (rng)
143182
kref_put(&rng->ref, cleanup_rng);
144-
mutex_unlock(&rng_mutex);
145183
}
146184

147185
static int hwrng_init(struct hwrng *rng)
@@ -213,10 +251,6 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
213251

214252
while (size) {
215253
rng = get_current_rng();
216-
if (IS_ERR(rng)) {
217-
err = PTR_ERR(rng);
218-
goto out;
219-
}
220254
if (!rng) {
221255
err = -ENODEV;
222256
goto out;
@@ -303,7 +337,7 @@ static struct miscdevice rng_miscdev = {
303337

304338
static int enable_best_rng(void)
305339
{
306-
struct hwrng *rng, *new_rng = NULL;
340+
struct hwrng *rng, *cur_rng, *new_rng = NULL;
307341
int ret = -ENODEV;
308342

309343
BUG_ON(!mutex_is_locked(&rng_mutex));
@@ -321,7 +355,9 @@ static int enable_best_rng(void)
321355
new_rng = rng;
322356
}
323357

324-
ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
358+
cur_rng = rcu_dereference_protected(current_rng,
359+
lockdep_is_held(&rng_mutex));
360+
ret = ((new_rng == cur_rng) ? 0 : set_current_rng(new_rng));
325361
if (!ret)
326362
cur_rng_set_by_user = 0;
327363

@@ -371,8 +407,6 @@ static ssize_t rng_current_show(struct device *dev,
371407
struct hwrng *rng;
372408

373409
rng = get_current_rng();
374-
if (IS_ERR(rng))
375-
return PTR_ERR(rng);
376410

377411
ret = sysfs_emit(buf, "%s\n", rng ? rng->name : "none");
378412
put_rng(rng);
@@ -416,8 +450,6 @@ static ssize_t rng_quality_show(struct device *dev,
416450
struct hwrng *rng;
417451

418452
rng = get_current_rng();
419-
if (IS_ERR(rng))
420-
return PTR_ERR(rng);
421453

422454
if (!rng) /* no need to put_rng */
423455
return -ENODEV;
@@ -432,6 +464,7 @@ static ssize_t rng_quality_store(struct device *dev,
432464
struct device_attribute *attr,
433465
const char *buf, size_t len)
434466
{
467+
struct hwrng *rng;
435468
u16 quality;
436469
int ret = -EINVAL;
437470

@@ -448,12 +481,13 @@ static ssize_t rng_quality_store(struct device *dev,
448481
goto out;
449482
}
450483

451-
if (!current_rng) {
484+
rng = rcu_dereference_protected(current_rng, lockdep_is_held(&rng_mutex));
485+
if (!rng) {
452486
ret = -ENODEV;
453487
goto out;
454488
}
455489

456-
current_rng->quality = quality;
490+
rng->quality = quality;
457491
current_quality = quality; /* obsolete */
458492

459493
/* the best available RNG may have changed */
@@ -489,8 +523,20 @@ static int hwrng_fillfn(void *unused)
489523
struct hwrng *rng;
490524

491525
rng = get_current_rng();
492-
if (IS_ERR(rng) || !rng)
526+
if (!rng) {
527+
/*
528+
* Keep the task_struct alive until kthread_stop()
529+
* is called to avoid UAF in drop_current_rng().
530+
*/
531+
while (!kthread_should_stop()) {
532+
set_current_state(TASK_INTERRUPTIBLE);
533+
if (!kthread_should_stop())
534+
schedule();
535+
}
536+
set_current_state(TASK_RUNNING);
493537
break;
538+
}
539+
494540
mutex_lock(&reading_mutex);
495541
rc = rng_get_data(rng, rng_fillbuf,
496542
rng_buffer_size(), 1);
@@ -518,14 +564,13 @@ static int hwrng_fillfn(void *unused)
518564
add_hwgenerator_randomness((void *)rng_fillbuf, rc,
519565
entropy >> 10, true);
520566
}
521-
hwrng_fill = NULL;
522567
return 0;
523568
}
524569

525570
int hwrng_register(struct hwrng *rng)
526571
{
527572
int err = -EINVAL;
528-
struct hwrng *tmp;
573+
struct hwrng *cur_rng, *tmp;
529574

530575
if (!rng->name || (!rng->data_read && !rng->read))
531576
goto out;
@@ -540,23 +585,27 @@ int hwrng_register(struct hwrng *rng)
540585
}
541586
list_add_tail(&rng->list, &rng_list);
542587

588+
INIT_WORK(&rng->cleanup_work, cleanup_rng_work);
543589
init_completion(&rng->cleanup_done);
544590
complete(&rng->cleanup_done);
545591
init_completion(&rng->dying);
546592

547593
/* Adjust quality field to always have a proper value */
548594
rng->quality = min3(default_quality, 1024, rng->quality ?: 1024);
549595

550-
if (!cur_rng_set_by_user &&
551-
(!current_rng || rng->quality > current_rng->quality)) {
552-
/*
553-
* Set new rng as current as the new rng source
554-
* provides better entropy quality and was not
555-
* chosen by userspace.
556-
*/
557-
err = set_current_rng(rng);
558-
if (err)
559-
goto out_unlock;
596+
if (!cur_rng_set_by_user) {
597+
cur_rng = rcu_dereference_protected(current_rng,
598+
lockdep_is_held(&rng_mutex));
599+
if (!cur_rng || rng->quality > cur_rng->quality) {
600+
/*
601+
* Set new rng as current as the new rng source
602+
* provides better entropy quality and was not
603+
* chosen by userspace.
604+
*/
605+
err = set_current_rng(rng);
606+
if (err)
607+
goto out_unlock;
608+
}
560609
}
561610
mutex_unlock(&rng_mutex);
562611
return 0;
@@ -569,32 +618,25 @@ EXPORT_SYMBOL_GPL(hwrng_register);
569618

570619
void hwrng_unregister(struct hwrng *rng)
571620
{
572-
struct hwrng *new_rng;
621+
struct hwrng *cur_rng;
573622
int err;
574623

575624
mutex_lock(&rng_mutex);
576625

577626
list_del(&rng->list);
578627
complete_all(&rng->dying);
579-
if (current_rng == rng) {
628+
629+
cur_rng = rcu_dereference_protected(current_rng,
630+
lockdep_is_held(&rng_mutex));
631+
if (cur_rng == rng) {
580632
err = enable_best_rng();
581633
if (err) {
582634
drop_current_rng();
583635
cur_rng_set_by_user = 0;
584636
}
585637
}
586638

587-
new_rng = get_current_rng_nolock();
588-
if (list_empty(&rng_list)) {
589-
mutex_unlock(&rng_mutex);
590-
if (hwrng_fill)
591-
kthread_stop(hwrng_fill);
592-
} else
593-
mutex_unlock(&rng_mutex);
594-
595-
if (new_rng)
596-
put_rng(new_rng);
597-
639+
mutex_unlock(&rng_mutex);
598640
wait_for_completion(&rng->cleanup_done);
599641
}
600642
EXPORT_SYMBOL_GPL(hwrng_unregister);
@@ -682,7 +724,7 @@ static int __init hwrng_modinit(void)
682724
static void __exit hwrng_modexit(void)
683725
{
684726
mutex_lock(&rng_mutex);
685-
BUG_ON(current_rng);
727+
WARN_ON(rcu_access_pointer(current_rng));
686728
kfree(rng_buffer);
687729
kfree(rng_fillbuf);
688730
mutex_unlock(&rng_mutex);

include/linux/hw_random.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@
1515
#include <linux/completion.h>
1616
#include <linux/kref.h>
1717
#include <linux/types.h>
18+
#include <linux/workqueue_types.h>
1819

1920
/**
2021
* struct hwrng - Hardware Random Number Generator driver
@@ -48,6 +49,7 @@ struct hwrng {
4849
/* internal. */
4950
struct list_head list;
5051
struct kref ref;
52+
struct work_struct cleanup_work;
5153
struct completion cleanup_done;
5254
struct completion dying;
5355
};

0 commit comments

Comments
 (0)