Skip to content

Commit 41d8fba

Browse files
beaubelgrave
authored and rostedt committed
tracing/user_events: Limit max fault-in attempts
When event enablement changes, user_events attempts to update a bit in the user process. If a fault is hit, an attempt is made to fault-in the page, and the write is retried if the page made it in.

While this normally requires only a couple of attempts, it is possible a bad user process could attempt to cause infinite loops. Ensure fault-in attempts, either sync or async, are limited to a max of 10 attempts for each update. When the max is hit, return -EFAULT so another attempt is not made in all cases.

Link: https://lkml.kernel.org/r/20230425225107.8525-5-beaub@linux.microsoft.com
Suggested-by: Steven Rostedt (Google) <rostedt@goodmis.org>
Signed-off-by: Beau Belgrave <beaub@linux.microsoft.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
1 parent 97bbce8 commit 41d8fba

1 file changed

Lines changed: 35 additions & 14 deletions

File tree

kernel/trace/trace_events_user.c

Lines changed: 35 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -123,6 +123,7 @@ struct user_event_enabler_fault {
123123
struct work_struct work;
124124
struct user_event_mm *mm;
125125
struct user_event_enabler *enabler;
126+
int attempt;
126127
};
127128

128129
static struct kmem_cache *fault_cache;
@@ -266,11 +267,19 @@ static void user_event_enabler_destroy(struct user_event_enabler *enabler)
266267
kfree(enabler);
267268
}
268269

269-
static int user_event_mm_fault_in(struct user_event_mm *mm, unsigned long uaddr)
270+
static int user_event_mm_fault_in(struct user_event_mm *mm, unsigned long uaddr,
271+
int attempt)
270272
{
271273
bool unlocked;
272274
int ret;
273275

276+
/*
277+
* Normally this is low, ensure that it cannot be taken advantage of by
278+
* bad user processes to cause excessive looping.
279+
*/
280+
if (attempt > 10)
281+
return -EFAULT;
282+
274283
mmap_read_lock(mm->mm);
275284

276285
/* Ensure MM has tasks, cannot use after exit_mm() */
@@ -289,7 +298,7 @@ static int user_event_mm_fault_in(struct user_event_mm *mm, unsigned long uaddr)
289298

290299
static int user_event_enabler_write(struct user_event_mm *mm,
291300
struct user_event_enabler *enabler,
292-
bool fixup_fault);
301+
bool fixup_fault, int *attempt);
293302

294303
static void user_event_enabler_fault_fixup(struct work_struct *work)
295304
{
@@ -298,9 +307,10 @@ static void user_event_enabler_fault_fixup(struct work_struct *work)
298307
struct user_event_enabler *enabler = fault->enabler;
299308
struct user_event_mm *mm = fault->mm;
300309
unsigned long uaddr = enabler->addr;
310+
int attempt = fault->attempt;
301311
int ret;
302312

303-
ret = user_event_mm_fault_in(mm, uaddr);
313+
ret = user_event_mm_fault_in(mm, uaddr, attempt);
304314

305315
if (ret && ret != -ENOENT) {
306316
struct user_event *user = enabler->event;
@@ -329,7 +339,7 @@ static void user_event_enabler_fault_fixup(struct work_struct *work)
329339

330340
if (!ret) {
331341
mmap_read_lock(mm->mm);
332-
user_event_enabler_write(mm, enabler, true);
342+
user_event_enabler_write(mm, enabler, true, &attempt);
333343
mmap_read_unlock(mm->mm);
334344
}
335345
out:
@@ -341,7 +351,8 @@ static void user_event_enabler_fault_fixup(struct work_struct *work)
341351
}
342352

343353
static bool user_event_enabler_queue_fault(struct user_event_mm *mm,
344-
struct user_event_enabler *enabler)
354+
struct user_event_enabler *enabler,
355+
int attempt)
345356
{
346357
struct user_event_enabler_fault *fault;
347358

@@ -353,6 +364,7 @@ static bool user_event_enabler_queue_fault(struct user_event_mm *mm,
353364
INIT_WORK(&fault->work, user_event_enabler_fault_fixup);
354365
fault->mm = user_event_mm_get(mm);
355366
fault->enabler = enabler;
367+
fault->attempt = attempt;
356368

357369
/* Don't try to queue in again while we have a pending fault */
358370
set_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));
@@ -372,7 +384,7 @@ static bool user_event_enabler_queue_fault(struct user_event_mm *mm,
372384

373385
static int user_event_enabler_write(struct user_event_mm *mm,
374386
struct user_event_enabler *enabler,
375-
bool fixup_fault)
387+
bool fixup_fault, int *attempt)
376388
{
377389
unsigned long uaddr = enabler->addr;
378390
unsigned long *ptr;
@@ -383,6 +395,8 @@ static int user_event_enabler_write(struct user_event_mm *mm,
383395
lockdep_assert_held(&event_mutex);
384396
mmap_assert_locked(mm->mm);
385397

398+
*attempt += 1;
399+
386400
/* Ensure MM has tasks, cannot use after exit_mm() */
387401
if (refcount_read(&mm->tasks) == 0)
388402
return -ENOENT;
@@ -398,7 +412,7 @@ static int user_event_enabler_write(struct user_event_mm *mm,
398412
if (!fixup_fault)
399413
return -EFAULT;
400414

401-
if (!user_event_enabler_queue_fault(mm, enabler))
415+
if (!user_event_enabler_queue_fault(mm, enabler, *attempt))
402416
pr_warn("user_events: Unable to queue fault handler\n");
403417

404418
return -EFAULT;
@@ -439,15 +453,19 @@ static void user_event_enabler_update(struct user_event *user)
439453
struct user_event_enabler *enabler;
440454
struct user_event_mm *mm = user_event_mm_get_all(user);
441455
struct user_event_mm *next;
456+
int attempt;
442457

443458
while (mm) {
444459
next = mm->next;
445460
mmap_read_lock(mm->mm);
446461
rcu_read_lock();
447462

448-
list_for_each_entry_rcu(enabler, &mm->enablers, link)
449-
if (enabler->event == user)
450-
user_event_enabler_write(mm, enabler, true);
463+
list_for_each_entry_rcu(enabler, &mm->enablers, link) {
464+
if (enabler->event == user) {
465+
attempt = 0;
466+
user_event_enabler_write(mm, enabler, true, &attempt);
467+
}
468+
}
451469

452470
rcu_read_unlock();
453471
mmap_read_unlock(mm->mm);
@@ -695,6 +713,7 @@ static struct user_event_enabler
695713
struct user_event_enabler *enabler;
696714
struct user_event_mm *user_mm;
697715
unsigned long uaddr = (unsigned long)reg->enable_addr;
716+
int attempt = 0;
698717

699718
user_mm = current_user_event_mm();
700719

@@ -715,7 +734,8 @@ static struct user_event_enabler
715734

716735
/* Attempt to reflect the current state within the process */
717736
mmap_read_lock(user_mm->mm);
718-
*write_result = user_event_enabler_write(user_mm, enabler, false);
737+
*write_result = user_event_enabler_write(user_mm, enabler, false,
738+
&attempt);
719739
mmap_read_unlock(user_mm->mm);
720740

721741
/*
@@ -735,7 +755,7 @@ static struct user_event_enabler
735755

736756
if (*write_result) {
737757
/* Attempt to fault-in and retry if it worked */
738-
if (!user_event_mm_fault_in(user_mm, uaddr))
758+
if (!user_event_mm_fault_in(user_mm, uaddr, attempt))
739759
goto retry;
740760

741761
kfree(enabler);
@@ -2195,6 +2215,7 @@ static int user_event_mm_clear_bit(struct user_event_mm *user_mm,
21952215
{
21962216
struct user_event_enabler enabler;
21972217
int result;
2218+
int attempt = 0;
21982219

21992220
memset(&enabler, 0, sizeof(enabler));
22002221
enabler.addr = uaddr;
@@ -2205,14 +2226,14 @@ static int user_event_mm_clear_bit(struct user_event_mm *user_mm,
22052226

22062227
/* Force the bit to be cleared, since no event is attached */
22072228
mmap_read_lock(user_mm->mm);
2208-
result = user_event_enabler_write(user_mm, &enabler, false);
2229+
result = user_event_enabler_write(user_mm, &enabler, false, &attempt);
22092230
mmap_read_unlock(user_mm->mm);
22102231

22112232
mutex_unlock(&event_mutex);
22122233

22132234
if (result) {
22142235
/* Attempt to fault-in and retry if it worked */
2215-
if (!user_event_mm_fault_in(user_mm, uaddr))
2236+
if (!user_event_mm_fault_in(user_mm, uaddr, attempt))
22162237
goto retry;
22172238
}
22182239

0 commit comments

Comments
 (0)