Skip to content

Commit 3e0fea0

Browse files
torvalds authored and rostedt committed
tracing/user_events: Split up mm alloc and attach
When a new mm is being created in a fork() path it currently is allocated and then attached in one go. This leaves the mm exposed out to the tracing register callbacks while any parent enabler locations are copied in. This should not happen. Split up mm alloc and attach as unique operations. When duplicating enablers, first alloc, then duplicate, and only upon success, attach. This prevents any timing window outside of the event_reg mutex for enablement walking. This allows for dropping RCU requirement for enablement walking in later patches. Link: https://lkml.kernel.org/r/20230519230741.669-2-beaub@linux.microsoft.com Link: https://lore.kernel.org/linux-trace-kernel/CAHk-=whTBvXJuoi_kACo3qi5WZUmRrhyA-_=rRFsycTytmB6qw@mail.gmail.com/ Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> [ change log written by Beau Belgrave ] Signed-off-by: Beau Belgrave <beaub@linux.microsoft.com> Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
1 parent 632478a commit 3e0fea0

1 file changed

Lines changed: 18 additions & 11 deletions

File tree

kernel/trace/trace_events_user.c

Lines changed: 18 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -539,10 +539,9 @@ static struct user_event_mm *user_event_mm_get_all(struct user_event *user)
539539
return found;
540540
}
541541

542-
static struct user_event_mm *user_event_mm_create(struct task_struct *t)
542+
static struct user_event_mm *user_event_mm_alloc(struct task_struct *t)
543543
{
544544
struct user_event_mm *user_mm;
545-
unsigned long flags;
546545

547546
user_mm = kzalloc(sizeof(*user_mm), GFP_KERNEL_ACCOUNT);
548547

@@ -554,12 +553,6 @@ static struct user_event_mm *user_event_mm_create(struct task_struct *t)
554553
refcount_set(&user_mm->refcnt, 1);
555554
refcount_set(&user_mm->tasks, 1);
556555

557-
spin_lock_irqsave(&user_event_mms_lock, flags);
558-
list_add_rcu(&user_mm->link, &user_event_mms);
559-
spin_unlock_irqrestore(&user_event_mms_lock, flags);
560-
561-
t->user_event_mm = user_mm;
562-
563556
/*
564557
* The lifetime of the memory descriptor can slightly outlast
565558
* the task lifetime if a ref to the user_event_mm is taken
@@ -573,17 +566,30 @@ static struct user_event_mm *user_event_mm_create(struct task_struct *t)
573566
return user_mm;
574567
}
575568

569+
static void user_event_mm_attach(struct user_event_mm *user_mm, struct task_struct *t)
570+
{
571+
unsigned long flags;
572+
573+
spin_lock_irqsave(&user_event_mms_lock, flags);
574+
list_add_rcu(&user_mm->link, &user_event_mms);
575+
spin_unlock_irqrestore(&user_event_mms_lock, flags);
576+
577+
t->user_event_mm = user_mm;
578+
}
579+
576580
static struct user_event_mm *current_user_event_mm(void)
577581
{
578582
struct user_event_mm *user_mm = current->user_event_mm;
579583

580584
if (user_mm)
581585
goto inc;
582586

583-
user_mm = user_event_mm_create(current);
587+
user_mm = user_event_mm_alloc(current);
584588

585589
if (!user_mm)
586590
goto error;
591+
592+
user_event_mm_attach(user_mm, current);
587593
inc:
588594
refcount_inc(&user_mm->refcnt);
589595
error:
@@ -671,7 +677,7 @@ void user_event_mm_remove(struct task_struct *t)
671677

672678
void user_event_mm_dup(struct task_struct *t, struct user_event_mm *old_mm)
673679
{
674-
struct user_event_mm *mm = user_event_mm_create(t);
680+
struct user_event_mm *mm = user_event_mm_alloc(t);
675681
struct user_event_enabler *enabler;
676682

677683
if (!mm)
@@ -685,10 +691,11 @@ void user_event_mm_dup(struct task_struct *t, struct user_event_mm *old_mm)
685691

686692
rcu_read_unlock();
687693

694+
user_event_mm_attach(mm, t);
688695
return;
689696
error:
690697
rcu_read_unlock();
691-
user_event_mm_remove(t);
698+
user_event_mm_destroy(mm);
692699
}
693700

694701
static bool current_user_event_enabler_exists(unsigned long uaddr,

0 commit comments

Comments (0)