Skip to content

Commit 68010e7

Browse files
committed
Merge tag 'trace-v7.0-2' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace
Pull tracing fixes from Steven Rostedt: - Fix possible dereference of uninitialized pointer When validating the persistent ring buffer on boot up, if the first validation fails, a reference to "head_page" is performed in the error path, but it skips over the initialization of that variable. Move the initialization before the first validation check. - Fix use of event length in validation of persistent ring buffer On boot up, the persistent ring buffer is checked to see if it is valid by several methods. One method is to walk all the events in the memory location to make sure they are all valid. The length of the event is used to move to the next event. This length is determined by the data in the buffer. If that length is corrupted, it could cause the next event to be read from a bad memory location. Validate the length field of the event when doing the event walk. - Fix function graph on archs that do not support use of ftrace_ops When an architecture defines HAVE_DYNAMIC_FTRACE_WITH_ARGS, it means that its function graph tracer uses the ftrace_ops of the function tracer to call its callbacks. This allows a single registered callback to be called directly instead of checking the callback's meta data's hash entries against the function being traced. For architectures that do not support this feature, it must always call the loop function that tests each registered callback (even if there's only one). The loop function tests each callback's meta data against its hash of functions and will call its callback if the function being traced is in its hash map. The issue was that there was no check against this and the direct function was being called even if the architecture didn't support it. 
This meant that if function tracing was enabled at the same time as a callback was registered with the function graph tracer, its callback would be called for every function that the function tracer also traced, even if the callback's meta data only wanted to be called back for a small subset of functions. Prevent the direct calling for those architectures that do not support it. - Fix references to trace_event_file for hist files The hist files used event_file_data() to get a reference to the associated trace_event_file the histogram was attached to. This would return a pointer even if the trace_event_file is about to be freed (via RCU). Instead it should use the event_file_file() helper that returns NULL if the trace_event_file is marked to be freed so that no new references are added to it. - Wake up hist poll readers when an event is being freed When polling on a hist file, the task is only awoken when a hist trigger is triggered. This means that if an event is being freed while there's a task waiting on its hist file, it will need to wait until the hist trigger occurs to wake it up and allow the freeing to happen. Note, the event will not be completely freed until all references are removed, and a hist poller keeps a reference. But it should still be woken when the event is being freed. * tag 'trace-v7.0-2' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace: tracing: Wake up poll waiters for hist files when removing an event tracing: Fix checking of freed trace_event_file for hist files fgraph: Do not call handlers direct when not using ftrace_ops tracing: ring-buffer: Fix to check event length before using ring-buffer: Fix possible dereference of uninitialized pointer
2 parents b3f1da2 + 9678e53 commit 68010e7

6 files changed

Lines changed: 38 additions & 8 deletions

File tree

include/linux/ftrace.h

Lines changed: 10 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1092,10 +1092,17 @@ static inline bool is_ftrace_trampoline(unsigned long addr)
10921092

10931093
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
10941094
#ifndef ftrace_graph_func
1095-
#define ftrace_graph_func ftrace_stub
1096-
#define FTRACE_OPS_GRAPH_STUB FTRACE_OPS_FL_STUB
1095+
# define ftrace_graph_func ftrace_stub
1096+
# define FTRACE_OPS_GRAPH_STUB FTRACE_OPS_FL_STUB
1097+
/*
1098+
* The function graph is called every time the function tracer is called.
1099+
* It must always test the ops hash and cannot just directly call
1100+
* the handler.
1101+
*/
1102+
# define FGRAPH_NO_DIRECT 1
10971103
#else
1098-
#define FTRACE_OPS_GRAPH_STUB 0
1104+
# define FTRACE_OPS_GRAPH_STUB 0
1105+
# define FGRAPH_NO_DIRECT 0
10991106
#endif
11001107
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
11011108

include/linux/trace_events.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -683,6 +683,11 @@ static inline void hist_poll_wakeup(void)
683683

684684
#define hist_poll_wait(file, wait) \
685685
poll_wait(file, &hist_poll_wq, wait)
686+
687+
#else
688+
static inline void hist_poll_wakeup(void)
689+
{
690+
}
686691
#endif
687692

688693
#define __TRACE_EVENT_FLAGS(name, value) \

kernel/trace/fgraph.c

Lines changed: 11 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -539,7 +539,11 @@ static struct fgraph_ops fgraph_stub = {
539539
static struct fgraph_ops *fgraph_direct_gops = &fgraph_stub;
540540
DEFINE_STATIC_CALL(fgraph_func, ftrace_graph_entry_stub);
541541
DEFINE_STATIC_CALL(fgraph_retfunc, ftrace_graph_ret_stub);
542+
#if FGRAPH_NO_DIRECT
543+
static DEFINE_STATIC_KEY_FALSE(fgraph_do_direct);
544+
#else
542545
static DEFINE_STATIC_KEY_TRUE(fgraph_do_direct);
546+
#endif
543547

544548
/**
545549
* ftrace_graph_stop - set to permanently disable function graph tracing
@@ -843,7 +847,7 @@ __ftrace_return_to_handler(struct ftrace_regs *fregs, unsigned long frame_pointe
843847
bitmap = get_bitmap_bits(current, offset);
844848

845849
#ifdef CONFIG_HAVE_STATIC_CALL
846-
if (static_branch_likely(&fgraph_do_direct)) {
850+
if (!FGRAPH_NO_DIRECT && static_branch_likely(&fgraph_do_direct)) {
847851
if (test_bit(fgraph_direct_gops->idx, &bitmap))
848852
static_call(fgraph_retfunc)(&trace, fgraph_direct_gops, fregs);
849853
} else
@@ -1285,6 +1289,9 @@ static void ftrace_graph_enable_direct(bool enable_branch, struct fgraph_ops *go
12851289
trace_func_graph_ret_t retfunc = NULL;
12861290
int i;
12871291

1292+
if (FGRAPH_NO_DIRECT)
1293+
return;
1294+
12881295
if (gops) {
12891296
func = gops->entryfunc;
12901297
retfunc = gops->retfunc;
@@ -1308,6 +1315,9 @@ static void ftrace_graph_enable_direct(bool enable_branch, struct fgraph_ops *go
13081315

13091316
static void ftrace_graph_disable_direct(bool disable_branch)
13101317
{
1318+
if (FGRAPH_NO_DIRECT)
1319+
return;
1320+
13111321
if (disable_branch)
13121322
static_branch_disable(&fgraph_do_direct);
13131323
static_call_update(fgraph_func, ftrace_graph_entry_stub);

kernel/trace/ring_buffer.c

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1849,16 +1849,20 @@ static int rb_read_data_buffer(struct buffer_data_page *dpage, int tail, int cpu
18491849
struct ring_buffer_event *event;
18501850
u64 ts, delta;
18511851
int events = 0;
1852+
int len;
18521853
int e;
18531854

18541855
*delta_ptr = 0;
18551856
*timestamp = 0;
18561857

18571858
ts = dpage->time_stamp;
18581859

1859-
for (e = 0; e < tail; e += rb_event_length(event)) {
1860+
for (e = 0; e < tail; e += len) {
18601861

18611862
event = (struct ring_buffer_event *)(dpage->data + e);
1863+
len = rb_event_length(event);
1864+
if (len <= 0 || len > tail - e)
1865+
return -1;
18621866

18631867
switch (event->type_len) {
18641868

@@ -1919,6 +1923,8 @@ static void rb_meta_validate_events(struct ring_buffer_per_cpu *cpu_buffer)
19191923
if (!meta || !meta->head_buffer)
19201924
return;
19211925

1926+
orig_head = head_page = cpu_buffer->head_page;
1927+
19221928
/* Do the reader page first */
19231929
ret = rb_validate_buffer(cpu_buffer->reader_page->page, cpu_buffer->cpu);
19241930
if (ret < 0) {
@@ -1929,7 +1935,6 @@ static void rb_meta_validate_events(struct ring_buffer_per_cpu *cpu_buffer)
19291935
entry_bytes += local_read(&cpu_buffer->reader_page->page->commit);
19301936
local_set(&cpu_buffer->reader_page->entries, ret);
19311937

1932-
orig_head = head_page = cpu_buffer->head_page;
19331938
ts = head_page->page->time_stamp;
19341939

19351940
/*

kernel/trace/trace_events.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1311,6 +1311,9 @@ static void remove_event_file_dir(struct trace_event_file *file)
13111311
free_event_filter(file->filter);
13121312
file->flags |= EVENT_FILE_FL_FREED;
13131313
event_file_put(file);
1314+
1315+
/* Wake up hist poll waiters to notice the EVENT_FILE_FL_FREED flag. */
1316+
hist_poll_wakeup();
13141317
}
13151318

13161319
/*

kernel/trace/trace_events_hist.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5784,7 +5784,7 @@ static __poll_t event_hist_poll(struct file *file, struct poll_table_struct *wai
57845784

57855785
guard(mutex)(&event_mutex);
57865786

5787-
event_file = event_file_data(file);
5787+
event_file = event_file_file(file);
57885788
if (!event_file)
57895789
return EPOLLERR;
57905790

@@ -5822,7 +5822,7 @@ static int event_hist_open(struct inode *inode, struct file *file)
58225822

58235823
guard(mutex)(&event_mutex);
58245824

5825-
event_file = event_file_data(file);
5825+
event_file = event_file_file(file);
58265826
if (!event_file) {
58275827
ret = -ENODEV;
58285828
goto err;

0 commit comments

Comments
 (0)