Skip to content

Commit a1f157c

Browse files
Zheng Yejian authored and rostedt committed
tracing: Expand all ring buffers individually
The ring buffer of global_trace is set to the minimum size on boot up in order to save memory, and it is then expanded when some trace feature is enabled. However, currently operations under an instance can also cause the global_trace ring buffer to be expanded, and the expanded memory would be wasted if global_trace is then not used. See the following case: we enable the 'sched_switch' event in instance 'A', then the ring buffer of global_trace is unexpectedly expanded to 1410KB, and the '(expanded: 1408)' shown by 'buffer_size_kb' of the instance is also confusing. # cd /sys/kernel/tracing # mkdir instances/A # cat buffer_size_kb 7 (expanded: 1408) # cat instances/A/buffer_size_kb 1410 (expanded: 1408) # echo sched:sched_switch > instances/A/set_event # cat buffer_size_kb 1410 # cat instances/A/buffer_size_kb 1410 To fix it, we can: - Make 'ring_buffer_expanded' a member of 'struct trace_array'; - Make 'ring_buffer_expanded' of an instance default to true, while that of global_trace defaults to false; - In order not to expose 'global_trace' outside of the file 'kernel/trace/trace.c', introduce trace_set_ring_buffer_expanded() to set 'ring_buffer_expanded' to 'true'; - Pass the expected trace_array to tracing_update_buffers(). Link: https://lore.kernel.org/linux-trace-kernel/20230906091837.3998020-1-zhengyejian1@huawei.com Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com> Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
1 parent 8a749fd commit a1f157c

3 files changed

Lines changed: 45 additions & 33 deletions

File tree

kernel/trace/trace.c

Lines changed: 26 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -54,12 +54,6 @@
5454
#include "trace.h"
5555
#include "trace_output.h"
5656

57-
/*
58-
* On boot up, the ring buffer is set to the minimum size, so that
59-
* we do not waste memory on systems that are not using tracing.
60-
*/
61-
bool ring_buffer_expanded;
62-
6357
#ifdef CONFIG_FTRACE_STARTUP_TEST
6458
/*
6559
* We need to change this state when a selftest is running.
@@ -202,7 +196,7 @@ static int __init set_cmdline_ftrace(char *str)
202196
strscpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
203197
default_bootup_tracer = bootup_tracer_buf;
204198
/* We are using ftrace early, expand it */
205-
ring_buffer_expanded = true;
199+
trace_set_ring_buffer_expanded(NULL);
206200
return 1;
207201
}
208202
__setup("ftrace=", set_cmdline_ftrace);
@@ -247,7 +241,7 @@ static int __init boot_alloc_snapshot(char *str)
247241
} else {
248242
allocate_snapshot = true;
249243
/* We also need the main ring buffer expanded */
250-
ring_buffer_expanded = true;
244+
trace_set_ring_buffer_expanded(NULL);
251245
}
252246
return 1;
253247
}
@@ -490,6 +484,13 @@ static struct trace_array global_trace = {
490484
.trace_flags = TRACE_DEFAULT_FLAGS,
491485
};
492486

487+
void trace_set_ring_buffer_expanded(struct trace_array *tr)
488+
{
489+
if (!tr)
490+
tr = &global_trace;
491+
tr->ring_buffer_expanded = true;
492+
}
493+
493494
LIST_HEAD(ftrace_trace_arrays);
494495

495496
int trace_array_get(struct trace_array *this_tr)
@@ -2012,7 +2013,7 @@ static int run_tracer_selftest(struct tracer *type)
20122013
#ifdef CONFIG_TRACER_MAX_TRACE
20132014
if (type->use_max_tr) {
20142015
/* If we expanded the buffers, make sure the max is expanded too */
2015-
if (ring_buffer_expanded)
2016+
if (tr->ring_buffer_expanded)
20162017
ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
20172018
RING_BUFFER_ALL_CPUS);
20182019
tr->allocated_snapshot = true;
@@ -2038,7 +2039,7 @@ static int run_tracer_selftest(struct tracer *type)
20382039
tr->allocated_snapshot = false;
20392040

20402041
/* Shrink the max buffer again */
2041-
if (ring_buffer_expanded)
2042+
if (tr->ring_buffer_expanded)
20422043
ring_buffer_resize(tr->max_buffer.buffer, 1,
20432044
RING_BUFFER_ALL_CPUS);
20442045
}
@@ -3403,7 +3404,7 @@ void trace_printk_init_buffers(void)
34033404
pr_warn("**********************************************************\n");
34043405

34053406
/* Expand the buffers to set size */
3406-
tracing_update_buffers();
3407+
tracing_update_buffers(&global_trace);
34073408

34083409
buffers_allocated = 1;
34093410

@@ -6374,7 +6375,7 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
63746375
* we use the size that was given, and we can forget about
63756376
* expanding it later.
63766377
*/
6377-
ring_buffer_expanded = true;
6378+
trace_set_ring_buffer_expanded(tr);
63786379

63796380
/* May be called before buffers are initialized */
63806381
if (!tr->array_buffer.buffer)
@@ -6452,6 +6453,7 @@ ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
64526453

64536454
/**
64546455
* tracing_update_buffers - used by tracing facility to expand ring buffers
6456+
* @tr: The tracing instance
64556457
*
64566458
* To save on memory when the tracing is never used on a system with it
64576459
* configured in. The ring buffers are set to a minimum size. But once
@@ -6460,13 +6462,13 @@ ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
64606462
*
64616463
* This function is to be called when a tracer is about to be used.
64626464
*/
6463-
int tracing_update_buffers(void)
6465+
int tracing_update_buffers(struct trace_array *tr)
64646466
{
64656467
int ret = 0;
64666468

64676469
mutex_lock(&trace_types_lock);
6468-
if (!ring_buffer_expanded)
6469-
ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
6470+
if (!tr->ring_buffer_expanded)
6471+
ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
64706472
RING_BUFFER_ALL_CPUS);
64716473
mutex_unlock(&trace_types_lock);
64726474

@@ -6520,7 +6522,7 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
65206522

65216523
mutex_lock(&trace_types_lock);
65226524

6523-
if (!ring_buffer_expanded) {
6525+
if (!tr->ring_buffer_expanded) {
65246526
ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
65256527
RING_BUFFER_ALL_CPUS);
65266528
if (ret < 0)
@@ -7192,7 +7194,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
71927194
}
71937195

71947196
if (buf_size_same) {
7195-
if (!ring_buffer_expanded)
7197+
if (!tr->ring_buffer_expanded)
71967198
r = sprintf(buf, "%lu (expanded: %lu)\n",
71977199
size >> 10,
71987200
trace_buf_size >> 10);
@@ -7249,10 +7251,10 @@ tracing_total_entries_read(struct file *filp, char __user *ubuf,
72497251
mutex_lock(&trace_types_lock);
72507252
for_each_tracing_cpu(cpu) {
72517253
size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
7252-
if (!ring_buffer_expanded)
7254+
if (!tr->ring_buffer_expanded)
72537255
expanded_size += trace_buf_size >> 10;
72547256
}
7255-
if (ring_buffer_expanded)
7257+
if (tr->ring_buffer_expanded)
72567258
r = sprintf(buf, "%lu\n", size);
72577259
else
72587260
r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
@@ -7646,7 +7648,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
76467648
unsigned long val;
76477649
int ret;
76487650

7649-
ret = tracing_update_buffers();
7651+
ret = tracing_update_buffers(tr);
76507652
if (ret < 0)
76517653
return ret;
76527654

@@ -9550,6 +9552,9 @@ static struct trace_array *trace_array_create(const char *name)
95509552
if (allocate_trace_buffers(tr, trace_buf_size) < 0)
95519553
goto out_free_tr;
95529554

9555+
/* The ring buffer is defaultly expanded */
9556+
trace_set_ring_buffer_expanded(tr);
9557+
95539558
if (ftrace_allocate_ftrace_ops(tr) < 0)
95549559
goto out_free_tr;
95559560

@@ -10444,7 +10449,7 @@ __init static int tracer_alloc_buffers(void)
1044410449
trace_printk_init_buffers();
1044510450

1044610451
/* To save memory, keep the ring buffer size to its minimum */
10447-
if (ring_buffer_expanded)
10452+
if (global_trace.ring_buffer_expanded)
1044810453
ring_buf_size = trace_buf_size;
1044910454
else
1045010455
ring_buf_size = 1;

kernel/trace/trace.h

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -410,6 +410,11 @@ struct trace_array {
410410
struct cond_snapshot *cond_snapshot;
411411
#endif
412412
struct trace_func_repeats __percpu *last_func_repeats;
413+
/*
414+
* On boot up, the ring buffer is set to the minimum size, so that
415+
* we do not waste memory on systems that are not using tracing.
416+
*/
417+
bool ring_buffer_expanded;
413418
};
414419

415420
enum {
@@ -761,7 +766,7 @@ extern int DYN_FTRACE_TEST_NAME(void);
761766
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
762767
extern int DYN_FTRACE_TEST_NAME2(void);
763768

764-
extern bool ring_buffer_expanded;
769+
extern void trace_set_ring_buffer_expanded(struct trace_array *tr);
765770
extern bool tracing_selftest_disabled;
766771

767772
#ifdef CONFIG_FTRACE_STARTUP_TEST
@@ -1305,7 +1310,7 @@ static inline void trace_branch_disable(void)
13051310
#endif /* CONFIG_BRANCH_TRACER */
13061311

13071312
/* set ring buffers to default size if not already done so */
1308-
int tracing_update_buffers(void);
1313+
int tracing_update_buffers(struct trace_array *tr);
13091314

13101315
union trace_synth_field {
13111316
u8 as_u8;

kernel/trace/trace_events.c

Lines changed: 12 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1166,7 +1166,7 @@ ftrace_event_write(struct file *file, const char __user *ubuf,
11661166
if (!cnt)
11671167
return 0;
11681168

1169-
ret = tracing_update_buffers();
1169+
ret = tracing_update_buffers(tr);
11701170
if (ret < 0)
11711171
return ret;
11721172

@@ -1397,18 +1397,20 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
13971397
if (ret)
13981398
return ret;
13991399

1400-
ret = tracing_update_buffers();
1401-
if (ret < 0)
1402-
return ret;
1403-
14041400
switch (val) {
14051401
case 0:
14061402
case 1:
14071403
ret = -ENODEV;
14081404
mutex_lock(&event_mutex);
14091405
file = event_file_data(filp);
1410-
if (likely(file))
1406+
if (likely(file)) {
1407+
ret = tracing_update_buffers(file->tr);
1408+
if (ret < 0) {
1409+
mutex_unlock(&event_mutex);
1410+
return ret;
1411+
}
14111412
ret = ftrace_event_enable_disable(file, val);
1413+
}
14121414
mutex_unlock(&event_mutex);
14131415
break;
14141416

@@ -1482,7 +1484,7 @@ system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
14821484
if (ret)
14831485
return ret;
14841486

1485-
ret = tracing_update_buffers();
1487+
ret = tracing_update_buffers(dir->tr);
14861488
if (ret < 0)
14871489
return ret;
14881490

@@ -1956,7 +1958,7 @@ event_pid_write(struct file *filp, const char __user *ubuf,
19561958
if (!cnt)
19571959
return 0;
19581960

1959-
ret = tracing_update_buffers();
1961+
ret = tracing_update_buffers(tr);
19601962
if (ret < 0)
19611963
return ret;
19621964

@@ -2824,7 +2826,7 @@ static __init int setup_trace_triggers(char *str)
28242826
int i;
28252827

28262828
strscpy(bootup_trigger_buf, str, COMMAND_LINE_SIZE);
2827-
ring_buffer_expanded = true;
2829+
trace_set_ring_buffer_expanded(NULL);
28282830
disable_tracing_selftest("running event triggers");
28292831

28302832
buf = bootup_trigger_buf;
@@ -3614,7 +3616,7 @@ static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
36143616
static __init int setup_trace_event(char *str)
36153617
{
36163618
strscpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
3617-
ring_buffer_expanded = true;
3619+
trace_set_ring_buffer_expanded(NULL);
36183620
disable_tracing_selftest("running event tracing");
36193621

36203622
return 1;

0 commit comments

Comments (0)