Skip to content

Commit e0fa652

Browse files
committed
Merge branch 'add libbpf getters for individual ringbuffers'
Martin Kelly says: ==================== This patch series adds a new ring__ API to libbpf exposing getters for accessing the individual ringbuffers inside a struct ring_buffer. This is useful for polling individually, getting available data, or similar use cases. The API looks like this, and was roughly proposed by Andrii Nakryiko in another thread: Getting a ring struct: struct ring *ring_buffer__ring(struct ring_buffer *rb, unsigned int idx); Using the ring struct: unsigned long ring__consumer_pos(const struct ring *r); unsigned long ring__producer_pos(const struct ring *r); size_t ring__avail_data_size(const struct ring *r); size_t ring__size(const struct ring *r); int ring__map_fd(const struct ring *r); int ring__consume(struct ring *r); Changes in v2: - Addressed all feedback from Andrii Nakryiko ==================== Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
2 parents 831916f + cb3d7dd commit e0fa652

5 files changed

Lines changed: 193 additions & 13 deletions

File tree

tools/lib/bpf/libbpf.h

Lines changed: 73 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1229,6 +1229,7 @@ LIBBPF_API int bpf_tc_query(const struct bpf_tc_hook *hook,
12291229

12301230
/* Ring buffer APIs */
12311231
struct ring_buffer;
1232+
struct ring;
12321233
struct user_ring_buffer;
12331234

12341235
typedef int (*ring_buffer_sample_fn)(void *ctx, void *data, size_t size);
@@ -1249,6 +1250,78 @@ LIBBPF_API int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms);
12491250
LIBBPF_API int ring_buffer__consume(struct ring_buffer *rb);
12501251
LIBBPF_API int ring_buffer__epoll_fd(const struct ring_buffer *rb);
12511252

1253+
/**
1254+
* @brief **ring_buffer__ring()** returns the ringbuffer object inside a given
1255+
* ringbuffer manager representing a single BPF_MAP_TYPE_RINGBUF map instance.
1256+
*
1257+
* @param rb A ringbuffer manager object.
1258+
* @param idx An index into the ringbuffers contained within the ringbuffer
1259+
* manager object. The index is 0-based and corresponds to the order in which
1260+
* ring_buffer__add was called.
1261+
* @return A ringbuffer object on success; NULL and errno set if the index is
1262+
* invalid.
1263+
*/
1264+
LIBBPF_API struct ring *ring_buffer__ring(struct ring_buffer *rb,
1265+
unsigned int idx);
1266+
1267+
/**
1268+
* @brief **ring__consumer_pos()** returns the current consumer position in the
1269+
* given ringbuffer.
1270+
*
1271+
* @param r A ringbuffer object.
1272+
* @return The current consumer position.
1273+
*/
1274+
LIBBPF_API unsigned long ring__consumer_pos(const struct ring *r);
1275+
1276+
/**
1277+
* @brief **ring__producer_pos()** returns the current producer position in the
1278+
* given ringbuffer.
1279+
*
1280+
* @param r A ringbuffer object.
1281+
* @return The current producer position.
1282+
*/
1283+
LIBBPF_API unsigned long ring__producer_pos(const struct ring *r);
1284+
1285+
/**
1286+
* @brief **ring__avail_data_size()** returns the number of bytes in the
1287+
* ringbuffer not yet consumed. This has no locking associated with it, so it
1288+
* can be inaccurate if operations are ongoing while this is called. However, it
1289+
* should still show the correct trend over the long-term.
1290+
*
1291+
* @param r A ringbuffer object.
1292+
* @return The number of bytes not yet consumed.
1293+
*/
1294+
LIBBPF_API size_t ring__avail_data_size(const struct ring *r);
1295+
1296+
/**
1297+
* @brief **ring__size()** returns the total size of the ringbuffer's map data
1298+
* area (excluding special producer/consumer pages). Effectively this gives the
1299+
* amount of usable bytes of data inside the ringbuffer.
1300+
*
1301+
* @param r A ringbuffer object.
1302+
* @return The total size of the ringbuffer map data area.
1303+
*/
1304+
LIBBPF_API size_t ring__size(const struct ring *r);
1305+
1306+
/**
1307+
* @brief **ring__map_fd()** returns the file descriptor underlying the given
1308+
* ringbuffer.
1309+
*
1310+
* @param r A ringbuffer object.
1311+
* @return The underlying ringbuffer file descriptor.
1312+
*/
1313+
LIBBPF_API int ring__map_fd(const struct ring *r);
1314+
1315+
/**
1316+
* @brief **ring__consume()** consumes available ringbuffer data without event
1317+
* polling.
1318+
*
1319+
* @param r A ringbuffer object.
1320+
* @return The number of records consumed (or INT_MAX, whichever is less), or
1321+
* a negative number if any of the callbacks return an error.
1322+
*/
1323+
LIBBPF_API int ring__consume(struct ring *r);
1324+
12521325
struct user_ring_buffer_opts {
12531326
size_t sz; /* size of this struct, for forward/backward compatibility */
12541327
};

tools/lib/bpf/libbpf.map

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -400,4 +400,11 @@ LIBBPF_1.3.0 {
400400
bpf_program__attach_netfilter;
401401
bpf_program__attach_tcx;
402402
bpf_program__attach_uprobe_multi;
403+
ring__avail_data_size;
404+
ring__consume;
405+
ring__consumer_pos;
406+
ring__map_fd;
407+
ring__producer_pos;
408+
ring__size;
409+
ring_buffer__ring;
403410
} LIBBPF_1.2.0;

tools/lib/bpf/ringbuf.c

Lines changed: 72 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ struct ring {
3434

3535
struct ring_buffer {
3636
struct epoll_event *events;
37-
struct ring *rings;
37+
struct ring **rings;
3838
size_t page_size;
3939
int epoll_fd;
4040
int ring_cnt;
@@ -57,7 +57,7 @@ struct ringbuf_hdr {
5757
__u32 pad;
5858
};
5959

60-
static void ringbuf_unmap_ring(struct ring_buffer *rb, struct ring *r)
60+
static void ringbuf_free_ring(struct ring_buffer *rb, struct ring *r)
6161
{
6262
if (r->consumer_pos) {
6363
munmap(r->consumer_pos, rb->page_size);
@@ -67,6 +67,8 @@ static void ringbuf_unmap_ring(struct ring_buffer *rb, struct ring *r)
6767
munmap(r->producer_pos, rb->page_size + 2 * (r->mask + 1));
6868
r->producer_pos = NULL;
6969
}
70+
71+
free(r);
7072
}
7173

7274
/* Add extra RINGBUF maps to this ring buffer manager */
@@ -107,8 +109,10 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
107109
return libbpf_err(-ENOMEM);
108110
rb->events = tmp;
109111

110-
r = &rb->rings[rb->ring_cnt];
111-
memset(r, 0, sizeof(*r));
112+
r = calloc(1, sizeof(*r));
113+
if (!r)
114+
return libbpf_err(-ENOMEM);
115+
rb->rings[rb->ring_cnt] = r;
112116

113117
r->map_fd = map_fd;
114118
r->sample_cb = sample_cb;
@@ -121,7 +125,7 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
121125
err = -errno;
122126
pr_warn("ringbuf: failed to mmap consumer page for map fd=%d: %d\n",
123127
map_fd, err);
124-
return libbpf_err(err);
128+
goto err_out;
125129
}
126130
r->consumer_pos = tmp;
127131

@@ -131,16 +135,16 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
131135
*/
132136
mmap_sz = rb->page_size + 2 * (__u64)info.max_entries;
133137
if (mmap_sz != (__u64)(size_t)mmap_sz) {
138+
err = -E2BIG;
134139
pr_warn("ringbuf: ring buffer size (%u) is too big\n", info.max_entries);
135-
return libbpf_err(-E2BIG);
140+
goto err_out;
136141
}
137142
tmp = mmap(NULL, (size_t)mmap_sz, PROT_READ, MAP_SHARED, map_fd, rb->page_size);
138143
if (tmp == MAP_FAILED) {
139144
err = -errno;
140-
ringbuf_unmap_ring(rb, r);
141145
pr_warn("ringbuf: failed to mmap data pages for map fd=%d: %d\n",
142146
map_fd, err);
143-
return libbpf_err(err);
147+
goto err_out;
144148
}
145149
r->producer_pos = tmp;
146150
r->data = tmp + rb->page_size;
@@ -152,14 +156,17 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
152156
e->data.fd = rb->ring_cnt;
153157
if (epoll_ctl(rb->epoll_fd, EPOLL_CTL_ADD, map_fd, e) < 0) {
154158
err = -errno;
155-
ringbuf_unmap_ring(rb, r);
156159
pr_warn("ringbuf: failed to epoll add map fd=%d: %d\n",
157160
map_fd, err);
158-
return libbpf_err(err);
161+
goto err_out;
159162
}
160163

161164
rb->ring_cnt++;
162165
return 0;
166+
167+
err_out:
168+
ringbuf_free_ring(rb, r);
169+
return libbpf_err(err);
163170
}
164171

165172
void ring_buffer__free(struct ring_buffer *rb)
@@ -170,7 +177,7 @@ void ring_buffer__free(struct ring_buffer *rb)
170177
return;
171178

172179
for (i = 0; i < rb->ring_cnt; ++i)
173-
ringbuf_unmap_ring(rb, &rb->rings[i]);
180+
ringbuf_free_ring(rb, rb->rings[i]);
174181
if (rb->epoll_fd >= 0)
175182
close(rb->epoll_fd);
176183

@@ -278,7 +285,7 @@ int ring_buffer__consume(struct ring_buffer *rb)
278285
int i;
279286

280287
for (i = 0; i < rb->ring_cnt; i++) {
281-
struct ring *ring = &rb->rings[i];
288+
struct ring *ring = rb->rings[i];
282289

283290
err = ringbuf_process_ring(ring);
284291
if (err < 0)
@@ -305,7 +312,7 @@ int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms)
305312

306313
for (i = 0; i < cnt; i++) {
307314
__u32 ring_id = rb->events[i].data.fd;
308-
struct ring *ring = &rb->rings[ring_id];
315+
struct ring *ring = rb->rings[ring_id];
309316

310317
err = ringbuf_process_ring(ring);
311318
if (err < 0)
@@ -323,6 +330,58 @@ int ring_buffer__epoll_fd(const struct ring_buffer *rb)
323330
return rb->epoll_fd;
324331
}
325332

333+
struct ring *ring_buffer__ring(struct ring_buffer *rb, unsigned int idx)
334+
{
335+
if (idx >= rb->ring_cnt)
336+
return errno = ERANGE, NULL;
337+
338+
return rb->rings[idx];
339+
}
340+
341+
unsigned long ring__consumer_pos(const struct ring *r)
342+
{
343+
/* Synchronizes with smp_store_release() in ringbuf_process_ring(). */
344+
return smp_load_acquire(r->consumer_pos);
345+
}
346+
347+
unsigned long ring__producer_pos(const struct ring *r)
348+
{
349+
/* Synchronizes with smp_store_release() in __bpf_ringbuf_reserve() in
350+
* the kernel.
351+
*/
352+
return smp_load_acquire(r->producer_pos);
353+
}
354+
355+
size_t ring__avail_data_size(const struct ring *r)
{
	/* Unsynchronized snapshot: either position may move while we read
	 * the other, so the result is an estimate, but the producer minus
	 * consumer delta still tracks the unconsumed byte count over time.
	 */
	return ring__producer_pos(r) - ring__consumer_pos(r);
}
363+
364+
size_t ring__size(const struct ring *r)
365+
{
366+
return r->mask + 1;
367+
}
368+
369+
int ring__map_fd(const struct ring *r)
370+
{
371+
return r->map_fd;
372+
}
373+
374+
int ring__consume(struct ring *r)
{
	int64_t n = ringbuf_process_ring(r);

	if (n < 0)
		return libbpf_err(n);

	/* Clamp so the consumed-record count fits the int return type. */
	return n > INT_MAX ? INT_MAX : n;
}
384+
326385
static void user_ringbuf_unmap_ring(struct user_ring_buffer *rb)
327386
{
328387
if (rb->consumer_pos) {

tools/testing/selftests/bpf/prog_tests/ringbuf.c

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -91,6 +91,9 @@ static void ringbuf_subtest(void)
9191
int err, cnt, rb_fd;
9292
int page_size = getpagesize();
9393
void *mmap_ptr, *tmp_ptr;
94+
struct ring *ring;
95+
int map_fd;
96+
unsigned long avail_data, ring_size, cons_pos, prod_pos;
9497

9598
skel = test_ringbuf_lskel__open();
9699
if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
@@ -162,6 +165,13 @@ static void ringbuf_subtest(void)
162165

163166
trigger_samples();
164167

168+
ring = ring_buffer__ring(ringbuf, 0);
169+
if (!ASSERT_OK_PTR(ring, "ring_buffer__ring_idx_0"))
170+
goto cleanup;
171+
172+
map_fd = ring__map_fd(ring);
173+
ASSERT_EQ(map_fd, skel->maps.ringbuf.map_fd, "ring_map_fd");
174+
165175
/* 2 submitted + 1 discarded records */
166176
CHECK(skel->bss->avail_data != 3 * rec_sz,
167177
"err_avail_size", "exp %ld, got %ld\n",
@@ -176,6 +186,18 @@ static void ringbuf_subtest(void)
176186
"err_prod_pos", "exp %ld, got %ld\n",
177187
3L * rec_sz, skel->bss->prod_pos);
178188

189+
/* verify getting this data directly via the ring object yields the same
190+
* results
191+
*/
192+
avail_data = ring__avail_data_size(ring);
193+
ASSERT_EQ(avail_data, 3 * rec_sz, "ring_avail_size");
194+
ring_size = ring__size(ring);
195+
ASSERT_EQ(ring_size, page_size, "ring_ring_size");
196+
cons_pos = ring__consumer_pos(ring);
197+
ASSERT_EQ(cons_pos, 0, "ring_cons_pos");
198+
prod_pos = ring__producer_pos(ring);
199+
ASSERT_EQ(prod_pos, 3 * rec_sz, "ring_prod_pos");
200+
179201
/* poll for samples */
180202
err = ring_buffer__poll(ringbuf, -1);
181203

@@ -282,6 +304,10 @@ static void ringbuf_subtest(void)
282304
err = ring_buffer__consume(ringbuf);
283305
CHECK(err < 0, "rb_consume", "failed: %d\b", err);
284306

307+
/* also consume using ring__consume to make sure it works the same */
308+
err = ring__consume(ring);
309+
ASSERT_GE(err, 0, "ring_consume");
310+
285311
/* 3 rounds, 2 samples each */
286312
cnt = atomic_xchg(&sample_cnt, 0);
287313
CHECK(cnt != 6, "cnt", "exp %d samples, got %d\n", 6, cnt);

tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -42,6 +42,8 @@ void test_ringbuf_multi(void)
4242
{
4343
struct test_ringbuf_multi *skel;
4444
struct ring_buffer *ringbuf = NULL;
45+
struct ring *ring_old;
46+
struct ring *ring;
4547
int err;
4648
int page_size = getpagesize();
4749
int proto_fd = -1;
@@ -84,11 +86,24 @@ void test_ringbuf_multi(void)
8486
if (CHECK(!ringbuf, "ringbuf_create", "failed to create ringbuf\n"))
8587
goto cleanup;
8688

89+
/* verify ring_buffer__ring returns expected results */
90+
ring = ring_buffer__ring(ringbuf, 0);
91+
if (!ASSERT_OK_PTR(ring, "ring_buffer__ring_idx_0"))
92+
goto cleanup;
93+
ring_old = ring;
94+
ring = ring_buffer__ring(ringbuf, 1);
95+
ASSERT_ERR_PTR(ring, "ring_buffer__ring_idx_1");
96+
8797
err = ring_buffer__add(ringbuf, bpf_map__fd(skel->maps.ringbuf2),
8898
process_sample, (void *)(long)2);
8999
if (CHECK(err, "ringbuf_add", "failed to add another ring\n"))
90100
goto cleanup;
91101

102+
/* verify adding a new ring didn't invalidate our older pointer */
103+
ring = ring_buffer__ring(ringbuf, 0);
104+
if (!ASSERT_EQ(ring, ring_old, "ring_buffer__ring_again"))
105+
goto cleanup;
106+
92107
err = test_ringbuf_multi__attach(skel);
93108
if (CHECK(err, "skel_attach", "skeleton attachment failed: %d\n", err))
94109
goto cleanup;

0 commit comments

Comments
 (0)