Skip to content

Commit 7d3851a

Browse files
Martin KaFai Lau authored and Alexei Starovoitov committed
selftests/bpf: Sanitize the SEC and inline usages in the bpf-tcp-cc tests
It is needed to remove the BPF_STRUCT_OPS usages from the tcp-cc tests because it is defined in bpf_tcp_helpers.h which is going to be retired. While at it, this patch consolidates all tcp-cc struct_ops programs to use the SEC("struct_ops") + BPF_PROG(). It also removes the unnecessary __always_inline usages from the tcp-cc tests. Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org> Link: https://lore.kernel.org/r/20240509175026.3423614-5-martin.lau@linux.dev Signed-off-by: Alexei Starovoitov <ast@kernel.org>
1 parent cc5b18c commit 7d3851a

10 files changed

Lines changed: 77 additions & 75 deletions

tools/testing/selftests/bpf/progs/bpf_cc_cubic.c

Lines changed: 15 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -17,10 +17,6 @@
1717
#include <bpf/bpf_helpers.h>
1818
#include <bpf/bpf_tracing.h>
1919

20-
#define BPF_STRUCT_OPS(name, args...) \
21-
SEC("struct_ops/"#name) \
22-
BPF_PROG(name, args)
23-
2420
#define USEC_PER_SEC 1000000UL
2521
#define TCP_PACING_SS_RATIO (200)
2622
#define TCP_PACING_CA_RATIO (120)
@@ -114,18 +110,21 @@ static bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
114110
return flag & FLAG_DATA_ACKED;
115111
}
116112

117-
void BPF_STRUCT_OPS(bpf_cubic_init, struct sock *sk)
113+
SEC("struct_ops")
114+
void BPF_PROG(bpf_cubic_init, struct sock *sk)
118115
{
119116
cubictcp_init(sk);
120117
}
121118

122-
void BPF_STRUCT_OPS(bpf_cubic_cwnd_event, struct sock *sk, enum tcp_ca_event event)
119+
SEC("struct_ops")
120+
void BPF_PROG(bpf_cubic_cwnd_event, struct sock *sk, enum tcp_ca_event event)
123121
{
124122
cubictcp_cwnd_event(sk, event);
125123
}
126124

127-
void BPF_STRUCT_OPS(bpf_cubic_cong_control, struct sock *sk, __u32 ack, int flag,
128-
const struct rate_sample *rs)
125+
SEC("struct_ops")
126+
void BPF_PROG(bpf_cubic_cong_control, struct sock *sk, __u32 ack, int flag,
127+
const struct rate_sample *rs)
129128
{
130129
struct tcp_sock *tp = tcp_sk(sk);
131130

@@ -151,23 +150,26 @@ void BPF_STRUCT_OPS(bpf_cubic_cong_control, struct sock *sk, __u32 ack, int flag
151150
tcp_update_pacing_rate(sk);
152151
}
153152

154-
__u32 BPF_STRUCT_OPS(bpf_cubic_recalc_ssthresh, struct sock *sk)
153+
SEC("struct_ops")
154+
__u32 BPF_PROG(bpf_cubic_recalc_ssthresh, struct sock *sk)
155155
{
156156
return cubictcp_recalc_ssthresh(sk);
157157
}
158158

159-
void BPF_STRUCT_OPS(bpf_cubic_state, struct sock *sk, __u8 new_state)
159+
SEC("struct_ops")
160+
void BPF_PROG(bpf_cubic_state, struct sock *sk, __u8 new_state)
160161
{
161162
cubictcp_state(sk, new_state);
162163
}
163164

164-
void BPF_STRUCT_OPS(bpf_cubic_acked, struct sock *sk,
165-
const struct ack_sample *sample)
165+
SEC("struct_ops")
166+
void BPF_PROG(bpf_cubic_acked, struct sock *sk, const struct ack_sample *sample)
166167
{
167168
cubictcp_acked(sk, sample);
168169
}
169170

170-
__u32 BPF_STRUCT_OPS(bpf_cubic_undo_cwnd, struct sock *sk)
171+
SEC("struct_ops")
172+
__u32 BPF_PROG(bpf_cubic_undo_cwnd, struct sock *sk)
171173
{
172174
return tcp_reno_undo_cwnd(sk);
173175
}

tools/testing/selftests/bpf/progs/bpf_cubic.c

Lines changed: 21 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -91,7 +91,7 @@ struct bictcp {
9191
__u32 curr_rtt; /* the minimum rtt of current round */
9292
};
9393

94-
static inline void bictcp_reset(struct bictcp *ca)
94+
static void bictcp_reset(struct bictcp *ca)
9595
{
9696
ca->cnt = 0;
9797
ca->last_max_cwnd = 0;
@@ -112,15 +112,15 @@ extern unsigned long CONFIG_HZ __kconfig;
112112
#define USEC_PER_SEC 1000000UL
113113
#define USEC_PER_JIFFY (USEC_PER_SEC / HZ)
114114

115-
static __always_inline __u64 div64_u64(__u64 dividend, __u64 divisor)
115+
static __u64 div64_u64(__u64 dividend, __u64 divisor)
116116
{
117117
return dividend / divisor;
118118
}
119119

120120
#define div64_ul div64_u64
121121

122122
#define BITS_PER_U64 (sizeof(__u64) * 8)
123-
static __always_inline int fls64(__u64 x)
123+
static int fls64(__u64 x)
124124
{
125125
int num = BITS_PER_U64 - 1;
126126

@@ -153,12 +153,12 @@ static __always_inline int fls64(__u64 x)
153153
return num + 1;
154154
}
155155

156-
static __always_inline __u32 bictcp_clock_us(const struct sock *sk)
156+
static __u32 bictcp_clock_us(const struct sock *sk)
157157
{
158158
return tcp_sk(sk)->tcp_mstamp;
159159
}
160160

161-
static __always_inline void bictcp_hystart_reset(struct sock *sk)
161+
static void bictcp_hystart_reset(struct sock *sk)
162162
{
163163
struct tcp_sock *tp = tcp_sk(sk);
164164
struct bictcp *ca = inet_csk_ca(sk);
@@ -169,8 +169,7 @@ static __always_inline void bictcp_hystart_reset(struct sock *sk)
169169
ca->sample_cnt = 0;
170170
}
171171

172-
/* "struct_ops/" prefix is a requirement */
173-
SEC("struct_ops/bpf_cubic_init")
172+
SEC("struct_ops")
174173
void BPF_PROG(bpf_cubic_init, struct sock *sk)
175174
{
176175
struct bictcp *ca = inet_csk_ca(sk);
@@ -184,8 +183,7 @@ void BPF_PROG(bpf_cubic_init, struct sock *sk)
184183
tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
185184
}
186185

187-
/* "struct_ops" prefix is a requirement */
188-
SEC("struct_ops/bpf_cubic_cwnd_event")
186+
SEC("struct_ops")
189187
void BPF_PROG(bpf_cubic_cwnd_event, struct sock *sk, enum tcp_ca_event event)
190188
{
191189
if (event == CA_EVENT_TX_START) {
@@ -230,7 +228,7 @@ static const __u8 v[] = {
230228
* Newton-Raphson iteration.
231229
* Avg err ~= 0.195%
232230
*/
233-
static __always_inline __u32 cubic_root(__u64 a)
231+
static __u32 cubic_root(__u64 a)
234232
{
235233
__u32 x, b, shift;
236234

@@ -263,8 +261,7 @@ static __always_inline __u32 cubic_root(__u64 a)
263261
/*
264262
* Compute congestion window to use.
265263
*/
266-
static __always_inline void bictcp_update(struct bictcp *ca, __u32 cwnd,
267-
__u32 acked)
264+
static void bictcp_update(struct bictcp *ca, __u32 cwnd, __u32 acked)
268265
{
269266
__u32 delta, bic_target, max_cnt;
270267
__u64 offs, t;
@@ -377,8 +374,8 @@ static __always_inline void bictcp_update(struct bictcp *ca, __u32 cwnd,
377374
ca->cnt = max(ca->cnt, 2U);
378375
}
379376

380-
/* Or simply use the BPF_STRUCT_OPS to avoid the SEC boiler plate. */
381-
void BPF_STRUCT_OPS(bpf_cubic_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
377+
SEC("struct_ops")
378+
void BPF_PROG(bpf_cubic_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
382379
{
383380
struct tcp_sock *tp = tcp_sk(sk);
384381
struct bictcp *ca = inet_csk_ca(sk);
@@ -397,7 +394,8 @@ void BPF_STRUCT_OPS(bpf_cubic_cong_avoid, struct sock *sk, __u32 ack, __u32 acke
397394
tcp_cong_avoid_ai(tp, ca->cnt, acked);
398395
}
399396

400-
__u32 BPF_STRUCT_OPS(bpf_cubic_recalc_ssthresh, struct sock *sk)
397+
SEC("struct_ops")
398+
__u32 BPF_PROG(bpf_cubic_recalc_ssthresh, struct sock *sk)
401399
{
402400
const struct tcp_sock *tp = tcp_sk(sk);
403401
struct bictcp *ca = inet_csk_ca(sk);
@@ -414,7 +412,8 @@ __u32 BPF_STRUCT_OPS(bpf_cubic_recalc_ssthresh, struct sock *sk)
414412
return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
415413
}
416414

417-
void BPF_STRUCT_OPS(bpf_cubic_state, struct sock *sk, __u8 new_state)
415+
SEC("struct_ops")
416+
void BPF_PROG(bpf_cubic_state, struct sock *sk, __u8 new_state)
418417
{
419418
if (new_state == TCP_CA_Loss) {
420419
bictcp_reset(inet_csk_ca(sk));
@@ -433,7 +432,7 @@ void BPF_STRUCT_OPS(bpf_cubic_state, struct sock *sk, __u8 new_state)
433432
* We apply another 100% factor because @rate is doubled at this point.
434433
* We cap the cushion to 1ms.
435434
*/
436-
static __always_inline __u32 hystart_ack_delay(struct sock *sk)
435+
static __u32 hystart_ack_delay(struct sock *sk)
437436
{
438437
unsigned long rate;
439438

@@ -444,7 +443,7 @@ static __always_inline __u32 hystart_ack_delay(struct sock *sk)
444443
div64_ul((__u64)GSO_MAX_SIZE * 4 * USEC_PER_SEC, rate));
445444
}
446445

447-
static __always_inline void hystart_update(struct sock *sk, __u32 delay)
446+
static void hystart_update(struct sock *sk, __u32 delay)
448447
{
449448
struct tcp_sock *tp = tcp_sk(sk);
450449
struct bictcp *ca = inet_csk_ca(sk);
@@ -492,8 +491,8 @@ static __always_inline void hystart_update(struct sock *sk, __u32 delay)
492491

493492
int bpf_cubic_acked_called = 0;
494493

495-
void BPF_STRUCT_OPS(bpf_cubic_acked, struct sock *sk,
496-
const struct ack_sample *sample)
494+
SEC("struct_ops")
495+
void BPF_PROG(bpf_cubic_acked, struct sock *sk, const struct ack_sample *sample)
497496
{
498497
const struct tcp_sock *tp = tcp_sk(sk);
499498
struct bictcp *ca = inet_csk_ca(sk);
@@ -524,7 +523,8 @@ void BPF_STRUCT_OPS(bpf_cubic_acked, struct sock *sk,
524523

525524
extern __u32 tcp_reno_undo_cwnd(struct sock *sk) __ksym;
526525

527-
__u32 BPF_STRUCT_OPS(bpf_cubic_undo_cwnd, struct sock *sk)
526+
SEC("struct_ops")
527+
__u32 BPF_PROG(bpf_cubic_undo_cwnd, struct sock *sk)
528528
{
529529
return tcp_reno_undo_cwnd(sk);
530530
}

tools/testing/selftests/bpf/progs/bpf_dctcp.c

Lines changed: 12 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -48,16 +48,15 @@ struct dctcp {
4848
static unsigned int dctcp_shift_g = 4; /* g = 1/2^4 */
4949
static unsigned int dctcp_alpha_on_init = DCTCP_MAX_ALPHA;
5050

51-
static __always_inline void dctcp_reset(const struct tcp_sock *tp,
52-
struct dctcp *ca)
51+
static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)
5352
{
5453
ca->next_seq = tp->snd_nxt;
5554

5655
ca->old_delivered = tp->delivered;
5756
ca->old_delivered_ce = tp->delivered_ce;
5857
}
5958

60-
SEC("struct_ops/dctcp_init")
59+
SEC("struct_ops")
6160
void BPF_PROG(dctcp_init, struct sock *sk)
6261
{
6362
const struct tcp_sock *tp = tcp_sk(sk);
@@ -104,7 +103,7 @@ void BPF_PROG(dctcp_init, struct sock *sk)
104103
dctcp_reset(tp, ca);
105104
}
106105

107-
SEC("struct_ops/dctcp_ssthresh")
106+
SEC("struct_ops")
108107
__u32 BPF_PROG(dctcp_ssthresh, struct sock *sk)
109108
{
110109
struct dctcp *ca = inet_csk_ca(sk);
@@ -114,7 +113,7 @@ __u32 BPF_PROG(dctcp_ssthresh, struct sock *sk)
114113
return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U);
115114
}
116115

117-
SEC("struct_ops/dctcp_update_alpha")
116+
SEC("struct_ops")
118117
void BPF_PROG(dctcp_update_alpha, struct sock *sk, __u32 flags)
119118
{
120119
const struct tcp_sock *tp = tcp_sk(sk);
@@ -144,7 +143,7 @@ void BPF_PROG(dctcp_update_alpha, struct sock *sk, __u32 flags)
144143
}
145144
}
146145

147-
static __always_inline void dctcp_react_to_loss(struct sock *sk)
146+
static void dctcp_react_to_loss(struct sock *sk)
148147
{
149148
struct dctcp *ca = inet_csk_ca(sk);
150149
struct tcp_sock *tp = tcp_sk(sk);
@@ -153,7 +152,7 @@ static __always_inline void dctcp_react_to_loss(struct sock *sk)
153152
tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
154153
}
155154

156-
SEC("struct_ops/dctcp_state")
155+
SEC("struct_ops")
157156
void BPF_PROG(dctcp_state, struct sock *sk, __u8 new_state)
158157
{
159158
if (new_state == TCP_CA_Recovery &&
@@ -164,7 +163,7 @@ void BPF_PROG(dctcp_state, struct sock *sk, __u8 new_state)
164163
*/
165164
}
166165

167-
static __always_inline void dctcp_ece_ack_cwr(struct sock *sk, __u32 ce_state)
166+
static void dctcp_ece_ack_cwr(struct sock *sk, __u32 ce_state)
168167
{
169168
struct tcp_sock *tp = tcp_sk(sk);
170169

@@ -179,9 +178,8 @@ static __always_inline void dctcp_ece_ack_cwr(struct sock *sk, __u32 ce_state)
179178
* S: 0 <- last pkt was non-CE
180179
* 1 <- last pkt was CE
181180
*/
182-
static __always_inline
183-
void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt,
184-
__u32 *prior_rcv_nxt, __u32 *ce_state)
181+
static void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt,
182+
__u32 *prior_rcv_nxt, __u32 *ce_state)
185183
{
186184
__u32 new_ce_state = (evt == CA_EVENT_ECN_IS_CE) ? 1 : 0;
187185

@@ -201,7 +199,7 @@ void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt,
201199
dctcp_ece_ack_cwr(sk, new_ce_state);
202200
}
203201

204-
SEC("struct_ops/dctcp_cwnd_event")
202+
SEC("struct_ops")
205203
void BPF_PROG(dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev)
206204
{
207205
struct dctcp *ca = inet_csk_ca(sk);
@@ -220,7 +218,7 @@ void BPF_PROG(dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev)
220218
}
221219
}
222220

223-
SEC("struct_ops/dctcp_cwnd_undo")
221+
SEC("struct_ops")
224222
__u32 BPF_PROG(dctcp_cwnd_undo, struct sock *sk)
225223
{
226224
const struct dctcp *ca = inet_csk_ca(sk);
@@ -230,7 +228,7 @@ __u32 BPF_PROG(dctcp_cwnd_undo, struct sock *sk)
230228

231229
extern void tcp_reno_cong_avoid(struct sock *sk, __u32 ack, __u32 acked) __ksym;
232230

233-
SEC("struct_ops/dctcp_reno_cong_avoid")
231+
SEC("struct_ops")
234232
void BPF_PROG(dctcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
235233
{
236234
tcp_reno_cong_avoid(sk, ack, acked);

tools/testing/selftests/bpf/progs/bpf_dctcp_release.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,8 @@
1313
char _license[] SEC("license") = "GPL";
1414
const char cubic[] = "cubic";
1515

16-
void BPF_STRUCT_OPS(dctcp_nouse_release, struct sock *sk)
16+
SEC("struct_ops")
17+
void BPF_PROG(dctcp_nouse_release, struct sock *sk)
1718
{
1819
bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
1920
(void *)cubic, sizeof(cubic));

tools/testing/selftests/bpf/progs/bpf_tcp_nogpl.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,8 @@
88

99
char _license[] SEC("license") = "X";
1010

11-
void BPF_STRUCT_OPS(nogpltcp_init, struct sock *sk)
11+
SEC("struct_ops")
12+
void BPF_PROG(nogpltcp_init, struct sock *sk)
1213
{
1314
}
1415

tools/testing/selftests/bpf/progs/tcp_ca_incompl_cong_ops.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6,13 +6,13 @@
66

77
char _license[] SEC("license") = "GPL";
88

9-
SEC("struct_ops/incompl_cong_ops_ssthresh")
9+
SEC("struct_ops")
1010
__u32 BPF_PROG(incompl_cong_ops_ssthresh, struct sock *sk)
1111
{
1212
return tcp_sk(sk)->snd_ssthresh;
1313
}
1414

15-
SEC("struct_ops/incompl_cong_ops_undo_cwnd")
15+
SEC("struct_ops")
1616
__u32 BPF_PROG(incompl_cong_ops_undo_cwnd, struct sock *sk)
1717
{
1818
return tcp_sk(sk)->snd_cwnd;

0 commit comments

Comments (0)