Skip to content

Commit 5254de7

Browse files
ameryhung authored and Martin KaFai Lau committed
bpf: Remove cgroup local storage percpu counter
The percpu counter in cgroup local storage is no longer needed as the underlying bpf_local_storage can now handle deadlock with the help of rqspinlock. Remove the percpu counter and related migrate_{disable, enable}.

Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Amery Hung <ameryhung@gmail.com>
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Link: https://patch.msgid.link/20260205222916.1788211-8-ameryhung@gmail.com
1 parent 4a98c2e commit 5254de7

1 file changed

Lines changed: 8 additions & 51 deletions

File tree

kernel/bpf/bpf_cgrp_storage.c

Lines changed: 8 additions & 51 deletions
Original file line numberDiff line numberDiff line change
@@ -11,29 +11,6 @@
1111

1212
DEFINE_BPF_STORAGE_CACHE(cgroup_cache);
1313

14-
static DEFINE_PER_CPU(int, bpf_cgrp_storage_busy);
15-
16-
static void bpf_cgrp_storage_lock(void)
17-
{
18-
cant_migrate();
19-
this_cpu_inc(bpf_cgrp_storage_busy);
20-
}
21-
22-
static void bpf_cgrp_storage_unlock(void)
23-
{
24-
this_cpu_dec(bpf_cgrp_storage_busy);
25-
}
26-
27-
static bool bpf_cgrp_storage_trylock(void)
28-
{
29-
cant_migrate();
30-
if (unlikely(this_cpu_inc_return(bpf_cgrp_storage_busy) != 1)) {
31-
this_cpu_dec(bpf_cgrp_storage_busy);
32-
return false;
33-
}
34-
return true;
35-
}
36-
3714
static struct bpf_local_storage __rcu **cgroup_storage_ptr(void *owner)
3815
{
3916
struct cgroup *cg = owner;
@@ -45,16 +22,14 @@ void bpf_cgrp_storage_free(struct cgroup *cgroup)
4522
{
4623
struct bpf_local_storage *local_storage;
4724

48-
rcu_read_lock_dont_migrate();
25+
rcu_read_lock();
4926
local_storage = rcu_dereference(cgroup->bpf_cgrp_storage);
5027
if (!local_storage)
5128
goto out;
5229

53-
bpf_cgrp_storage_lock();
5430
bpf_local_storage_destroy(local_storage);
55-
bpf_cgrp_storage_unlock();
5631
out:
57-
rcu_read_unlock_migrate();
32+
rcu_read_unlock();
5833
}
5934

6035
static struct bpf_local_storage_data *
@@ -83,9 +58,7 @@ static void *bpf_cgrp_storage_lookup_elem(struct bpf_map *map, void *key)
8358
if (IS_ERR(cgroup))
8459
return ERR_CAST(cgroup);
8560

86-
bpf_cgrp_storage_lock();
8761
sdata = cgroup_storage_lookup(cgroup, map, true);
88-
bpf_cgrp_storage_unlock();
8962
cgroup_put(cgroup);
9063
return sdata ? sdata->data : NULL;
9164
}
@@ -102,10 +75,8 @@ static long bpf_cgrp_storage_update_elem(struct bpf_map *map, void *key,
10275
if (IS_ERR(cgroup))
10376
return PTR_ERR(cgroup);
10477

105-
bpf_cgrp_storage_lock();
10678
sdata = bpf_local_storage_update(cgroup, (struct bpf_local_storage_map *)map,
10779
value, map_flags, false, GFP_ATOMIC);
108-
bpf_cgrp_storage_unlock();
10980
cgroup_put(cgroup);
11081
return PTR_ERR_OR_ZERO(sdata);
11182
}
@@ -131,9 +102,7 @@ static long bpf_cgrp_storage_delete_elem(struct bpf_map *map, void *key)
131102
if (IS_ERR(cgroup))
132103
return PTR_ERR(cgroup);
133104

134-
bpf_cgrp_storage_lock();
135105
err = cgroup_storage_delete(cgroup, map);
136-
bpf_cgrp_storage_unlock();
137106
cgroup_put(cgroup);
138107
return err;
139108
}
@@ -150,15 +119,14 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
150119

151120
static void cgroup_storage_map_free(struct bpf_map *map)
152121
{
153-
bpf_local_storage_map_free(map, &cgroup_cache, &bpf_cgrp_storage_busy);
122+
bpf_local_storage_map_free(map, &cgroup_cache, NULL);
154123
}
155124

156125
/* *gfp_flags* is a hidden argument provided by the verifier */
157126
BPF_CALL_5(bpf_cgrp_storage_get, struct bpf_map *, map, struct cgroup *, cgroup,
158127
void *, value, u64, flags, gfp_t, gfp_flags)
159128
{
160129
struct bpf_local_storage_data *sdata;
161-
bool nobusy;
162130

163131
WARN_ON_ONCE(!bpf_rcu_lock_held());
164132
if (flags & ~(BPF_LOCAL_STORAGE_GET_F_CREATE))
@@ -167,38 +135,27 @@ BPF_CALL_5(bpf_cgrp_storage_get, struct bpf_map *, map, struct cgroup *, cgroup,
167135
if (!cgroup)
168136
return (unsigned long)NULL;
169137

170-
nobusy = bpf_cgrp_storage_trylock();
171-
172-
sdata = cgroup_storage_lookup(cgroup, map, nobusy);
138+
sdata = cgroup_storage_lookup(cgroup, map, true);
173139
if (sdata)
174-
goto unlock;
140+
goto out;
175141

176142
/* only allocate new storage, when the cgroup is refcounted */
177143
if (!percpu_ref_is_dying(&cgroup->self.refcnt) &&
178-
(flags & BPF_LOCAL_STORAGE_GET_F_CREATE) && nobusy)
144+
(flags & BPF_LOCAL_STORAGE_GET_F_CREATE))
179145
sdata = bpf_local_storage_update(cgroup, (struct bpf_local_storage_map *)map,
180146
value, BPF_NOEXIST, false, gfp_flags);
181147

182-
unlock:
183-
if (nobusy)
184-
bpf_cgrp_storage_unlock();
148+
out:
185149
return IS_ERR_OR_NULL(sdata) ? (unsigned long)NULL : (unsigned long)sdata->data;
186150
}
187151

188152
BPF_CALL_2(bpf_cgrp_storage_delete, struct bpf_map *, map, struct cgroup *, cgroup)
189153
{
190-
int ret;
191-
192154
WARN_ON_ONCE(!bpf_rcu_lock_held());
193155
if (!cgroup)
194156
return -EINVAL;
195157

196-
if (!bpf_cgrp_storage_trylock())
197-
return -EBUSY;
198-
199-
ret = cgroup_storage_delete(cgroup, map);
200-
bpf_cgrp_storage_unlock();
201-
return ret;
158+
return cgroup_storage_delete(cgroup, map);
202159
}
203160

204161
const struct bpf_map_ops cgrp_storage_map_ops = {

0 commit comments

Comments
 (0)