Skip to content

Commit 20d387d

Browse files
Hongru Zhang authored and pcmoore committed
selinux: improve bucket distribution uniformity of avc_hash()
Reuse the already implemented MurmurHash3 algorithm. Under heavy stress
testing (on an 8-core system sustaining over 50,000 authentication events
per second), sampling once per second and taking the mean of 1800 samples
gave the following results:

1. Bucket utilization rate and length of longest chain

   +--------------------------+-----------------------------------------+
   |                          | bucket utilization rate / longest chain |
   |                          +--------------------+--------------------+
   |                          |      no-patch      |     with-patch     |
   +--------------------------+--------------------+--------------------+
   | 512 nodes, 512 buckets   |     52.5% / 7.5    |     60.2% / 5.7    |
   +--------------------------+--------------------+--------------------+
   | 1024 nodes, 512 buckets  |     68.9% / 12.1   |     80.2% / 9.7    |
   +--------------------------+--------------------+--------------------+
   | 2048 nodes, 512 buckets  |     83.7% / 19.4   |     93.4% / 16.3   |
   +--------------------------+--------------------+--------------------+
   | 8192 nodes, 8192 buckets |     49.5% / 11.4   |     60.3% / 7.4    |
   +--------------------------+--------------------+--------------------+

2. avc_search_node latency (total latency of the hash operation and the
   table lookup)

   +--------------------------+-----------------------------------------+
   |                          |   latency of function avc_search_node   |
   |                          +--------------------+--------------------+
   |                          |      no-patch      |     with-patch     |
   +--------------------------+--------------------+--------------------+
   | 512 nodes, 512 buckets   |        87ns        |        84ns        |
   +--------------------------+--------------------+--------------------+
   | 1024 nodes, 512 buckets  |        97ns        |        96ns        |
   +--------------------------+--------------------+--------------------+
   | 2048 nodes, 512 buckets  |       118ns        |       113ns        |
   +--------------------------+--------------------+--------------------+
   | 8192 nodes, 8192 buckets |       106ns        |        99ns        |
   +--------------------------+--------------------+--------------------+

Although MurmurHash3 has higher overhead than the bitwise operations in
the original algorithm, the data shows that MurmurHash3 achieves better
distribution, reducing average lookup time. Consequently, the total
latency of hashing and table lookup is lower than before.

Signed-off-by: Hongru Zhang <zhanghongru@xiaomi.com>
[PM: whitespace fixes]
Signed-off-by: Paul Moore <paul@paul-moore.com>
1 parent 929126e commit 20d387d

3 files changed

Lines changed: 14 additions & 6 deletions

File tree

security/selinux/avc.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@
3030
#include "avc.h"
3131
#include "avc_ss.h"
3232
#include "classmap.h"
33+
#include "hash.h"
3334

3435
#define CREATE_TRACE_POINTS
3536
#include <trace/events/avc.h>
@@ -124,7 +125,7 @@ static struct kmem_cache *avc_xperms_cachep __ro_after_init;
124125

125126
/*
 * Map an (ssid, tsid, tclass) triple to an AVC cache slot index.
 * The change below replaces the original ad-hoc shift/XOR mix with
 * MurmurHash3 via the shared av_hash() helper for a more uniform
 * bucket distribution. The `& (AVC_CACHE_SLOTS - 1)` mask pattern
 * assumes AVC_CACHE_SLOTS is a power of two — TODO confirm in avc.h.
 */
static inline u32 avc_hash(u32 ssid, u32 tsid, u16 tclass)
126127
{
127-
return (ssid ^ (tsid<<2) ^ (tclass<<4)) & (AVC_CACHE_SLOTS - 1);
128+
return av_hash(ssid, tsid, (u32)tclass, (u32)(AVC_CACHE_SLOTS - 1));
128129
}
129130

130131
/**

security/selinux/include/hash.h

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -3,10 +3,11 @@
33
#ifndef _SELINUX_HASH_H_
44
#define _SELINUX_HASH_H_
55

6-
/* Based on MurmurHash3, written by Austin Appleby and placed in the
6+
/*
7+
* Based on MurmurHash3, written by Austin Appleby and placed in the
78
* public domain.
89
*/
9-
static inline u32 avtab_hash(const struct avtab_key *keyp, u32 mask)
10+
static inline u32 av_hash(u32 key1, u32 key2, u32 key3, u32 mask)
1011
{
1112
static const u32 c1 = 0xcc9e2d51;
1213
static const u32 c2 = 0x1b873593;
@@ -28,9 +29,9 @@ static inline u32 avtab_hash(const struct avtab_key *keyp, u32 mask)
2829
hash = hash * m + n; \
2930
} while (0)
3031

31-
mix(keyp->target_class);
32-
mix(keyp->target_type);
33-
mix(keyp->source_type);
32+
mix(key1);
33+
mix(key2);
34+
mix(key3);
3435

3536
#undef mix
3637

security/selinux/ss/avtab.c

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,12 @@
2525
static struct kmem_cache *avtab_node_cachep __ro_after_init;
2626
static struct kmem_cache *avtab_xperms_cachep __ro_after_init;
2727

28+
/*
 * Compute the avtab bucket index for @keyp by feeding its target_class,
 * target_type, and source_type fields through the shared
 * MurmurHash3-based av_hash() helper, masked with @mask (presumably the
 * table's slot count minus one — verify against callers). This wrapper
 * preserves the avtab_hash() interface after the generic hashing code
 * moved into av_hash() in include/hash.h.
 */
static inline u32 avtab_hash(const struct avtab_key *keyp, u32 mask)
29+
{
30+
return av_hash((u32)keyp->target_class, (u32)keyp->target_type,
31+
(u32)keyp->source_type, mask);
32+
}
33+
2834
static struct avtab_node *avtab_insert_node(struct avtab *h,
2935
struct avtab_node **dst,
3036
const struct avtab_key *key,

0 commit comments

Comments
 (0)