Skip to content

Commit 5ba7119

Browse files
Darksonn authored and YuryNorov committed
rust_binder: use bitmap for allocation of handles
To find an unused Binder handle, Rust Binder currently iterates the red/black tree from the beginning until it finds a gap in the keys. This is extremely slow. To improve the performance, add a bitmap that keeps track of which indices are actually in use. This allows us to quickly find an unused key in the red/black tree. For a benchmark, please see the below numbers that were obtained from modifying binderThroughputTest to send a node with each transaction and stashing it in the server. This results in the number of nodes increasing by one for every transaction sent. I got the following table of roundtrip latencies (in µs): Transaction Range │ Baseline (Rust) │ Bitmap (Rust) │ Comparison (C) 0 - 10,000 │ 176.88 │ 92.93 │ 99.41 10,000 - 20,000 │ 437.37 │ 87.74 │ 98.55 20,000 - 30,000 │ 677.49 │ 76.24 │ 96.37 30,000 - 40,000 │ 901.76 │ 83.39 │ 96.73 40,000 - 50,000 │ 1126.62 │ 100.44 │ 94.57 50,000 - 60,000 │ 1288.98 │ 94.38 │ 96.64 60,000 - 70,000 │ 1588.74 │ 88.27 │ 96.36 70,000 - 80,000 │ 1812.97 │ 93.97 │ 91.24 80,000 - 90,000 │ 2062.95 │ 92.22 │ 102.01 90,000 - 100,000 │ 2330.03 │ 97.18 │ 100.31 It should be clear that the current Rust code becomes linearly slower per insertion as the number of calls to rb_next() per transaction increases. After this change, the time to find an ID number appears constant. (Technically it is not constant-time as both insertion and removal scan the entire bitmap. However, quick napkin math shows that scanning the entire bitmap with N=100k takes ~1.5µs, which is negligible in a benchmark where the roundtrip latency is 100µs.) I've included a comparison to the C driver, which uses the same bitmap algorithm as this patch since commit 15d9da3 ("binder: use bitmap for faster descriptor lookup"). This currently checks if the bitmap should be shrunk after every removal. One potential future change is introducing a shrinker to make this operation O(1), but based on the benchmark above this does not seem required at this time.
Reviewed-by: Burak Emir <bqe@google.com> Reviewed-by: Yury Norov (NVIDIA) <yury.norov@gmail.com> Acked-by: Carlos Llamas <cmllamas@google.com> Signed-off-by: Alice Ryhl <aliceryhl@google.com> Signed-off-by: Yury Norov (NVIDIA) <yury.norov@gmail.com>
1 parent f523d11 commit 5ba7119

1 file changed

Lines changed: 47 additions & 17 deletions

File tree

drivers/android/binder/process.rs

Lines changed: 47 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@ use kernel::{
1919
cred::Credential,
2020
error::Error,
2121
fs::file::{self, File},
22+
id_pool::IdPool,
2223
list::{List, ListArc, ListArcField, ListLinks},
2324
mm,
2425
prelude::*,
@@ -394,6 +395,8 @@ kernel::list::impl_list_item! {
394395
struct ProcessNodeRefs {
395396
/// Used to look up nodes using the 32-bit id that this process knows it by.
396397
by_handle: RBTree<u32, ListArc<NodeRefInfo, { NodeRefInfo::LIST_PROC }>>,
398+
/// Used to quickly find unused ids in `by_handle`.
399+
handle_is_present: IdPool,
397400
/// Used to look up nodes without knowing their local 32-bit id. The usize is the address of
398401
/// the underlying `Node` struct as returned by `Node::global_id`.
399402
by_node: RBTree<usize, u32>,
@@ -408,6 +411,7 @@ impl ProcessNodeRefs {
408411
fn new() -> Self {
409412
Self {
410413
by_handle: RBTree::new(),
414+
handle_is_present: IdPool::new(),
411415
by_node: RBTree::new(),
412416
freeze_listeners: RBTree::new(),
413417
}
@@ -802,7 +806,7 @@ impl Process {
802806
pub(crate) fn insert_or_update_handle(
803807
self: ArcBorrow<'_, Process>,
804808
node_ref: NodeRef,
805-
is_mananger: bool,
809+
is_manager: bool,
806810
) -> Result<u32> {
807811
{
808812
let mut refs = self.node_refs.lock();
@@ -821,7 +825,33 @@ impl Process {
821825
let reserve2 = RBTreeNodeReservation::new(GFP_KERNEL)?;
822826
let info = UniqueArc::new_uninit(GFP_KERNEL)?;
823827

824-
let mut refs = self.node_refs.lock();
828+
let mut refs_lock = self.node_refs.lock();
829+
let mut refs = &mut *refs_lock;
830+
831+
let (unused_id, by_handle_slot) = loop {
832+
// ID 0 may only be used by the manager.
833+
let start = if is_manager { 0 } else { 1 };
834+
835+
if let Some(res) = refs.handle_is_present.find_unused_id(start) {
836+
match refs.by_handle.entry(res.as_u32()) {
837+
rbtree::Entry::Vacant(entry) => break (res, entry),
838+
rbtree::Entry::Occupied(_) => {
839+
pr_err!("Detected mismatch between handle_is_present and by_handle");
840+
res.acquire();
841+
kernel::warn_on!(true);
842+
return Err(EINVAL);
843+
}
844+
}
845+
}
846+
847+
let grow_request = refs.handle_is_present.grow_request().ok_or(ENOMEM)?;
848+
drop(refs_lock);
849+
let resizer = grow_request.realloc(GFP_KERNEL)?;
850+
refs_lock = self.node_refs.lock();
851+
refs = &mut *refs_lock;
852+
refs.handle_is_present.grow(resizer);
853+
};
854+
let handle = unused_id.as_u32();
825855

826856
// Do a lookup again as node may have been inserted before the lock was reacquired.
827857
if let Some(handle_ref) = refs.by_node.get(&node_ref.node.global_id()) {
@@ -831,20 +861,9 @@ impl Process {
831861
return Ok(handle);
832862
}
833863

834-
// Find id.
835-
let mut target: u32 = if is_mananger { 0 } else { 1 };
836-
for handle in refs.by_handle.keys() {
837-
if *handle > target {
838-
break;
839-
}
840-
if *handle == target {
841-
target = target.checked_add(1).ok_or(ENOMEM)?;
842-
}
843-
}
844-
845864
let gid = node_ref.node.global_id();
846865
let (info_proc, info_node) = {
847-
let info_init = NodeRefInfo::new(node_ref, target, self.into());
866+
let info_init = NodeRefInfo::new(node_ref, handle, self.into());
848867
match info.pin_init_with(info_init) {
849868
Ok(info) => ListArc::pair_from_pin_unique(info),
850869
// error is infallible
@@ -865,9 +884,10 @@ impl Process {
865884
// `info_node` into the right node's `refs` list.
866885
unsafe { info_proc.node_ref2().node.insert_node_info(info_node) };
867886

868-
refs.by_node.insert(reserve1.into_node(gid, target));
869-
refs.by_handle.insert(reserve2.into_node(target, info_proc));
870-
Ok(target)
887+
refs.by_node.insert(reserve1.into_node(gid, handle));
888+
by_handle_slot.insert(info_proc, reserve2);
889+
unused_id.acquire();
890+
Ok(handle)
871891
}
872892

873893
pub(crate) fn get_transaction_node(&self, handle: u32) -> BinderResult<NodeRef> {
@@ -932,6 +952,16 @@ impl Process {
932952
let id = info.node_ref().node.global_id();
933953
refs.by_handle.remove(&handle);
934954
refs.by_node.remove(&id);
955+
refs.handle_is_present.release_id(handle as usize);
956+
957+
if let Some(shrink) = refs.handle_is_present.shrink_request() {
958+
drop(refs);
959+
// This intentionally ignores allocation failures.
960+
if let Ok(new_bitmap) = shrink.realloc(GFP_KERNEL) {
961+
refs = self.node_refs.lock();
962+
refs.handle_is_present.shrink(new_bitmap);
963+
}
964+
}
935965
}
936966
} else {
937967
// All refs are cleared in process exit, so this warning is expected in that case.

0 commit comments

Comments (0)