 #include "machine.h"
 #include "thread.h"
 
-struct thread_rb_node {
-	struct rb_node rb_node;
-	struct thread *thread;
-};
-
 static struct threads_table_entry *threads__table(struct threads *threads, pid_t tid)
 {
 	/* Cast it to handle tid == -1 */
 	return &threads->table[(unsigned int)tid % THREADS__TABLE_SIZE];
 }
 
+static size_t key_hash(long key, void *ctx __maybe_unused)
+{
+	/* The table lookup removes low bit entropy, but this is just ignored here. */
+	return key;
+}
+
+static bool key_equal(long key1, long key2, void *ctx __maybe_unused)
+{
+	return key1 == key2;
+}
+
 void threads__init(struct threads *threads)
 {
 	for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
 		struct threads_table_entry *table = &threads->table[i];
 
-		table->entries = RB_ROOT_CACHED;
+		hashmap__init(&table->shard, key_hash, key_equal, NULL);
 		init_rwsem(&table->lock);
-		table->nr = 0;
 		table->last_match = NULL;
 	}
 }
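
Note: the shard added above is perf's vendored copy of the libbpf hashmap (tools/perf/util/hashmap.h). It stores long-sized keys and values and takes the hash and equality callbacks, plus an opaque ctx pointer, at init time. A minimal standalone sketch of that contract, with hypothetical names, assuming the tools/lib hashmap API:

    #include <stdio.h>
    #include <linux/compiler.h>
    #include "util/hashmap.h"	/* perf's copy of tools/lib/bpf/hashmap.h */

    /* Identity hash is enough here: tids are already well spread out. */
    static size_t example_hash(long key, void *ctx __maybe_unused)
    {
    	return key;
    }

    static bool example_equal(long key1, long key2, void *ctx __maybe_unused)
    {
    	return key1 == key2;
    }

    static void example(void)
    {
    	struct hashmap map;
    	long value;

    	hashmap__init(&map, example_hash, example_equal, /*ctx=*/NULL);
    	hashmap__add(&map, /*key=*/42, /*value=*/100);	/* keys/values are long-sized */
    	if (hashmap__find(&map, 42, &value))
    		printf("found %ld\n", value);	/* prints "found 100" */
    	hashmap__clear(&map);	/* frees the buckets; stored values are untouched */
    }
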
@@ -32,6 +37,7 @@ void threads__exit(struct threads *threads)
 	for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
 		struct threads_table_entry *table = &threads->table[i];
 
+		hashmap__clear(&table->shard);
 		exit_rwsem(&table->lock);
 	}
 }
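
Worth noting: hashmap__clear() releases only the map's own bucket storage and never drops the thread references held as values, so teardown relies on the references being put first. A hedged sketch of the expected call order (no new API):

    /* Expected teardown order, assuming hashmap__clear() never touches values. */
    threads__remove_all_threads(threads);	/* puts every stored thread reference */
    threads__exit(threads);			/* then frees each shard's buckets */
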
@@ -44,7 +50,7 @@ size_t threads__nr(struct threads *threads)
 		struct threads_table_entry *table = &threads->table[i];
 
 		down_read(&table->lock);
-		nr += table->nr;
+		nr += hashmap__size(&table->shard);
 		up_read(&table->lock);
 	}
 	return nr;
@@ -86,28 +92,13 @@ static void threads_table_entry__set_last_match(struct threads_table_entry *tabl
 struct thread *threads__find(struct threads *threads, pid_t tid)
 {
 	struct threads_table_entry *table = threads__table(threads, tid);
-	struct rb_node **p;
-	struct thread *res = NULL;
+	struct thread *res;
 
 	down_read(&table->lock);
 	res = __threads_table_entry__get_last_match(table, tid);
-	if (res)
-		return res;
-
-	p = &table->entries.rb_root.rb_node;
-	while (*p != NULL) {
-		struct rb_node *parent = *p;
-		struct thread *th = rb_entry(parent, struct thread_rb_node, rb_node)->thread;
-
-		if (thread__tid(th) == tid) {
-			res = thread__get(th);
-			break;
-		}
-
-		if (tid < thread__tid(th))
-			p = &(*p)->rb_left;
-		else
-			p = &(*p)->rb_right;
+	if (!res) {
+		if (hashmap__find(&table->shard, tid, &res))
+			res = thread__get(res);
 	}
 	up_read(&table->lock);
 	if (res)
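
On the fallback path, hashmap__find() returns false on a miss and writes the stored pointer through the out parameter on a hit; the macro form casts, so res can stay a struct thread *. A hypothetical helper showing just that lookup-and-ref shape, under the same locking rules as above:

    /* Hypothetical helper: find a thread in one shard and take a reference. */
    static struct thread *shard__find(struct threads_table_entry *table, pid_t tid)
    {
    	struct thread *res = NULL;

    	down_read(&table->lock);
    	if (hashmap__find(&table->shard, tid, &res))
    		res = thread__get(res);	/* take a ref while the lock pins the entry */
    	up_read(&table->lock);
    	return res;	/* NULL on a miss; res is left untouched by the find */
    }
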
@@ -118,49 +109,25 @@ struct thread *threads__find(struct threads *threads, pid_t tid)
 struct thread *threads__findnew(struct threads *threads, pid_t pid, pid_t tid, bool *created)
 {
 	struct threads_table_entry *table = threads__table(threads, tid);
-	struct rb_node **p;
-	struct rb_node *parent = NULL;
 	struct thread *res = NULL;
-	struct thread_rb_node *nd;
-	bool leftmost = true;
 
 	*created = false;
 	down_write(&table->lock);
-	p = &table->entries.rb_root.rb_node;
-	while (*p != NULL) {
-		struct thread *th;
-
-		parent = *p;
-		th = rb_entry(parent, struct thread_rb_node, rb_node)->thread;
-
-		if (thread__tid(th) == tid) {
-			__threads_table_entry__set_last_match(table, th);
-			res = thread__get(th);
-			goto out_unlock;
-		}
-
-		if (tid < thread__tid(th))
-			p = &(*p)->rb_left;
-		else {
-			leftmost = false;
-			p = &(*p)->rb_right;
-		}
-	}
-	nd = malloc(sizeof(*nd));
-	if (nd == NULL)
-		goto out_unlock;
 	res = thread__new(pid, tid);
-	if (!res)
-		free(nd);
-	else {
-		*created = true;
-		nd->thread = thread__get(res);
-		rb_link_node(&nd->rb_node, parent, p);
-		rb_insert_color_cached(&nd->rb_node, &table->entries, leftmost);
-		++table->nr;
-		__threads_table_entry__set_last_match(table, res);
+	if (res) {
+		if (hashmap__add(&table->shard, tid, res)) {
+			/* Add failed. Assume a race so find other entry. */
+			thread__put(res);
+			res = NULL;
+			if (hashmap__find(&table->shard, tid, &res))
+				res = thread__get(res);
+		} else {
+			res = thread__get(res);
+			*created = true;
+		}
+		if (res)
+			__threads_table_entry__set_last_match(table, res);
 	}
-out_unlock:
 	up_write(&table->lock);
 	return res;
 }
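
The add-then-fallback branch leans on hashmap__add() rejecting duplicate keys (the vendored implementation returns -EEXIST; -ENOMEM is also possible and is treated the same way here). The reference counts line up as follows: thread__new() returns one reference, which the map keeps on a successful add, and the extra thread__get() is the caller's. A distilled, hypothetical version of that shape (caller holds the shard's write lock; not the upstream function):

    /* Hypothetical distillation of the insert path above. */
    static struct thread *shard__findnew(struct threads_table_entry *table,
    				     pid_t pid, pid_t tid, bool *created)
    {
    	struct thread *res = thread__new(pid, tid);	/* comes with one ref */

    	if (!res)
    		return NULL;
    	if (hashmap__add(&table->shard, tid, res) == 0) {
    		*created = true;
    		return thread__get(res);	/* map keeps the first ref, caller gets a second */
    	}
    	/* -EEXIST (a racing insert) or -ENOMEM: drop ours, return what is stored. */
    	thread__put(res);
    	res = NULL;
    	if (hashmap__find(&table->shard, tid, &res))
    		res = thread__get(res);
    	return res;	/* NULL only if the add failed and no entry exists */
    }
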
@@ -169,57 +136,32 @@ void threads__remove_all_threads(struct threads *threads)
 {
 	for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
 		struct threads_table_entry *table = &threads->table[i];
-		struct rb_node *nd;
+		struct hashmap_entry *cur, *tmp;
+		size_t bkt;
 
 		down_write(&table->lock);
 		__threads_table_entry__set_last_match(table, NULL);
-		nd = rb_first_cached(&table->entries);
-		while (nd) {
-			struct thread_rb_node *trb = rb_entry(nd, struct thread_rb_node, rb_node);
-
-			nd = rb_next(nd);
-			thread__put(trb->thread);
-			rb_erase_cached(&trb->rb_node, &table->entries);
-			RB_CLEAR_NODE(&trb->rb_node);
-			--table->nr;
+		hashmap__for_each_entry_safe((&table->shard), cur, tmp, bkt) {
+			struct thread *old_value;
 
-			free(trb);
+			hashmap__delete(&table->shard, cur->key, /*old_key=*/NULL, &old_value);
+			thread__put(old_value);
 		}
-		assert(table->nr == 0);
 		up_write(&table->lock);
 	}
 }
 
 void threads__remove(struct threads *threads, struct thread *thread)
 {
-	struct rb_node **p;
 	struct threads_table_entry *table = threads__table(threads, thread__tid(thread));
-	pid_t tid = thread__tid(thread);
+	struct thread *old_value;
 
 	down_write(&table->lock);
 	if (table->last_match && RC_CHK_EQUAL(table->last_match, thread))
 		__threads_table_entry__set_last_match(table, NULL);
 
-	p = &table->entries.rb_root.rb_node;
-	while (*p != NULL) {
-		struct rb_node *parent = *p;
-		struct thread_rb_node *nd = rb_entry(parent, struct thread_rb_node, rb_node);
-		struct thread *th = nd->thread;
-
-		if (RC_CHK_EQUAL(th, thread)) {
-			thread__put(nd->thread);
-			rb_erase_cached(&nd->rb_node, &table->entries);
-			RB_CLEAR_NODE(&nd->rb_node);
-			--table->nr;
-			free(nd);
-			break;
-		}
-
-		if (tid < thread__tid(th))
-			p = &(*p)->rb_left;
-		else
-			p = &(*p)->rb_right;
-	}
+	hashmap__delete(&table->shard, thread__tid(thread), /*old_key=*/NULL, &old_value);
+	thread__put(old_value);
 	up_write(&table->lock);
 }
 
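
hashmap__delete() reports a hit through its bool return and hands back the stored value via old_value, which is what lets the map's reference be dropped. The code above assumes the thread is present in its shard; a caller that could not guarantee that would want to check the return value before the put, roughly (hypothetical, not upstream code):

    /* Hypothetical defensive remove: old_value is only valid on a hit. */
    static void shard__remove(struct threads_table_entry *table, struct thread *thread)
    {
    	struct thread *old_value = NULL;

    	down_write(&table->lock);
    	if (hashmap__delete(&table->shard, thread__tid(thread),
    			    /*old_key=*/NULL, &old_value))
    		thread__put(old_value);	/* drop the reference the map held */
    	up_write(&table->lock);
    }
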
@@ -229,12 +171,12 @@ int threads__for_each_thread(struct threads *threads,
 {
 	for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
 		struct threads_table_entry *table = &threads->table[i];
-		struct rb_node *nd;
+		struct hashmap_entry *cur;
+		size_t bkt;
 
 		down_read(&table->lock);
-		for (nd = rb_first_cached(&table->entries); nd; nd = rb_next(nd)) {
-			struct thread_rb_node *trb = rb_entry(nd, struct thread_rb_node, rb_node);
-			int rc = fn(trb->thread, data);
+		hashmap__for_each_entry((&table->shard), cur, bkt) {
+			int rc = fn((struct thread *)cur->pvalue, data);
 
 			if (rc != 0) {
 				up_read(&table->lock);