@@ -197,7 +197,9 @@ bkey_cached_reuse(struct btree_key_cache *c)
197197 return ck ;
198198}
199199
200- static int btree_key_cache_create (struct btree_trans * trans , struct btree_path * path ,
200+ static int btree_key_cache_create (struct btree_trans * trans ,
201+ struct btree_path * path ,
202+ struct btree_path * ck_path ,
201203 struct bkey_s_c k )
202204{
203205 struct bch_fs * c = trans -> c ;
@@ -217,7 +219,7 @@ static int btree_key_cache_create(struct btree_trans *trans, struct btree_path *
217219 key_u64s = min (256U , (key_u64s * 3 ) / 2 );
218220 key_u64s = roundup_pow_of_two (key_u64s );
219221
220- struct bkey_cached * ck = bkey_cached_alloc (trans , path , key_u64s );
222+ struct bkey_cached * ck = bkey_cached_alloc (trans , ck_path , key_u64s );
221223 int ret = PTR_ERR_OR_ZERO (ck );
222224 if (ret )
223225 return ret ;
@@ -226,19 +228,19 @@ static int btree_key_cache_create(struct btree_trans *trans, struct btree_path *
226228 ck = bkey_cached_reuse (bc );
227229 if (unlikely (!ck )) {
228230 bch_err (c , "error allocating memory for key cache item, btree %s" ,
229- bch2_btree_id_str (path -> btree_id ));
231+ bch2_btree_id_str (ck_path -> btree_id ));
230232 return - BCH_ERR_ENOMEM_btree_key_cache_create ;
231233 }
232234 }
233235
234236 ck -> c .level = 0 ;
235- ck -> c .btree_id = path -> btree_id ;
236- ck -> key .btree_id = path -> btree_id ;
237- ck -> key .pos = path -> pos ;
237+ ck -> c .btree_id = ck_path -> btree_id ;
238+ ck -> key .btree_id = ck_path -> btree_id ;
239+ ck -> key .pos = ck_path -> pos ;
238240 ck -> flags = 1U << BKEY_CACHED_ACCESSED ;
239241
240242 if (unlikely (key_u64s > ck -> u64s )) {
241- mark_btree_node_locked_noreset (path , 0 , BTREE_NODE_UNLOCKED );
243+ mark_btree_node_locked_noreset (ck_path , 0 , BTREE_NODE_UNLOCKED );
242244
243245 struct bkey_i * new_k = allocate_dropping_locks (trans , ret ,
244246 kmalloc (key_u64s * sizeof (u64 ), _gfp ));
@@ -258,22 +260,29 @@ static int btree_key_cache_create(struct btree_trans *trans, struct btree_path *
258260
259261 bkey_reassemble (ck -> k , k );
260262
263+ ret = bch2_btree_node_lock_write (trans , path , & path_l (path )-> b -> c );
264+ if (unlikely (ret ))
265+ goto err ;
266+
261267 ret = rhashtable_lookup_insert_fast (& bc -> table , & ck -> hash , bch2_btree_key_cache_params );
268+
269+ bch2_btree_node_unlock_write (trans , path , path_l (path )-> b );
270+
262271 if (unlikely (ret )) /* raced with another fill? */
263272 goto err ;
264273
265274 atomic_long_inc (& bc -> nr_keys );
266275 six_unlock_write (& ck -> c .lock );
267276
268- enum six_lock_type lock_want = __btree_lock_want (path , 0 );
277+ enum six_lock_type lock_want = __btree_lock_want (ck_path , 0 );
269278 if (lock_want == SIX_LOCK_read )
270279 six_lock_downgrade (& ck -> c .lock );
271- btree_path_cached_set (trans , path , ck , (enum btree_node_locked_type ) lock_want );
272- path -> uptodate = BTREE_ITER_UPTODATE ;
280+ btree_path_cached_set (trans , ck_path , ck , (enum btree_node_locked_type ) lock_want );
281+ ck_path -> uptodate = BTREE_ITER_UPTODATE ;
273282 return 0 ;
274283err :
275284 bkey_cached_free (bc , ck );
276- mark_btree_node_locked_noreset (path , 0 , BTREE_NODE_UNLOCKED );
285+ mark_btree_node_locked_noreset (ck_path , 0 , BTREE_NODE_UNLOCKED );
277286
278287 return ret ;
279288}
@@ -293,6 +302,7 @@ static noinline int btree_key_cache_fill(struct btree_trans *trans,
293302 int ret ;
294303
295304 bch2_trans_iter_init (trans , & iter , ck_path -> btree_id , ck_path -> pos ,
305+ BTREE_ITER_intent |
296306 BTREE_ITER_key_cache_fill |
297307 BTREE_ITER_cached_nofill );
298308 iter .flags &= ~BTREE_ITER_with_journal ;
@@ -306,7 +316,7 @@ static noinline int btree_key_cache_fill(struct btree_trans *trans,
306316 if (unlikely (ret ))
307317 goto out ;
308318
309- ret = btree_key_cache_create (trans , ck_path , k );
319+ ret = btree_key_cache_create (trans , btree_iter_path ( trans , & iter ), ck_path , k );
310320 if (ret )
311321 goto err ;
312322