@@ -112,7 +112,7 @@ static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
112112 unsigned flags = memalloc_nofs_save ();
113113 void * p ;
114114
115- BUG_ON (size > btree_bytes ( c ) );
115+ BUG_ON (size > c -> opts . btree_node_size );
116116
117117 * used_mempool = false;
118118 p = vpmalloc (size , __GFP_NOWARN |GFP_NOWAIT );
@@ -174,8 +174,8 @@ static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
174174
175175 ptrs = ptrs_end = ((void * ) new_whiteouts + bytes );
176176
177- for (k = unwritten_whiteouts_start (c , b );
178- k != unwritten_whiteouts_end (c , b );
177+ for (k = unwritten_whiteouts_start (b );
178+ k != unwritten_whiteouts_end (b );
179179 k = bkey_p_next (k ))
180180 * -- ptrs = k ;
181181
@@ -192,7 +192,7 @@ static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
192192 verify_no_dups (b , new_whiteouts ,
193193 (void * ) ((u64 * ) new_whiteouts + b -> whiteout_u64s ));
194194
195- memcpy_u64s (unwritten_whiteouts_start (c , b ),
195+ memcpy_u64s (unwritten_whiteouts_start (b ),
196196 new_whiteouts , b -> whiteout_u64s );
197197
198198 btree_bounce_free (c , bytes , used_mempool , new_whiteouts );
@@ -313,7 +313,7 @@ static void btree_node_sort(struct bch_fs *c, struct btree *b,
313313 }
314314
315315 bytes = sorting_entire_node
316- ? btree_bytes ( c )
316+ ? btree_buf_bytes ( b )
317317 : __vstruct_bytes (struct btree_node , u64s );
318318
319319 out = btree_bounce_alloc (c , bytes , & used_mempool );
@@ -338,7 +338,7 @@ static void btree_node_sort(struct bch_fs *c, struct btree *b,
338338 if (sorting_entire_node ) {
339339 u64s = le16_to_cpu (out -> keys .u64s );
340340
341- BUG_ON (bytes != btree_bytes ( c ));
341+ BUG_ON (bytes != btree_buf_bytes ( b ));
342342
343343 /*
344344 * Our temporary buffer is the same size as the btree node's
@@ -502,7 +502,7 @@ void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
502502
503503 bne = want_new_bset (c , b );
504504 if (bne )
505- bch2_bset_init_next (c , b , bne );
505+ bch2_bset_init_next (b , bne );
506506
507507 bch2_btree_build_aux_trees (b );
508508
@@ -1160,7 +1160,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
11601160 ptr_written , b -> written );
11611161 } else {
11621162 for (bne = write_block (b );
1163- bset_byte_offset (b , bne ) < btree_bytes ( c );
1163+ bset_byte_offset (b , bne ) < btree_buf_bytes ( b );
11641164 bne = (void * ) bne + block_bytes (c ))
11651165 btree_err_on (bne -> keys .seq == b -> data -> keys .seq &&
11661166 !bch2_journal_seq_is_blacklisted (c ,
@@ -1172,7 +1172,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
11721172 "found bset signature after last bset" );
11731173 }
11741174
1175- sorted = btree_bounce_alloc (c , btree_bytes ( c ), & used_mempool );
1175+ sorted = btree_bounce_alloc (c , btree_buf_bytes ( b ), & used_mempool );
11761176 sorted -> keys .u64s = 0 ;
11771177
11781178 set_btree_bset (b , b -> set , & b -> data -> keys );
@@ -1188,7 +1188,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
11881188
11891189 BUG_ON (b -> nr .live_u64s != u64s );
11901190
1191- btree_bounce_free (c , btree_bytes ( c ), used_mempool , sorted );
1191+ btree_bounce_free (c , btree_buf_bytes ( b ), used_mempool , sorted );
11921192
11931193 if (updated_range )
11941194 bch2_btree_node_drop_keys_outside_node (b );
@@ -1284,7 +1284,7 @@ static void btree_node_read_work(struct work_struct *work)
12841284 rb -> have_ioref = bch2_dev_get_ioref (ca , READ );
12851285 bio_reset (bio , NULL , REQ_OP_READ |REQ_SYNC |REQ_META );
12861286 bio -> bi_iter .bi_sector = rb -> pick .ptr .offset ;
1287- bio -> bi_iter .bi_size = btree_bytes ( c );
1287+ bio -> bi_iter .bi_size = btree_buf_bytes ( b );
12881288
12891289 if (rb -> have_ioref ) {
12901290 bio_set_dev (bio , ca -> disk_sb .bdev );
@@ -1512,7 +1512,7 @@ static CLOSURE_CALLBACK(btree_node_read_all_replicas_done)
15121512 }
15131513
15141514 if (best >= 0 ) {
1515- memcpy (b -> data , ra -> buf [best ], btree_bytes ( c ));
1515+ memcpy (b -> data , ra -> buf [best ], btree_buf_bytes ( b ));
15161516 ret = bch2_btree_node_read_done (c , NULL , b , false, saw_error );
15171517 } else {
15181518 ret = -1 ;
@@ -1578,7 +1578,7 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool
15781578 for (i = 0 ; i < ra -> nr ; i ++ ) {
15791579 ra -> buf [i ] = mempool_alloc (& c -> btree_bounce_pool , GFP_NOFS );
15801580 ra -> bio [i ] = bio_alloc_bioset (NULL ,
1581- buf_pages (ra -> buf [i ], btree_bytes ( c )),
1581+ buf_pages (ra -> buf [i ], btree_buf_bytes ( b )),
15821582 REQ_OP_READ |REQ_SYNC |REQ_META ,
15831583 GFP_NOFS ,
15841584 & c -> btree_bio );
@@ -1598,7 +1598,7 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool
15981598 rb -> pick = pick ;
15991599 rb -> bio .bi_iter .bi_sector = pick .ptr .offset ;
16001600 rb -> bio .bi_end_io = btree_node_read_all_replicas_endio ;
1601- bch2_bio_map (& rb -> bio , ra -> buf [i ], btree_bytes ( c ));
1601+ bch2_bio_map (& rb -> bio , ra -> buf [i ], btree_buf_bytes ( b ));
16021602
16031603 if (rb -> have_ioref ) {
16041604 this_cpu_add (ca -> io_done -> sectors [READ ][BCH_DATA_btree ],
@@ -1665,7 +1665,7 @@ void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
16651665 ca = bch_dev_bkey_exists (c , pick .ptr .dev );
16661666
16671667 bio = bio_alloc_bioset (NULL ,
1668- buf_pages (b -> data , btree_bytes ( c )),
1668+ buf_pages (b -> data , btree_buf_bytes ( b )),
16691669 REQ_OP_READ |REQ_SYNC |REQ_META ,
16701670 GFP_NOFS ,
16711671 & c -> btree_bio );
@@ -1679,7 +1679,7 @@ void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
16791679 INIT_WORK (& rb -> work , btree_node_read_work );
16801680 bio -> bi_iter .bi_sector = pick .ptr .offset ;
16811681 bio -> bi_end_io = btree_node_read_endio ;
1682- bch2_bio_map (bio , b -> data , btree_bytes ( c ));
1682+ bch2_bio_map (bio , b -> data , btree_buf_bytes ( b ));
16831683
16841684 if (rb -> have_ioref ) {
16851685 this_cpu_add (ca -> io_done -> sectors [READ ][BCH_DATA_btree ],
@@ -2074,8 +2074,8 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
20742074 i -> u64s = 0 ;
20752075
20762076 sort_iter_add (& sort_iter .iter ,
2077- unwritten_whiteouts_start (c , b ),
2078- unwritten_whiteouts_end (c , b ));
2077+ unwritten_whiteouts_start (b ),
2078+ unwritten_whiteouts_end (b ));
20792079 SET_BSET_SEPARATE_WHITEOUTS (i , false);
20802080
20812081 b -> whiteout_u64s = 0 ;
@@ -2251,7 +2251,7 @@ bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
22512251
22522252 bne = want_new_bset (c , b );
22532253 if (bne )
2254- bch2_bset_init_next (c , b , bne );
2254+ bch2_bset_init_next (b , bne );
22552255
22562256 bch2_btree_build_aux_trees (b );
22572257