@@ -76,13 +76,34 @@ static void iomap_set_range_uptodate(struct folio *folio, size_t off,
 		folio_mark_uptodate(folio);
 }
 
-static inline bool ifs_block_is_dirty(struct folio *folio,
-		struct iomap_folio_state *ifs, int block)
+/*
+ * Find the next dirty block in the folio. end_blk is inclusive.
+ * If no dirty block is found, this will return end_blk + 1.
+ */
+static unsigned ifs_next_dirty_block(struct folio *folio,
+		unsigned start_blk, unsigned end_blk)
 {
+	struct iomap_folio_state *ifs = folio->private;
 	struct inode *inode = folio->mapping->host;
-	unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
+	unsigned int blks = i_blocks_per_folio(inode, folio);
+
+	return find_next_bit(ifs->state, blks + end_blk + 1,
+			blks + start_blk) - blks;
+}
+
+/*
+ * Find the next clean block in the folio. end_blk is inclusive.
+ * If no clean block is found, this will return end_blk + 1.
+ */
+static unsigned ifs_next_clean_block(struct folio *folio,
+		unsigned start_blk, unsigned end_blk)
+{
+	struct iomap_folio_state *ifs = folio->private;
+	struct inode *inode = folio->mapping->host;
+	unsigned int blks = i_blocks_per_folio(inode, folio);
 
-	return test_bit(block + blks_per_folio, ifs->state);
+	return find_next_zero_bit(ifs->state, blks + end_blk + 1,
+			blks + start_blk) - blks;
 }
 
 static unsigned ifs_find_dirty_range(struct folio *folio,
@@ -93,18 +114,17 @@ static unsigned ifs_find_dirty_range(struct folio *folio,
 		offset_in_folio(folio, *range_start) >> inode->i_blkbits;
 	unsigned end_blk = min_not_zero(
 		offset_in_folio(folio, range_end) >> inode->i_blkbits,
-		i_blocks_per_folio(inode, folio));
-	unsigned nblks = 1;
-
-	while (!ifs_block_is_dirty(folio, ifs, start_blk))
-		if (++start_blk == end_blk)
-			return 0;
+		i_blocks_per_folio(inode, folio)) - 1;
+	unsigned nblks;
 
-	while (start_blk + nblks < end_blk) {
-		if (!ifs_block_is_dirty(folio, ifs, start_blk + nblks))
-			break;
-		nblks++;
-	}
+	start_blk = ifs_next_dirty_block(folio, start_blk, end_blk);
+	if (start_blk > end_blk)
+		return 0;
+	if (start_blk == end_blk)
+		nblks = 1;
+	else
+		nblks = ifs_next_clean_block(folio, start_blk + 1, end_blk) -
+			start_blk;
 
 	*range_start = folio_pos(folio) + (start_blk << inode->i_blkbits);
 	return nblks << inode->i_blkbits;
@@ -1166,7 +1186,7 @@ static void iomap_write_delalloc_ifs_punch(struct inode *inode,
 		struct folio *folio, loff_t start_byte, loff_t end_byte,
 		struct iomap *iomap, iomap_punch_t punch)
 {
-	unsigned int first_blk, last_blk, i;
+	unsigned int first_blk, last_blk;
 	loff_t last_byte;
 	u8 blkbits = inode->i_blkbits;
 	struct iomap_folio_state *ifs;
@@ -1185,10 +1205,11 @@ static void iomap_write_delalloc_ifs_punch(struct inode *inode,
 			folio_pos(folio) + folio_size(folio) - 1);
 	first_blk = offset_in_folio(folio, start_byte) >> blkbits;
 	last_blk = offset_in_folio(folio, last_byte) >> blkbits;
-	for (i = first_blk; i <= last_blk; i++) {
-		if (!ifs_block_is_dirty(folio, ifs, i))
-			punch(inode, folio_pos(folio) + (i << blkbits),
-					1 << blkbits, iomap);
+	while ((first_blk = ifs_next_clean_block(folio, first_blk, last_blk))
+			<= last_blk) {
+		punch(inode, folio_pos(folio) + (first_blk << blkbits),
+				1 << blkbits, iomap);
+		first_blk++;
 	}
 }
 