@@ -214,6 +214,157 @@ static int copy_compressed_data_to_page(struct btrfs_fs_info *fs_info,
 	return 0;
 }
 
+/*
+ * Write data into @out_folio and queue it into @out_bio.
+ *
+ * Return 0 if everything is fine, and @total_out will be increased.
+ * Return <0 for error.
+ *
+ * The @out_folio can be NULL after a full folio is queued.
+ * Thus the caller should check and allocate a new folio when needed.
+ */
+static int write_and_queue_folio(struct bio *out_bio, struct folio **out_folio,
+				 u32 *total_out, u32 write_len)
+{
+	const u32 fsize = folio_size(*out_folio);
+	const u32 foffset = offset_in_folio(*out_folio, *total_out);
+
+	ASSERT(out_folio && *out_folio);
+	/* Should not cross the folio boundary. */
+	ASSERT(foffset + write_len <= fsize);
+
+	/* We can not use bio_add_folio_nofail(), which doesn't do any merging. */
+	if (!bio_add_folio(out_bio, *out_folio, write_len, foffset)) {
+		/*
+		 * We have allocated a bio that has BTRFS_MAX_COMPRESSED_PAGES
+		 * vecs, and all ranges inside the same folio should have been
+		 * merged. If bio_add_folio() still failed, that means we have
+		 * reached the bvec limit.
+		 *
+		 * This should only happen at the beginning of a folio, and the
+		 * caller is responsible for releasing the folio, since it's
+		 * not yet queued into the bio.
+		 */
+		ASSERT(IS_ALIGNED(*total_out, fsize));
+		return -E2BIG;
+	}
+
+	*total_out += write_len;
+	/*
+	 * The full folio has been filled and queued, reset @out_folio to NULL,
+	 * so that error handling is fully handled by the bio.
+	 */
+	if (IS_ALIGNED(*total_out, fsize))
+		*out_folio = NULL;
+	return 0;
+}
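+
+/*
+ * For reference, the output built by the helpers below follows the btrfs lzo
+ * on-disk format: the first 4 bytes hold the total compressed size, followed
+ * by per-segment pairs of a 4-byte length header and that segment's
+ * compressed data, where a header never crosses a sector boundary:
+ *
+ *   [4B total][4B seg1 len][seg1 data]...[4B segN len][segN data][0-3B pad]
+ */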
+
+/*
+ * Copy compressed data to the bio.
+ *
+ * @out_bio:		The bio that will contain all the compressed data.
+ * @compressed_data:	The compressed data of this segment.
+ * @compressed_size:	The size of the compressed data.
+ * @out_folio:		The current output folio, will be updated if a new
+ *			folio is allocated.
+ * @total_out:		The total bytes of current output.
+ * @max_out:		The maximum size of the compressed data.
+ *
+ * Will do:
+ *
+ * - Write a segment header into the destination
+ * - Copy the compressed buffer into the destination
+ * - Make sure we have enough space in the last sector to fit a segment header
+ *   If not, we will pad at most LZO_LEN - 1 (= 3) bytes of zeros.
+ * - If a full folio is filled, it will be queued into @out_bio, and @out_folio
+ *   will be updated.
+ *
+ * Will allocate new folios when needed.
+ */
+static int copy_compressed_data_to_bio(struct btrfs_fs_info *fs_info,
+				       struct bio *out_bio,
+				       const char *compressed_data,
+				       size_t compressed_size,
+				       struct folio **out_folio,
+				       u32 *total_out, u32 max_out)
+{
+	const u32 sectorsize = fs_info->sectorsize;
+	const u32 sectorsize_bits = fs_info->sectorsize_bits;
+	const u32 fsize = btrfs_min_folio_size(fs_info);
+	const u32 old_size = out_bio->bi_iter.bi_size;
+	u32 copy_start;
+	u32 sector_bytes_left;
+	char *kaddr;
+	int ret;
+
+	ASSERT(out_folio);
+
+	/* There should be at least an LZO header queued. */
+	ASSERT(old_size);
+	ASSERT(old_size == *total_out);
+
+	/*
+	 * We never allow a segment header to cross a sector boundary; the
+	 * previous run should have ensured we have enough space left inside
+	 * the sector.
+	 */
+	ASSERT((old_size >> sectorsize_bits) == (old_size + LZO_LEN - 1) >> sectorsize_bits);
+
+	if (!*out_folio) {
+		*out_folio = btrfs_alloc_compr_folio(fs_info);
+		if (!*out_folio)
+			return -ENOMEM;
+	}
+
+	/* Write the segment header first. */
+	kaddr = kmap_local_folio(*out_folio, offset_in_folio(*out_folio, *total_out));
+	write_compress_length(kaddr, compressed_size);
+	kunmap_local(kaddr);
+	ret = write_and_queue_folio(out_bio, out_folio, total_out, LZO_LEN);
+	if (ret < 0)
+		return ret;
+
+	copy_start = *total_out;
+
+	/* Copy the compressed data. */
+	while (*total_out - copy_start < compressed_size) {
+		u32 copy_len = min_t(u32, sectorsize - *total_out % sectorsize,
+				     copy_start + compressed_size - *total_out);
+		u32 foffset = *total_out & (fsize - 1);
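+
+		/*
+		 * A single copy never crosses a sector boundary: @copy_len is
+		 * capped at the bytes left in the current sector, so segment
+		 * data is written in sector-sized (or smaller) steps.
+		 */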
+
+		/*
+		 * With this range copied, the output would take at least as
+		 * many sectors as the original range, so bail out.
+		 */
+		if (((*total_out + copy_len) >> sectorsize_bits) >=
+		    max_out >> sectorsize_bits)
+			return -E2BIG;
+
+		if (!*out_folio) {
+			*out_folio = btrfs_alloc_compr_folio(fs_info);
+			if (!*out_folio)
+				return -ENOMEM;
+		}
+
+		kaddr = kmap_local_folio(*out_folio, foffset);
+		memcpy(kaddr, compressed_data + *total_out - copy_start, copy_len);
+		kunmap_local(kaddr);
+		ret = write_and_queue_folio(out_bio, out_folio, total_out, copy_len);
+		if (ret < 0)
+			return ret;
+	}
+
+	/*
+	 * Check if we can fit the next segment header into the remaining space
+	 * of the sector.
+	 */
+	sector_bytes_left = round_up(*total_out, sectorsize) - *total_out;
+	if (sector_bytes_left >= LZO_LEN || sector_bytes_left == 0)
+		return 0;
+
+	ASSERT(*out_folio);
+
+	/* The remaining size is not enough, pad it with zeros. */
+	folio_zero_range(*out_folio, offset_in_folio(*out_folio, *total_out), sector_bytes_left);
+	return write_and_queue_folio(out_bio, out_folio, total_out, sector_bytes_left);
+}
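+
+/*
+ * Example of the padding above, assuming a 4K sector size: if a segment ends
+ * at byte 4094, the 2 bytes left in the sector cannot hold the next 4-byte
+ * segment header, so 2 zero bytes are padded and the next header starts at
+ * the 4096 boundary.
+ */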
+
 int lzo_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
 			u64 start, struct folio **folios, unsigned long *out_folios,
 			unsigned long *total_in, unsigned long *total_out)
@@ -310,6 +461,113 @@ int lzo_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
 	return ret;
 }
 
+int lzo_compress_bio(struct list_head *ws, struct compressed_bio *cb)
+{
+	struct btrfs_inode *inode = cb->bbio.inode;
+	struct btrfs_fs_info *fs_info = inode->root->fs_info;
+	struct workspace *workspace = list_entry(ws, struct workspace, list);
+	struct bio *bio = &cb->bbio.bio;
+	const u64 start = cb->start;
+	const u32 len = cb->len;
+	const u32 sectorsize = fs_info->sectorsize;
+	const u32 min_folio_size = btrfs_min_folio_size(fs_info);
+	struct address_space *mapping = inode->vfs_inode.i_mapping;
+	struct folio *folio_in = NULL;
+	struct folio *folio_out = NULL;
+	char *sizes_ptr;
+	int ret = 0;
+	/* Points to the file offset of the input data. */
+	u64 cur_in = start;
+	/* Points to the current output byte. */
+	u32 total_out = 0;
+
+	ASSERT(bio->bi_iter.bi_size == 0);
+	ASSERT(len);
+
+	folio_out = btrfs_alloc_compr_folio(fs_info);
+	if (!folio_out)
+		return -ENOMEM;
+
+	/* Queue a header for the total compressed size first. */
+	ret = write_and_queue_folio(bio, &folio_out, &total_out, LZO_LEN);
+	/* The first header should not fail. */
+	ASSERT(ret == 0);
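+	/*
+	 * The bio is still empty and @folio_out was freshly allocated, so
+	 * bio_add_folio() cannot have hit the bvec limit here.
+	 */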
+
+	while (cur_in < start + len) {
+		char *data_in;
+		const u32 sectorsize_mask = sectorsize - 1;
+		u32 sector_off = (cur_in - start) & sectorsize_mask;
+		u32 in_len;
+		size_t out_len;
+
+		/* Get the input folio first. */
+		if (!folio_in) {
+			ret = btrfs_compress_filemap_get_folio(mapping, cur_in, &folio_in);
+			if (ret < 0)
+				goto out;
+		}
+
+		/* Compress at most one sector of data each time. */
+		in_len = min_t(u32, start + len - cur_in, sectorsize - sector_off);
+		ASSERT(in_len);
+		data_in = kmap_local_folio(folio_in, offset_in_folio(folio_in, cur_in));
+		ret = lzo1x_1_compress(data_in, in_len, workspace->cbuf, &out_len,
+				       workspace->mem);
+		kunmap_local(data_in);
+		if (unlikely(ret < 0)) {
+			/* lzo1x_1_compress() never fails. */
+			ret = -EIO;
+			goto out;
+		}
+
+		ret = copy_compressed_data_to_bio(fs_info, bio, workspace->cbuf, out_len,
+						  &folio_out, &total_out, len);
+		if (ret < 0)
+			goto out;
+
+		cur_in += in_len;
+
+		/*
+		 * Check if we're making the result bigger after two sectors,
+		 * and if so, give up.
+		 */
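+		/*
+		 * E.g. with a 4K sector size: once more than 8K of input has
+		 * been consumed, bail out as soon as the output is larger
+		 * than the input compressed so far.
+		 */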
+		if (cur_in - start > sectorsize * 2 && cur_in - start < total_out) {
+			ret = -E2BIG;
+			goto out;
+		}
+
+		/* Check if we have reached the input folio boundary. */
+		if (IS_ALIGNED(cur_in, min_folio_size)) {
+			folio_put(folio_in);
+			folio_in = NULL;
+		}
+	}
+	/*
+	 * The last folio is already queued. The bio is now responsible for
+	 * freeing those folios.
+	 */
+	folio_out = NULL;
+
+	/*
+	 * Store the size of all chunks of compressed data into the header
+	 * reserved at the very beginning.
+	 */
+	sizes_ptr = kmap_local_folio(bio_first_folio_all(bio), 0);
+	write_compress_length(sizes_ptr, total_out);
+	kunmap_local(sizes_ptr);
+out:
+	/*
+	 * We can only free a folio that has no part queued into the bio, as
+	 * any folio that is already queued will be released by the endio
+	 * function of the bio.
+	 */
+	if (folio_out && IS_ALIGNED(total_out, min_folio_size)) {
+		btrfs_free_compr_folio(folio_out);
+		folio_out = NULL;
+	}
+	if (folio_in)
+		folio_put(folio_in);
+	return ret;
+}
+
 static struct folio *get_current_folio(struct compressed_bio *cb, struct folio_iter *fi,
 				       u32 *cur_folio_index, u32 cur_in)
 {