@@ -598,6 +598,8 @@ int bch2_data_update_init(struct btree_trans *trans,
598598 i ++ ;
599599 }
600600
601+ unsigned durability_required = max (0 , (int ) (io_opts .data_replicas - durability_have ));
602+
601603 /*
602604 * If current extent durability is less than io_opts.data_replicas,
603605 * we're not trying to rereplicate the extent up to data_replicas here -
@@ -607,7 +609,7 @@ int bch2_data_update_init(struct btree_trans *trans,
607609 * rereplicate, currently, so that users don't get an unexpected -ENOSPC
608610 */
609611 if (!(m -> data_opts .write_flags & BCH_WRITE_CACHED ) &&
610- durability_have >= io_opts . data_replicas ) {
612+ ! durability_required ) {
611613 m -> data_opts .kill_ptrs |= m -> data_opts .rewrite_ptrs ;
612614 m -> data_opts .rewrite_ptrs = 0 ;
613615 /* if iter == NULL, it's just a promote */
@@ -616,11 +618,18 @@ int bch2_data_update_init(struct btree_trans *trans,
616618 goto done ;
617619 }
618620
619- m -> op .nr_replicas = min (durability_removing , io_opts . data_replicas - durability_have ) +
621+ m -> op .nr_replicas = min (durability_removing , durability_required ) +
620622 m -> data_opts .extra_replicas ;
621- m -> op .nr_replicas_required = m -> op .nr_replicas ;
622623
623- BUG_ON (!m -> op .nr_replicas );
624+ /*
625+ * If device(s) were set to durability=0 after data was written to them
626+ * we can end up with a durability=0 extent, and the normal algorithm
627+ * that tries not to increase durability doesn't work:
628+ */
629+ if (!(durability_have + durability_removing ))
630+ m -> op .nr_replicas = max ((unsigned ) m -> op .nr_replicas , 1 );
631+
632+ m -> op .nr_replicas_required = m -> op .nr_replicas ;
624633
625634 if (reserve_sectors ) {
626635 ret = bch2_disk_reservation_add (c , & m -> op .res , reserve_sectors ,
0 commit comments