Skip to content

Commit ce51c69

Browse files
author
Mikulas Patocka
committed
dm-crypt: enable DM_TARGET_ATOMIC_WRITES
Allow handling of bios with REQ_ATOMIC flag set. Don't split these bios and fail them if they overrun the hard limit "BIO_MAX_VECS << PAGE_SHIFT". In order to simplify the code, this commit joins the logic that avoids splitting emulated zone append bios with the logic that avoids splitting atomic write bios. Signed-off-by: John Garry <john.g.garry@oracle.com> Signed-off-by: Mikulas Patocka <mpatocka@redhat.com> Tested-by: John Garry <john.g.garry@oracle.com>
1 parent de67c13 commit ce51c69

1 file changed

Lines changed: 24 additions & 15 deletions

File tree

drivers/md/dm-crypt.c

Lines changed: 24 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -254,22 +254,15 @@ static unsigned int max_write_size = 0;
254254
module_param(max_write_size, uint, 0644);
255255
MODULE_PARM_DESC(max_write_size, "Maximum size of a write request");
256256

257-
static unsigned get_max_request_sectors(struct dm_target *ti, struct bio *bio)
257+
static unsigned get_max_request_sectors(struct dm_target *ti, struct bio *bio, bool no_split)
258258
{
259259
struct crypt_config *cc = ti->private;
260260
unsigned val, sector_align;
261261
bool wrt = op_is_write(bio_op(bio));
262262

263-
if (wrt) {
264-
/*
265-
* For zoned devices, splitting write operations creates the
266-
* risk of deadlocking queue freeze operations with zone write
267-
plugging BIO work when the remainder of a split BIO is
268-
* issued. So always allow the entire BIO to proceed.
269-
*/
270-
if (ti->emulate_zone_append)
271-
return bio_sectors(bio);
272-
263+
if (no_split) {
264+
val = -1;
265+
} else if (wrt) {
273266
val = min_not_zero(READ_ONCE(max_write_size),
274267
DM_CRYPT_DEFAULT_MAX_WRITE_SIZE);
275268
} else {
@@ -3462,6 +3455,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
34623455
struct dm_crypt_io *io;
34633456
struct crypt_config *cc = ti->private;
34643457
unsigned max_sectors;
3458+
bool no_split;
34653459

34663460
/*
34673461
* If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues.
@@ -3479,10 +3473,20 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
34793473

34803474
/*
34813475
* Check if bio is too large, split as needed.
3476+
*
3477+
* For zoned devices, splitting write operations creates the
3478+
* risk of deadlocking queue freeze operations with zone write
3479+
plugging BIO work when the remainder of a split BIO is
3480+
* issued. So always allow the entire BIO to proceed.
34823481
*/
3483-
max_sectors = get_max_request_sectors(ti, bio);
3484-
if (unlikely(bio_sectors(bio) > max_sectors))
3482+
no_split = (ti->emulate_zone_append && op_is_write(bio_op(bio))) ||
3483+
(bio->bi_opf & REQ_ATOMIC);
3484+
max_sectors = get_max_request_sectors(ti, bio, no_split);
3485+
if (unlikely(bio_sectors(bio) > max_sectors)) {
3486+
if (unlikely(no_split))
3487+
return DM_MAPIO_KILL;
34853488
dm_accept_partial_bio(bio, max_sectors);
3489+
}
34863490

34873491
/*
34883492
* Ensure that bio is a multiple of internal sector encryption size
@@ -3728,15 +3732,20 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
37283732
if (ti->emulate_zone_append)
37293733
limits->max_hw_sectors = min(limits->max_hw_sectors,
37303734
BIO_MAX_VECS << PAGE_SECTORS_SHIFT);
3735+
3736+
limits->atomic_write_hw_unit_max = min(limits->atomic_write_hw_unit_max,
3737+
BIO_MAX_VECS << PAGE_SHIFT);
3738+
limits->atomic_write_hw_max = min(limits->atomic_write_hw_max,
3739+
BIO_MAX_VECS << PAGE_SHIFT);
37313740
}
37323741

37333742
static struct target_type crypt_target = {
37343743
.name = "crypt",
3735-
.version = {1, 28, 0},
3744+
.version = {1, 29, 0},
37363745
.module = THIS_MODULE,
37373746
.ctr = crypt_ctr,
37383747
.dtr = crypt_dtr,
3739-
.features = DM_TARGET_ZONED_HM,
3748+
.features = DM_TARGET_ZONED_HM | DM_TARGET_ATOMIC_WRITES,
37403749
.report_zones = crypt_report_zones,
37413750
.map = crypt_map,
37423751
.status = crypt_status,

0 commit comments

Comments
 (0)