@@ -1454,13 +1454,16 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
 static void kcryptd_async_done(struct crypto_async_request *async_req,
                                int error);
 
-static void crypt_alloc_req_skcipher(struct crypt_config *cc,
+static int crypt_alloc_req_skcipher(struct crypt_config *cc,
                                      struct convert_context *ctx)
 {
         unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
 
-        if (!ctx->r.req)
-                ctx->r.req = mempool_alloc(&cc->req_pool, GFP_NOIO);
+        if (!ctx->r.req) {
+                ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
+                if (!ctx->r.req)
+                        return -ENOMEM;
+        }
 
         skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);
 
@@ -1471,13 +1474,18 @@ static void crypt_alloc_req_skcipher(struct crypt_config *cc,
         skcipher_request_set_callback(ctx->r.req,
             CRYPTO_TFM_REQ_MAY_BACKLOG,
             kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
+
+        return 0;
 }
 
-static void crypt_alloc_req_aead(struct crypt_config *cc,
+static int crypt_alloc_req_aead(struct crypt_config *cc,
                                  struct convert_context *ctx)
 {
-        if (!ctx->r.req_aead)
-                ctx->r.req_aead = mempool_alloc(&cc->req_pool, GFP_NOIO);
+        if (!ctx->r.req) {
+                ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
+                if (!ctx->r.req)
+                        return -ENOMEM;
+        }
 
         aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);
 
@@ -1488,15 +1496,17 @@ static void crypt_alloc_req_aead(struct crypt_config *cc,
         aead_request_set_callback(ctx->r.req_aead,
             CRYPTO_TFM_REQ_MAY_BACKLOG,
             kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
+
+        return 0;
 }
 
-static void crypt_alloc_req(struct crypt_config *cc,
+static int crypt_alloc_req(struct crypt_config *cc,
                             struct convert_context *ctx)
 {
         if (crypt_integrity_aead(cc))
-                crypt_alloc_req_aead(cc, ctx);
+                return crypt_alloc_req_aead(cc, ctx);
         else
-                crypt_alloc_req_skcipher(cc, ctx);
+                return crypt_alloc_req_skcipher(cc, ctx);
 }
 
 static void crypt_free_req_skcipher(struct crypt_config *cc,
@@ -1529,17 +1539,28 @@ static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_
  * Encrypt / decrypt data from one bio to another one (can be the same one)
  */
 static blk_status_t crypt_convert(struct crypt_config *cc,
-                         struct convert_context *ctx, bool atomic)
+                         struct convert_context *ctx, bool atomic, bool reset_pending)
 {
         unsigned int tag_offset = 0;
         unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
         int r;
 
-        atomic_set(&ctx->cc_pending, 1);
+        /*
+         * if reset_pending is set we are dealing with the bio for the first time,
+         * else we're continuing to work on the previous bio, so don't mess with
+         * the cc_pending counter
+         */
+        if (reset_pending)
+                atomic_set(&ctx->cc_pending, 1);
 
         while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
 
-                crypt_alloc_req(cc, ctx);
+                r = crypt_alloc_req(cc, ctx);
+                if (r) {
+                        complete(&ctx->restart);
+                        return BLK_STS_DEV_RESOURCE;
+                }
+
                 atomic_inc(&ctx->cc_pending);
 
                 if (crypt_integrity_aead(cc))
@@ -1553,7 +1574,25 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
                  * but the driver request queue is full, let's wait.
                  */
                 case -EBUSY:
-                        wait_for_completion(&ctx->restart);
+                        if (in_interrupt()) {
+                                if (try_wait_for_completion(&ctx->restart)) {
+                                        /*
+                                         * we don't have to block to wait for completion,
+                                         * so proceed
+                                         */
+                                } else {
+                                        /*
+                                         * we can't wait for completion without blocking
+                                         * exit and continue processing in a workqueue
+                                         */
+                                        ctx->r.req = NULL;
+                                        ctx->cc_sector += sector_step;
+                                        tag_offset++;
+                                        return BLK_STS_DEV_RESOURCE;
+                                }
+                        } else {
+                                wait_for_completion(&ctx->restart);
+                        }
                         reinit_completion(&ctx->restart);
                         fallthrough;
                 /*
@@ -1691,6 +1730,12 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
         atomic_inc(&io->io_pending);
 }
 
+static void kcryptd_io_bio_endio(struct work_struct *work)
+{
+        struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+        bio_endio(io->base_bio);
+}
+
 /*
  * One of the bios was finished. Check for completion of
  * the whole request and correctly clean up the buffer.
@@ -1713,7 +1758,23 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
                 kfree(io->integrity_metadata);
 
         base_bio->bi_status = error;
-        bio_endio(base_bio);
+
+        /*
+         * If we are running this function from our tasklet,
+         * we can't call bio_endio() here, because it will call
+         * clone_endio() from dm.c, which in turn will
+         * free the current struct dm_crypt_io structure with
+         * our tasklet. In this case we need to delay bio_endio()
+         * execution to after the tasklet is done and dequeued.
+         */
+        if (tasklet_trylock(&io->tasklet)) {
+                tasklet_unlock(&io->tasklet);
+                bio_endio(base_bio);
+                return;
+        }
+
+        INIT_WORK(&io->work, kcryptd_io_bio_endio);
+        queue_work(cc->io_queue, &io->work);
 }
 
 /*
@@ -1945,6 +2006,37 @@ static bool kcryptd_crypt_write_inline(struct crypt_config *cc,
         }
 }
 
+static void kcryptd_crypt_write_continue(struct work_struct *work)
+{
+        struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+        struct crypt_config *cc = io->cc;
+        struct convert_context *ctx = &io->ctx;
+        int crypt_finished;
+        sector_t sector = io->sector;
+        blk_status_t r;
+
+        wait_for_completion(&ctx->restart);
+        reinit_completion(&ctx->restart);
+
+        r = crypt_convert(cc, &io->ctx, true, false);
+        if (r)
+                io->error = r;
+        crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
+        if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
+                /* Wait for completion signaled by kcryptd_async_done() */
+                wait_for_completion(&ctx->restart);
+                crypt_finished = 1;
+        }
+
+        /* Encryption was already finished, submit io now */
+        if (crypt_finished) {
+                kcryptd_crypt_write_io_submit(io, 0);
+                io->sector = sector;
+        }
+
+        crypt_dec_pending(io);
+}
+
 static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 {
         struct crypt_config *cc = io->cc;
@@ -1973,7 +2065,17 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 
         crypt_inc_pending(io);
         r = crypt_convert(cc, ctx,
-                          test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags));
+                          test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true);
+        /*
+         * Crypto API backlogged the request, because its queue was full
+         * and we're in softirq context, so continue from a workqueue
+         * (TODO: is it actually possible to be in softirq in the write path?)
+         */
+        if (r == BLK_STS_DEV_RESOURCE) {
+                INIT_WORK(&io->work, kcryptd_crypt_write_continue);
+                queue_work(cc->crypt_queue, &io->work);
+                return;
+        }
         if (r)
                 io->error = r;
         crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
@@ -1998,6 +2100,25 @@ static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
         crypt_dec_pending(io);
 }
 
+static void kcryptd_crypt_read_continue(struct work_struct *work)
+{
+        struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+        struct crypt_config *cc = io->cc;
+        blk_status_t r;
+
+        wait_for_completion(&io->ctx.restart);
+        reinit_completion(&io->ctx.restart);
+
+        r = crypt_convert(cc, &io->ctx, true, false);
+        if (r)
+                io->error = r;
+
+        if (atomic_dec_and_test(&io->ctx.cc_pending))
+                kcryptd_crypt_read_done(io);
+
+        crypt_dec_pending(io);
+}
+
 static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 {
         struct crypt_config *cc = io->cc;
@@ -2009,7 +2130,16 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
                            io->sector);
 
         r = crypt_convert(cc, &io->ctx,
-                          test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags));
+                          test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
+        /*
+         * Crypto API backlogged the request, because its queue was full
+         * and we're in softirq context, so continue from a workqueue
+         */
+        if (r == BLK_STS_DEV_RESOURCE) {
+                INIT_WORK(&io->work, kcryptd_crypt_read_continue);
+                queue_work(cc->crypt_queue, &io->work);
+                return;
+        }
         if (r)
                 io->error = r;
 
@@ -2091,8 +2221,12 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io)
 
         if ((bio_data_dir(io->base_bio) == READ && test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) ||
             (bio_data_dir(io->base_bio) == WRITE && test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))) {
-                if (in_irq()) {
-                        /* Crypto API's "skcipher_walk_first() refuses to work in hard IRQ context */
+                /*
+                 * in_irq(): Crypto API's skcipher_walk_first() refuses to work in hard IRQ context.
+                 * irqs_disabled(): the kernel may run some IO completion from the idle thread, but
+                 * it is being executed with irqs disabled.
+                 */
+                if (in_irq() || irqs_disabled()) {
                         tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
                         tasklet_schedule(&io->tasklet);
                         return;