@@ -1146,14 +1146,15 @@ static ssize_t bd_stat_show(struct device *dev,
 static ssize_t debug_stat_show(struct device *dev,
                 struct device_attribute *attr, char *buf)
 {
-        int version = 2;
+        int version = 1;
         struct zram *zram = dev_to_zram(dev);
         ssize_t ret;
 
         down_read(&zram->init_lock);
         ret = scnprintf(buf, PAGE_SIZE,
-                        "version: %d\n%8llu\n",
+                        "version: %d\n%8llu %8llu\n",
                         version,
+                        (u64)atomic64_read(&zram->stats.writestall),
                         (u64)atomic64_read(&zram->stats.miss_free));
         up_read(&zram->init_lock);
 
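This first hunk rolls the debug_stat format version back to 1 and restores the writestall counter, so the attribute once again prints two columns (writestall, then miss_free) on the line after the version header. As a rough illustration, a userspace reader for the restored layout could look like the sketch below; the zram0 device path and the field handling are assumptions for illustration, not part of the patch.

/*
 * Hypothetical reader for the restored version-1 debug_stat layout:
 * "version: 1\n<writestall> <miss_free>\n".  The zram0 path is an
 * assumption; adjust for your device.
 */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/block/zram0/debug_stat", "r");
        int version;
        unsigned long long writestall, miss_free;

        if (!f)
                return 1;
        /* whitespace in the format string also consumes the newline */
        if (fscanf(f, "version: %d %llu %llu",
                   &version, &writestall, &miss_free) == 3)
                printf("v%d writestall=%llu miss_free=%llu\n",
                       version, writestall, miss_free);
        fclose(f);
        return 0;
}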
@@ -1351,7 +1352,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
 {
         int ret = 0;
         unsigned long alloced_pages;
-        unsigned long handle = 0;
+        unsigned long handle = -ENOMEM;
         unsigned int comp_len = 0;
         void *src, *dst, *mem;
         struct zcomp_strm *zstrm;
@@ -1369,6 +1370,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
         }
         kunmap_atomic(mem);
 
+compress_again:
         zstrm = zcomp_stream_get(zram->comp);
         src = kmap_atomic(page);
         ret = zcomp_compress(zstrm, src, &comp_len);
@@ -1377,20 +1379,39 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
         if (unlikely(ret)) {
                 zcomp_stream_put(zram->comp);
                 pr_err("Compression failed! err=%d\n", ret);
+                zs_free(zram->mem_pool, handle);
                 return ret;
         }
 
         if (comp_len >= huge_class_size)
                 comp_len = PAGE_SIZE;
-
-        handle = zs_malloc(zram->mem_pool, comp_len,
-                        __GFP_KSWAPD_RECLAIM |
-                        __GFP_NOWARN |
-                        __GFP_HIGHMEM |
-                        __GFP_MOVABLE);
-
+        /*
+         * handle allocation has 2 paths:
+         * a) fast path is executed with preemption disabled (for
+         *  per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
+         *  since we can't sleep;
+         * b) slow path enables preemption and attempts to allocate
+         *  the page with __GFP_DIRECT_RECLAIM bit set. we have to
+         *  put per-cpu compression stream and, thus, to re-do
+         *  the compression once handle is allocated.
+         *
+         * if we have a 'non-null' handle here then we are coming
+         * from the slow path and handle has already been allocated.
+         */
+        if (IS_ERR((void *)handle))
+                handle = zs_malloc(zram->mem_pool, comp_len,
+                                __GFP_KSWAPD_RECLAIM |
+                                __GFP_NOWARN |
+                                __GFP_HIGHMEM |
+                                __GFP_MOVABLE);
         if (IS_ERR((void *)handle)) {
                 zcomp_stream_put(zram->comp);
+                atomic64_inc(&zram->stats.writestall);
+                handle = zs_malloc(zram->mem_pool, comp_len,
+                                GFP_NOIO | __GFP_HIGHMEM |
+                                __GFP_MOVABLE);
+                if (!IS_ERR((void *)handle))
+                        goto compress_again;
                 return PTR_ERR((void *)handle);
         }
 
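The three hunks above restore the double compression logic in __zram_bvec_write(): handle starts out as an error value, the fast zs_malloc() runs without __GFP_DIRECT_RECLAIM while the per-CPU stream is held, and on failure the stream is dropped, writestall is bumped, and a sleeping GFP_NOIO allocation precedes a jump back to compress_again to redo the compression. The toy userspace analogue below mirrors that control flow with malloc() standing in for zs_malloc(); every name in it is invented for illustration and is not kernel API.

/* Minimal userspace analogue of the restored two-path allocation. */
#include <stdio.h>
#include <stdlib.h>

static int compress_count;

static size_t toy_compress(void)
{
        compress_count++;        /* models zcomp_compress() */
        return 64;               /* pretend compressed length */
}

static void *alloc_fast(size_t len)
{
        (void)len;
        return NULL;             /* force the slow path once */
}

static void *alloc_slow(size_t len)
{
        return malloc(len);      /* models the GFP_NOIO retry */
}

static int toy_write_page(void)
{
        void *handle = NULL;     /* models handle = -ENOMEM */
        size_t comp_len;

compress_again:
        comp_len = toy_compress();      /* "holds" the per-cpu stream */

        if (!handle)
                handle = alloc_fast(comp_len);
        if (!handle) {
                /*
                 * "Put" the stream and allocate with reclaim allowed;
                 * the stream buffer is gone, so the compressed data
                 * must be regenerated, hence the goto.
                 */
                handle = alloc_slow(comp_len);
                if (handle)
                        goto compress_again;
                return -1;
        }
        free(handle);
        return 0;
}

int main(void)
{
        toy_write_page();
        printf("compressed %d time(s)\n", compress_count);     /* 2 */
        return 0;
}

Note how the second pass skips reallocation because handle is already valid, matching the IS_ERR() guard in front of the fast-path zs_malloc() in the patch.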
@@ -1948,6 +1969,7 @@ static int zram_add(void)
         if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
                 blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);
 
+        blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue);
         ret = device_add_disk(NULL, zram->disk, zram_disk_groups);
         if (ret)
                 goto out_cleanup_disk;
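Setting QUEUE_FLAG_STABLE_WRITES tells the upper layers not to modify a page while its write is in flight. That matters here because the slow path recompresses the page into a handle sized by the first compression attempt; if the page changed between the two passes, the new compressed length could exceed the allocation. A quick sanity check from userspace could read the queue's stable_writes attribute, as in the sketch below; the zram0 path is an assumption.

/* Sketch: confirm the queue advertises stable writes. */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/block/zram0/queue/stable_writes", "r");
        int stable = -1;

        if (f) {
                if (fscanf(f, "%d", &stable) != 1)
                        stable = -1;
                fclose(f);
        }
        printf("stable_writes: %d\n", stable);  /* expect 1 */
        return 0;
}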