@@ -254,10 +254,6 @@ struct zs_pool {
 #ifdef CONFIG_COMPACTION
	struct inode *inode;
	struct work_struct free_work;
-	/* A wait queue for when migration races with async_free_zspage() */
-	struct wait_queue_head migration_wait;
-	atomic_long_t isolated_pages;
-	bool destroying;
 #endif
 };
 
@@ -454,11 +450,6 @@ MODULE_ALIAS("zpool-zsmalloc");
 /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
 static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
 
-static bool is_zspage_isolated(struct zspage *zspage)
-{
-	return zspage->isolated;
-}
-
 static __maybe_unused int is_first_page(struct page *page)
 {
	return PagePrivate(page);
@@ -744,7 +735,6 @@ static void remove_zspage(struct size_class *class,
				enum fullness_group fullness)
 {
	VM_BUG_ON(list_empty(&class->fullness_list[fullness]));
-	VM_BUG_ON(is_zspage_isolated(zspage));
 
	list_del_init(&zspage->list);
	class_stat_dec(class, fullness, 1);
@@ -770,13 +760,9 @@ static enum fullness_group fix_fullness_group(struct size_class *class,
	if (newfg == currfg)
		goto out;
 
-	if (!is_zspage_isolated(zspage)) {
-		remove_zspage(class, zspage, currfg);
-		insert_zspage(class, zspage, newfg);
-	}
-
+	remove_zspage(class, zspage, currfg);
+	insert_zspage(class, zspage, newfg);
	set_zspage_mapping(zspage, class_idx, newfg);
-
 out:
	return newfg;
 }
@@ -1511,7 +1497,6 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
	unsigned long obj;
	struct size_class *class;
	enum fullness_group fullness;
-	bool isolated;
 
	if (unlikely(!handle))
		return;
@@ -1533,11 +1518,9 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
		goto out;
	}
 
-	isolated = is_zspage_isolated(zspage);
	migrate_read_unlock(zspage);
	/* If zspage is isolated, zs_page_putback will free the zspage */
-	if (likely(!isolated))
-		free_zspage(pool, class, zspage);
+	free_zspage(pool, class, zspage);
 out:
 
	spin_unlock(&class->lock);
@@ -1718,7 +1701,6 @@ static struct zspage *isolate_zspage(struct size_class *class, bool source)
		zspage = list_first_entry_or_null(&class->fullness_list[fg[i]],
							struct zspage, list);
		if (zspage) {
-			VM_BUG_ON(is_zspage_isolated(zspage));
			remove_zspage(class, zspage, fg[i]);
			return zspage;
		}
@@ -1739,8 +1721,6 @@ static enum fullness_group putback_zspage(struct size_class *class,
 {
	enum fullness_group fullness;
 
-	VM_BUG_ON(is_zspage_isolated(zspage));
-
	fullness = get_fullness_group(class, zspage);
	insert_zspage(class, zspage, fullness);
	set_zspage_mapping(zspage, class->index, fullness);
@@ -1822,35 +1802,10 @@ static void inc_zspage_isolation(struct zspage *zspage)
 
 static void dec_zspage_isolation(struct zspage *zspage)
 {
+	VM_BUG_ON(zspage->isolated == 0);
	zspage->isolated--;
 }
 
-static void putback_zspage_deferred(struct zs_pool *pool,
-				    struct size_class *class,
-				    struct zspage *zspage)
-{
-	enum fullness_group fg;
-
-	fg = putback_zspage(class, zspage);
-	if (fg == ZS_EMPTY)
-		schedule_work(&pool->free_work);
-
-}
-
-static inline void zs_pool_dec_isolated(struct zs_pool *pool)
-{
-	VM_BUG_ON(atomic_long_read(&pool->isolated_pages) <= 0);
-	atomic_long_dec(&pool->isolated_pages);
-	/*
-	 * Checking pool->destroying must happen after atomic_long_dec()
-	 * for pool->isolated_pages above. Paired with the smp_mb() in
-	 * zs_unregister_migration().
-	 */
-	smp_mb__after_atomic();
-	if (atomic_long_read(&pool->isolated_pages) == 0 && pool->destroying)
-		wake_up_all(&pool->migration_wait);
-}
-
 static void replace_sub_page(struct size_class *class, struct zspage *zspage,
				struct page *newpage, struct page *oldpage)
 {
@@ -1876,10 +1831,7 @@ static void replace_sub_page(struct size_class *class, struct zspage *zspage,
 
 static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
 {
-	struct zs_pool *pool;
-	struct size_class *class;
	struct zspage *zspage;
-	struct address_space *mapping;
 
	/*
	 * Page is locked so zspage couldn't be destroyed. For detail, look at
@@ -1889,39 +1841,9 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
	VM_BUG_ON_PAGE(PageIsolated(page), page);
 
	zspage = get_zspage(page);
-
-	mapping = page_mapping(page);
-	pool = mapping->private_data;
-
-	class = zspage_class(pool, zspage);
-
-	spin_lock(&class->lock);
-	if (get_zspage_inuse(zspage) == 0) {
-		spin_unlock(&class->lock);
-		return false;
-	}
-
-	/* zspage is isolated for object migration */
-	if (list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
-		spin_unlock(&class->lock);
-		return false;
-	}
-
-	/*
-	 * If this is first time isolation for the zspage, isolate zspage from
-	 * size_class to prevent further object allocation from the zspage.
-	 */
-	if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
-		enum fullness_group fullness;
-		unsigned int class_idx;
-
-		get_zspage_mapping(zspage, &class_idx, &fullness);
-		atomic_long_inc(&pool->isolated_pages);
-		remove_zspage(class, zspage, fullness);
-	}
-
+	migrate_write_lock(zspage);
	inc_zspage_isolation(zspage);
-	spin_unlock(&class->lock);
+	migrate_write_unlock(zspage);
 
	return true;
 }
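
Pieced together from the context and "+" lines of the two hunks above (eliding whatever unchanged lines fall between them), the isolate callback now reduces to:

static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct zspage *zspage;

	/*
	 * Page is locked so zspage couldn't be destroyed. For detail, look at
	 * lock_zspage in free_zspage.
	 */
	VM_BUG_ON_PAGE(PageIsolated(page), page);

	zspage = get_zspage(page);
	migrate_write_lock(zspage);
	inc_zspage_isolation(zspage);
	migrate_write_unlock(zspage);

	return true;
}

All the size_class bookkeeping is gone: the zspage can stay on its fullness list while isolated, and zspage->isolated is serialized by the per-zspage migrate write lock rather than by class->lock.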
@@ -2004,21 +1926,6 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
 
	dec_zspage_isolation(zspage);
 
-	/*
-	 * Page migration is done so let's putback isolated zspage to
-	 * the list if @page is final isolated subpage in the zspage.
-	 */
-	if (!is_zspage_isolated(zspage)) {
-		/*
-		 * We cannot race with zs_destroy_pool() here because we wait
-		 * for isolation to hit zero before we start destroying.
-		 * Also, we ensure that everyone can see pool->destroying before
-		 * we start waiting.
-		 */
-		putback_zspage_deferred(pool, class, zspage);
-		zs_pool_dec_isolated(pool);
-	}
-
	if (page_zone(newpage) != page_zone(page)) {
		dec_zone_page_state(page, NR_ZSPAGES);
		inc_zone_page_state(newpage, NR_ZSPAGES);
@@ -2046,30 +1953,15 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
 
 static void zs_page_putback(struct page *page)
 {
-	struct zs_pool *pool;
-	struct size_class *class;
-	struct address_space *mapping;
	struct zspage *zspage;
 
	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);
 
	zspage = get_zspage(page);
-	mapping = page_mapping(page);
-	pool = mapping->private_data;
-	class = zspage_class(pool, zspage);
-
-	spin_lock(&class->lock);
+	migrate_write_lock(zspage);
	dec_zspage_isolation(zspage);
-	if (!is_zspage_isolated(zspage)) {
-		/*
-		 * Due to page_lock, we cannot free zspage immediately
-		 * so let's defer.
-		 */
-		putback_zspage_deferred(pool, class, zspage);
-		zs_pool_dec_isolated(pool);
-	}
-	spin_unlock(&class->lock);
+	migrate_write_unlock(zspage);
 }
 
 static const struct address_space_operations zsmalloc_aops = {
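
The putback side becomes the mirror image; per the hunk above, the whole function is now:

static void zs_page_putback(struct page *page)
{
	struct zspage *zspage;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);

	zspage = get_zspage(page);
	migrate_write_lock(zspage);
	dec_zspage_isolation(zspage);
	migrate_write_unlock(zspage);
}

No deferred free is needed here because the zspage was never taken off its fullness list, and the new VM_BUG_ON in dec_zspage_isolation() will catch an unbalanced putback.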
@@ -2091,36 +1983,8 @@ static int zs_register_migration(struct zs_pool *pool)
	return 0;
 }
 
-static bool pool_isolated_are_drained(struct zs_pool *pool)
-{
-	return atomic_long_read(&pool->isolated_pages) == 0;
-}
-
-/* Function for resolving migration */
-static void wait_for_isolated_drain(struct zs_pool *pool)
-{
-
-	/*
-	 * We're in the process of destroying the pool, so there are no
-	 * active allocations. zs_page_isolate() fails for completely free
-	 * zspages, so we need only wait for the zs_pool's isolated
-	 * count to hit zero.
-	 */
-	wait_event(pool->migration_wait,
-		   pool_isolated_are_drained(pool));
-}
-
 static void zs_unregister_migration(struct zs_pool *pool)
 {
-	pool->destroying = true;
-	/*
-	 * We need a memory barrier here to ensure global visibility of
-	 * pool->destroying. Thus pool->isolated pages will either be 0 in which
-	 * case we don't care, or it will be > 0 and pool->destroying will
-	 * ensure that we wake up once isolation hits 0.
-	 */
-	smp_mb();
-	wait_for_isolated_drain(pool); /* This can block */
	flush_work(&pool->free_work);
	iput(pool->inode);
 }
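
And with no isolated_pages count left to drain, teardown per this hunk is just:

static void zs_unregister_migration(struct zs_pool *pool)
{
	flush_work(&pool->free_work);
	iput(pool->inode);
}

flush_work() still settles any frees queued on free_work before the inode is dropped.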
@@ -2150,7 +2014,6 @@ static void async_free_zspage(struct work_struct *work)
		spin_unlock(&class->lock);
	}
 
-
	list_for_each_entry_safe(zspage, tmp, &free_pages, list) {
		list_del(&zspage->list);
		lock_zspage(zspage);
@@ -2363,10 +2226,6 @@ struct zs_pool *zs_create_pool(const char *name)
	if (!pool->name)
		goto err;
 
-#ifdef CONFIG_COMPACTION
-	init_waitqueue_head(&pool->migration_wait);
-#endif
-
	if (create_cache(pool))
		goto err;
 