121121#define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS)
122122#define OBJ_INDEX_MASK ((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
123123
124+ #define HUGE_BITS 1
124125#define FULLNESS_BITS 2
125126#define CLASS_BITS 8
126127#define ISOLATED_BITS 3
@@ -213,22 +214,6 @@ struct size_class {
213214 struct zs_size_stat stats ;
214215};
215216
/*
 * huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1
 *
 * Mark @page as holding a single "huge" object by setting the
 * PG_owner_priv_1 page flag (zsmalloc repurposes this flag since a
 * huge zspage consists of exactly one page).
 */
static void SetPageHugeObject(struct page *page)
{
	SetPageOwnerPriv1(page);
}
221-
/* Clear the huge-object marker (PG_owner_priv_1) on @page. */
static void ClearPageHugeObject(struct page *page)
{
	ClearPageOwnerPriv1(page);
}
226-
/* Return non-zero iff @page is marked as holding a huge object. */
static int PageHugeObject(struct page *page)
{
	return PageOwnerPriv1(page);
}
231-
232217/*
233218 * Placed within free objects to form a singly linked list.
234219 * For every zspage, zspage->freeobj gives head of this list.
@@ -278,6 +263,7 @@ struct zs_pool {
278263
279264struct zspage {
280265 struct {
266+ unsigned int huge :HUGE_BITS ;
281267 unsigned int fullness :FULLNESS_BITS ;
282268 unsigned int class :CLASS_BITS + 1 ;
283269 unsigned int isolated :ISOLATED_BITS ;
@@ -298,6 +284,17 @@ struct mapping_area {
298284 enum zs_mapmode vm_mm ; /* mapping mode */
299285};
300286
/*
 * huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1
 *
 * Mark @zspage as holding a single huge object. Replaces the old
 * per-page PG_owner_priv_1 flag with a bit in struct zspage itself,
 * so the property lives with the zspage rather than the page.
 */
static void SetZsHugePage(struct zspage *zspage)
{
	zspage->huge = 1;
}
292+
/* Return true iff @zspage holds a single huge object. */
static bool ZsHugePage(struct zspage *zspage)
{
	return zspage->huge;
}
297+
301298#ifdef CONFIG_COMPACTION
302299static int zs_register_migration (struct zs_pool * pool );
303300static void zs_unregister_migration (struct zs_pool * pool );
@@ -830,7 +827,9 @@ static struct zspage *get_zspage(struct page *page)
830827
/*
 * Return the next page in @page's zspage chain, or NULL for the last
 * (or only) page.
 *
 * Huge zspages consist of exactly one page, so they never have a next
 * page; for all other zspages the chain link is stored in page->index
 * (set up by create_page_chain()).
 */
static struct page *get_next_page(struct page *page)
{
	struct zspage *zspage = get_zspage(page);

	if (unlikely(ZsHugePage(zspage)))
		return NULL;

	return (struct page *)page->index;
}
@@ -880,8 +879,9 @@ static unsigned long handle_to_obj(unsigned long handle)
880879static bool obj_allocated (struct page * page , void * obj , unsigned long * phandle )
881880{
882881 unsigned long handle ;
882+ struct zspage * zspage = get_zspage (page );
883883
884- if (unlikely (PageHugeObject ( page ))) {
884+ if (unlikely (ZsHugePage ( zspage ))) {
885885 VM_BUG_ON_PAGE (!is_first_page (page ), page );
886886 handle = page -> index ;
887887 } else
@@ -920,7 +920,6 @@ static void reset_page(struct page *page)
920920 ClearPagePrivate (page );
921921 set_page_private (page , 0 );
922922 page_mapcount_reset (page );
923- ClearPageHugeObject (page );
924923 page -> index = 0 ;
925924}
926925
@@ -1062,7 +1061,7 @@ static void create_page_chain(struct size_class *class, struct zspage *zspage,
10621061 SetPagePrivate (page );
10631062 if (unlikely (class -> objs_per_zspage == 1 &&
10641063 class -> pages_per_zspage == 1 ))
1065- SetPageHugeObject ( page );
1064+ SetZsHugePage ( zspage );
10661065 } else {
10671066 prev_page -> index = (unsigned long )page ;
10681067 }
@@ -1307,7 +1306,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
13071306
13081307 ret = __zs_map_object (area , pages , off , class -> size );
13091308out :
1310- if (likely (!PageHugeObject ( page )))
1309+ if (likely (!ZsHugePage ( zspage )))
13111310 ret += ZS_HANDLE_SIZE ;
13121311
13131312 return ret ;
@@ -1395,7 +1394,7 @@ static unsigned long obj_malloc(struct zs_pool *pool,
13951394 vaddr = kmap_atomic (m_page );
13961395 link = (struct link_free * )vaddr + m_offset / sizeof (* link );
13971396 set_freeobj (zspage , link -> next >> OBJ_TAG_BITS );
1398- if (likely (!PageHugeObject ( m_page )))
1397+ if (likely (!ZsHugePage ( zspage )))
13991398 /* record handle in the header of allocated chunk */
14001399 link -> handle = handle ;
14011400 else
@@ -1496,7 +1495,10 @@ static void obj_free(int class_size, unsigned long obj)
14961495
14971496 /* Insert this object in containing zspage's freelist */
14981497 link = (struct link_free * )(vaddr + f_offset );
1499- link -> next = get_freeobj (zspage ) << OBJ_TAG_BITS ;
1498+ if (likely (!ZsHugePage (zspage )))
1499+ link -> next = get_freeobj (zspage ) << OBJ_TAG_BITS ;
1500+ else
1501+ f_page -> index = 0 ;
15001502 kunmap_atomic (vaddr );
15011503 set_freeobj (zspage , f_objidx );
15021504 mod_zspage_inuse (zspage , -1 );
@@ -1867,7 +1869,7 @@ static void replace_sub_page(struct size_class *class, struct zspage *zspage,
18671869
18681870 create_page_chain (class , zspage , pages );
18691871 set_first_obj_offset (newpage , get_first_obj_offset (oldpage ));
1870- if (unlikely (PageHugeObject ( oldpage )))
1872+ if (unlikely (ZsHugePage ( zspage )))
18711873 newpage -> index = oldpage -> index ;
18721874 __SetPageMovable (newpage , page_mapping (oldpage ));
18731875}
0 commit comments