@@ -759,34 +759,29 @@ static __always_inline void slab_unlock(struct slab *slab)
759759}
760760
761761static inline bool
762- __update_freelist_fast (struct slab * slab ,
763- void * freelist_old , unsigned long counters_old ,
764- void * freelist_new , unsigned long counters_new )
762+ __update_freelist_fast (struct slab * slab , struct freelist_counters * old ,
763+ struct freelist_counters * new )
765764{
766765#ifdef system_has_freelist_aba
767- struct freelist_counters old = { .freelist = freelist_old , .counters = counters_old };
768- struct freelist_counters new = { .freelist = freelist_new , .counters = counters_new };
769-
770766 return try_cmpxchg_freelist (& slab -> freelist_counters ,
771- & old . freelist_counters ,
772- new . freelist_counters );
767+ & old -> freelist_counters ,
768+ new -> freelist_counters );
773769#else
774770 return false;
775771#endif
776772}
777773
778774static inline bool
779- __update_freelist_slow (struct slab * slab ,
780- void * freelist_old , unsigned long counters_old ,
781- void * freelist_new , unsigned long counters_new )
775+ __update_freelist_slow (struct slab * slab , struct freelist_counters * old ,
776+ struct freelist_counters * new )
782777{
783778 bool ret = false;
784779
785780 slab_lock (slab );
786- if (slab -> freelist == freelist_old &&
787- slab -> counters == counters_old ) {
788- slab -> freelist = freelist_new ;
789- slab -> counters = counters_new ;
781+ if (slab -> freelist == old -> freelist &&
782+ slab -> counters == old -> counters ) {
783+ slab -> freelist = new -> freelist ;
784+ slab -> counters = new -> counters ;
790785 ret = true;
791786 }
792787 slab_unlock (slab );
@@ -802,22 +797,18 @@ __update_freelist_slow(struct slab *slab,
802797 * interrupt the operation.
803798 */
804799static inline bool __slab_update_freelist (struct kmem_cache * s , struct slab * slab ,
805- void * freelist_old , unsigned long counters_old ,
806- void * freelist_new , unsigned long counters_new ,
807- const char * n )
800+ struct freelist_counters * old , struct freelist_counters * new , const char * n )
808801{
809802 bool ret ;
810803
811804 if (USE_LOCKLESS_FAST_PATH ())
812805 lockdep_assert_irqs_disabled ();
813806
814- if (s -> flags & __CMPXCHG_DOUBLE ) {
815- ret = __update_freelist_fast (slab , freelist_old , counters_old ,
816- freelist_new , counters_new );
817- } else {
818- ret = __update_freelist_slow (slab , freelist_old , counters_old ,
819- freelist_new , counters_new );
820- }
807+ if (s -> flags & __CMPXCHG_DOUBLE )
808+ ret = __update_freelist_fast (slab , old , new );
809+ else
810+ ret = __update_freelist_slow (slab , old , new );
811+
821812 if (likely (ret ))
822813 return true;
823814
@@ -832,21 +823,17 @@ static inline bool __slab_update_freelist(struct kmem_cache *s, struct slab *sla
832823}
833824
834825static inline bool slab_update_freelist (struct kmem_cache * s , struct slab * slab ,
835- void * freelist_old , unsigned long counters_old ,
836- void * freelist_new , unsigned long counters_new ,
837- const char * n )
826+ struct freelist_counters * old , struct freelist_counters * new , const char * n )
838827{
839828 bool ret ;
840829
841830 if (s -> flags & __CMPXCHG_DOUBLE ) {
842- ret = __update_freelist_fast (slab , freelist_old , counters_old ,
843- freelist_new , counters_new );
831+ ret = __update_freelist_fast (slab , old , new );
844832 } else {
845833 unsigned long flags ;
846834
847835 local_irq_save (flags );
848- ret = __update_freelist_slow (slab , freelist_old , counters_old ,
849- freelist_new , counters_new );
836+ ret = __update_freelist_slow (slab , old , new );
850837 local_irq_restore (flags );
851838 }
852839 if (likely (ret ))
@@ -3774,10 +3761,7 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
37743761 } else {
37753762 new .freelist = old .freelist ;
37763763 }
3777- } while (!slab_update_freelist (s , slab ,
3778- old .freelist , old .counters ,
3779- new .freelist , new .counters ,
3780- "unfreezing slab" ));
3764+ } while (!slab_update_freelist (s , slab , & old , & new , "unfreezing slab" ));
37813765
37823766 /*
37833767 * Stage three: Manipulate the slab list based on the updated state.
@@ -4389,54 +4373,47 @@ __update_cpu_freelist_fast(struct kmem_cache *s,
43894373 */
43904374static inline void * get_freelist (struct kmem_cache * s , struct slab * slab )
43914375{
4392- struct freelist_counters new ;
4393- unsigned long counters ;
4394- void * freelist ;
4376+ struct freelist_counters old , new ;
43954377
43964378 lockdep_assert_held (this_cpu_ptr (& s -> cpu_slab -> lock ));
43974379
43984380 do {
4399- freelist = slab -> freelist ;
4400- counters = slab -> counters ;
4381+ old . freelist = slab -> freelist ;
4382+ old . counters = slab -> counters ;
44014383
4402- new .counters = counters ;
4384+ new .freelist = NULL ;
4385+ new .counters = old .counters ;
44034386
4404- new .inuse = slab -> objects ;
4405- new .frozen = freelist != NULL ;
4387+ new .inuse = old . objects ;
4388+ new .frozen = old . freelist != NULL ;
44064389
4407- } while (!__slab_update_freelist (s , slab ,
4408- freelist , counters ,
4409- NULL , new .counters ,
4410- "get_freelist" ));
44114390
4412- return freelist ;
4391+ } while (!__slab_update_freelist (s , slab , & old , & new , "get_freelist" ));
4392+
4393+ return old .freelist ;
44134394}
44144395
44154396/*
44164397 * Freeze the partial slab and return the pointer to the freelist.
44174398 */
44184399static inline void * freeze_slab (struct kmem_cache * s , struct slab * slab )
44194400{
4420- struct freelist_counters new ;
4421- unsigned long counters ;
4422- void * freelist ;
4401+ struct freelist_counters old , new ;
44234402
44244403 do {
4425- freelist = slab -> freelist ;
4426- counters = slab -> counters ;
4404+ old . freelist = slab -> freelist ;
4405+ old . counters = slab -> counters ;
44274406
4428- new .counters = counters ;
4407+ new .freelist = NULL ;
4408+ new .counters = old .counters ;
44294409 VM_BUG_ON (new .frozen );
44304410
4431- new .inuse = slab -> objects ;
4411+ new .inuse = old . objects ;
44324412 new .frozen = 1 ;
44334413
4434- } while (!slab_update_freelist (s , slab ,
4435- freelist , counters ,
4436- NULL , new .counters ,
4437- "freeze_slab" ));
4414+ } while (!slab_update_freelist (s , slab , & old , & new , "freeze_slab" ));
44384415
4439- return freelist ;
4416+ return old . freelist ;
44404417}
44414418
44424419/*
@@ -5864,10 +5841,8 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
58645841 unsigned long addr )
58655842
58665843{
5867- void * old_head ;
58685844 bool was_frozen , was_full ;
5869- struct freelist_counters new ;
5870- unsigned long counters ;
5845+ struct freelist_counters old , new ;
58715846 struct kmem_cache_node * n = NULL ;
58725847 unsigned long flags ;
58735848 bool on_node_partial ;
@@ -5891,13 +5866,19 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
58915866 spin_unlock_irqrestore (& n -> list_lock , flags );
58925867 n = NULL ;
58935868 }
5894- old_head = slab -> freelist ;
5895- counters = slab -> counters ;
5896- set_freepointer (s , tail , old_head );
5897- new .counters = counters ;
5898- was_frozen = !!new .frozen ;
5899- was_full = (old_head == NULL );
5869+
5870+ old .freelist = slab -> freelist ;
5871+ old .counters = slab -> counters ;
5872+
5873+ was_full = (old .freelist == NULL );
5874+ was_frozen = old .frozen ;
5875+
5876+ set_freepointer (s , tail , old .freelist );
5877+
5878+ new .freelist = head ;
5879+ new .counters = old .counters ;
59005880 new .inuse -= cnt ;
5881+
59015882 /*
59025883 * Might need to be taken off (due to becoming empty) or added
59035884 * to (due to not being full anymore) the partial list.
@@ -5926,10 +5907,7 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
59265907 }
59275908 }
59285909
5929- } while (!slab_update_freelist (s , slab ,
5930- old_head , counters ,
5931- head , new .counters ,
5932- "__slab_free" ));
5910+ } while (!slab_update_freelist (s , slab , & old , & new , "__slab_free" ));
59335911
59345912 if (likely (!n )) {
59355913