@@ -112,30 +112,6 @@ static unsigned long pfn_next(unsigned long pfn)
 #define for_each_device_pfn(pfn, map, i) \
 	for (pfn = pfn_first(map, i); pfn < pfn_end(map, i); pfn = pfn_next(pfn))
 
-static void dev_pagemap_kill(struct dev_pagemap *pgmap)
-{
-	if (pgmap->ops && pgmap->ops->kill)
-		pgmap->ops->kill(pgmap);
-	else
-		percpu_ref_kill(pgmap->ref);
-}
-
-static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
-{
-	if (pgmap->ops && pgmap->ops->cleanup) {
-		pgmap->ops->cleanup(pgmap);
-	} else {
-		wait_for_completion(&pgmap->done);
-		percpu_ref_exit(pgmap->ref);
-	}
-	/*
-	 * Undo the pgmap ref assignment for the internal case as the
-	 * caller may re-enable the same pgmap.
-	 */
-	if (pgmap->ref == &pgmap->internal_ref)
-		pgmap->ref = NULL;
-}
-
 static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
 {
 	struct range *range = &pgmap->ranges[range_id];
@@ -167,11 +143,12 @@ void memunmap_pages(struct dev_pagemap *pgmap)
 	unsigned long pfn;
 	int i;
 
-	dev_pagemap_kill(pgmap);
+	percpu_ref_kill(&pgmap->ref);
 	for (i = 0; i < pgmap->nr_range; i++)
 		for_each_device_pfn(pfn, pgmap, i)
 			put_page(pfn_to_page(pfn));
-	dev_pagemap_cleanup(pgmap);
+	wait_for_completion(&pgmap->done);
+	percpu_ref_exit(&pgmap->ref);
 
 	for (i = 0; i < pgmap->nr_range; i++)
 		pageunmap_range(pgmap, i);
@@ -188,8 +165,7 @@ static void devm_memremap_pages_release(void *data)
 
 static void dev_pagemap_percpu_release(struct percpu_ref *ref)
 {
-	struct dev_pagemap *pgmap =
-		container_of(ref, struct dev_pagemap, internal_ref);
+	struct dev_pagemap *pgmap = container_of(ref, struct dev_pagemap, ref);
 
 	complete(&pgmap->done);
 }
@@ -295,8 +271,8 @@ static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
 	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
 				PHYS_PFN(range->start),
 				PHYS_PFN(range_len(range)), pgmap);
-	percpu_ref_get_many(pgmap->ref, pfn_end(pgmap, range_id)
-			- pfn_first(pgmap, range_id));
+	percpu_ref_get_many(&pgmap->ref,
+		pfn_end(pgmap, range_id) - pfn_first(pgmap, range_id));
 	return 0;
 
 err_add_memory:
@@ -362,22 +338,11 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
 		break;
 	}
 
-	if (!pgmap->ref) {
-		if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
-			return ERR_PTR(-EINVAL);
-
-		init_completion(&pgmap->done);
-		error = percpu_ref_init(&pgmap->internal_ref,
-				dev_pagemap_percpu_release, 0, GFP_KERNEL);
-		if (error)
-			return ERR_PTR(error);
-		pgmap->ref = &pgmap->internal_ref;
-	} else {
-		if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
-			WARN(1, "Missing reference count teardown definition\n");
-			return ERR_PTR(-EINVAL);
-		}
-	}
+	init_completion(&pgmap->done);
+	error = percpu_ref_init(&pgmap->ref, dev_pagemap_percpu_release, 0,
+			GFP_KERNEL);
+	if (error)
+		return ERR_PTR(error);
 
 	devmap_managed_enable_get(pgmap);
 
@@ -486,7 +451,7 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
 	/* fall back to slow path lookup */
 	rcu_read_lock();
 	pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
-	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
+	if (pgmap && !percpu_ref_tryget_live(&pgmap->ref))
 		pgmap = NULL;
 	rcu_read_unlock();
 
0 commit comments