@@ -1890,13 +1890,15 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
18901890fail :
18911891 trace_percpu_alloc_percpu_fail (reserved , is_atomic , size , align );
18921892
1893- if (! is_atomic && do_warn && warn_limit ) {
1893+ if (do_warn && warn_limit ) {
18941894 pr_warn ("allocation failed, size=%zu align=%zu atomic=%d, %s\n" ,
18951895 size , align , is_atomic , err );
1896- dump_stack ();
1896+ if (!is_atomic )
1897+ dump_stack ();
18971898 if (!-- warn_limit )
18981899 pr_info ("limit reached, disable warning\n" );
18991900 }
1901+
19001902 if (is_atomic ) {
19011903 /* see the flag handling in pcpu_balance_workfn() */
19021904 pcpu_atomic_alloc_failed = true;
@@ -2581,14 +2583,12 @@ void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
25812583{
25822584 size_t size_sum = ai -> static_size + ai -> reserved_size + ai -> dyn_size ;
25832585 size_t static_size , dyn_size ;
2584- struct pcpu_chunk * chunk ;
25852586 unsigned long * group_offsets ;
25862587 size_t * group_sizes ;
25872588 unsigned long * unit_off ;
25882589 unsigned int cpu ;
25892590 int * unit_map ;
25902591 int group , unit , i ;
2591- int map_size ;
25922592 unsigned long tmp_addr ;
25932593 size_t alloc_size ;
25942594
@@ -2615,7 +2615,6 @@ void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
26152615 PCPU_SETUP_BUG_ON (ai -> unit_size < PCPU_MIN_UNIT_SIZE );
26162616 PCPU_SETUP_BUG_ON (!IS_ALIGNED (ai -> unit_size , PCPU_BITMAP_BLOCK_SIZE ));
26172617 PCPU_SETUP_BUG_ON (ai -> dyn_size < PERCPU_DYNAMIC_EARLY_SIZE );
2618- PCPU_SETUP_BUG_ON (!ai -> dyn_size );
26192618 PCPU_SETUP_BUG_ON (!IS_ALIGNED (ai -> reserved_size , PCPU_MIN_ALLOC_SIZE ));
26202619 PCPU_SETUP_BUG_ON (!(IS_ALIGNED (PCPU_BITMAP_BLOCK_SIZE , PAGE_SIZE ) ||
26212620 IS_ALIGNED (PAGE_SIZE , PCPU_BITMAP_BLOCK_SIZE )));
@@ -2698,7 +2697,7 @@ void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
26982697 pcpu_unit_pages = ai -> unit_size >> PAGE_SHIFT ;
26992698 pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT ;
27002699 pcpu_atom_size = ai -> atom_size ;
2701- pcpu_chunk_struct_size = struct_size (chunk , populated ,
2700+ pcpu_chunk_struct_size = struct_size (( struct pcpu_chunk * ) 0 , populated ,
27022701 BITS_TO_LONGS (pcpu_unit_pages ));
27032702
27042703 pcpu_stats_save_ai (ai );
@@ -2735,29 +2734,23 @@ void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
27352734 dyn_size = ai -> dyn_size - (static_size - ai -> static_size );
27362735
27372736 /*
2738- * Initialize first chunk.
2739- * If the reserved_size is non-zero, this initializes the reserved
2740- * chunk. If the reserved_size is zero, the reserved chunk is NULL
2741- * and the dynamic region is initialized here. The first chunk,
2742- * pcpu_first_chunk, will always point to the chunk that serves
2743- * the dynamic region.
2737+ * Initialize first chunk:
2738+ * This chunk is broken up into 3 parts:
2739+ * < static | [reserved] | dynamic >
2740+ * - static - there is no backing chunk because these allocations can
2741+ * never be freed.
2742+ * - reserved (pcpu_reserved_chunk) - exists primarily to serve
2743+ * allocations from module load.
2744+ * - dynamic (pcpu_first_chunk) - serves the dynamic part of the first
2745+ * chunk.
27442746 */
27452747 tmp_addr = (unsigned long )base_addr + static_size ;
2746- map_size = ai -> reserved_size ?: dyn_size ;
2747- chunk = pcpu_alloc_first_chunk (tmp_addr , map_size );
2748-
2749- /* init dynamic chunk if necessary */
2750- if (ai -> reserved_size ) {
2751- pcpu_reserved_chunk = chunk ;
2748+ if (ai -> reserved_size )
2749+ pcpu_reserved_chunk = pcpu_alloc_first_chunk (tmp_addr ,
2750+ ai -> reserved_size );
2751+ tmp_addr = (unsigned long )base_addr + static_size + ai -> reserved_size ;
2752+ pcpu_first_chunk = pcpu_alloc_first_chunk (tmp_addr , dyn_size );
27522753
2753- tmp_addr = (unsigned long )base_addr + static_size +
2754- ai -> reserved_size ;
2755- map_size = dyn_size ;
2756- chunk = pcpu_alloc_first_chunk (tmp_addr , map_size );
2757- }
2758-
2759- /* link the first chunk in */
2760- pcpu_first_chunk = chunk ;
27612754 pcpu_nr_empty_pop_pages = pcpu_first_chunk -> nr_empty_pop_pages ;
27622755 pcpu_chunk_relocate (pcpu_first_chunk , -1 );
27632756
@@ -3189,32 +3182,26 @@ void __init __weak pcpu_populate_pte(unsigned long addr)
31893182 pmd_t * pmd ;
31903183
31913184 if (pgd_none (* pgd )) {
3192- p4d_t * new ;
3193-
3194- new = memblock_alloc (P4D_TABLE_SIZE , P4D_TABLE_SIZE );
3195- if (!new )
3185+ p4d = memblock_alloc (P4D_TABLE_SIZE , P4D_TABLE_SIZE );
3186+ if (!p4d )
31963187 goto err_alloc ;
3197- pgd_populate (& init_mm , pgd , new );
3188+ pgd_populate (& init_mm , pgd , p4d );
31983189 }
31993190
32003191 p4d = p4d_offset (pgd , addr );
32013192 if (p4d_none (* p4d )) {
3202- pud_t * new ;
3203-
3204- new = memblock_alloc (PUD_TABLE_SIZE , PUD_TABLE_SIZE );
3205- if (!new )
3193+ pud = memblock_alloc (PUD_TABLE_SIZE , PUD_TABLE_SIZE );
3194+ if (!pud )
32063195 goto err_alloc ;
3207- p4d_populate (& init_mm , p4d , new );
3196+ p4d_populate (& init_mm , p4d , pud );
32083197 }
32093198
32103199 pud = pud_offset (p4d , addr );
32113200 if (pud_none (* pud )) {
3212- pmd_t * new ;
3213-
3214- new = memblock_alloc (PMD_TABLE_SIZE , PMD_TABLE_SIZE );
3215- if (!new )
3201+ pmd = memblock_alloc (PMD_TABLE_SIZE , PMD_TABLE_SIZE );
3202+ if (!pmd )
32163203 goto err_alloc ;
3217- pud_populate (& init_mm , pud , new );
3204+ pud_populate (& init_mm , pud , pmd );
32183205 }
32193206
32203207 pmd = pmd_offset (pud , addr );