1313#include <linux/percpu.h>
1414#include <linux/sched.h>
1515#include <linux/sched/topology.h>
16+ #include <linux/cpu.h>
1617
1718#include <asm/topology.h>
1819
/*
 * cpu topology table: one entry per possible CPU, filled in by
 * store_cpu_topology() as CPUs come up.  Exported (GPL-only) so that
 * modular consumers of the topology can reach it.
 */
struct cputopo_parisc cpu_topology[NR_CPUS] __read_mostly;
EXPORT_SYMBOL_GPL(cpu_topology);

25- const struct cpumask * cpu_coregroup_mask (int cpu )
26- {
27- return & cpu_topology [cpu ].core_sibling ;
28- }
29-
/*
 * update_siblings_masks - rebuild sibling cpumasks after @cpuid's
 * socket_id/core_id fields have been filled in.
 *
 * Any CPU on the same socket becomes a core sibling of @cpuid; a CPU
 * that additionally shares @cpuid's core_id becomes a thread sibling.
 * Masks are updated symmetrically on both CPUs involved.
 */
static void update_siblings_masks(unsigned int cpuid)
{
	struct cputopo_parisc *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu;

	/* update core and thread sibling masks */
	for_each_possible_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		/* Different socket: not a sibling of any kind. */
		if (cpuid_topo->socket_id != cpu_topo->socket_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		/* Thread siblings must additionally share the core id. */
		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
	/* Order the mask stores before any subsequent stores by this CPU. */
	smp_wmb();
}
/* Per-CPU device structure, registered with the driver core in store_cpu_topology(). */
static DEFINE_PER_CPU(struct cpu, cpu_devices);
5521
5622static int dualcores_found __initdata ;
5723
@@ -62,7 +28,7 @@ static int dualcores_found __initdata;
6228 */
6329void __init store_cpu_topology (unsigned int cpuid )
6430{
65- struct cputopo_parisc * cpuid_topo = & cpu_topology [cpuid ];
31+ struct cpu_topology * cpuid_topo = & cpu_topology [cpuid ];
6632 struct cpuinfo_parisc * p ;
6733 int max_socket = -1 ;
6834 unsigned long cpu ;
@@ -71,6 +37,12 @@ void __init store_cpu_topology(unsigned int cpuid)
7137 if (cpuid_topo -> core_id != -1 )
7238 return ;
7339
40+ #ifdef CONFIG_HOTPLUG_CPU
41+ per_cpu (cpu_devices , cpuid ).hotpluggable = 1 ;
42+ #endif
43+ if (register_cpu (& per_cpu (cpu_devices , cpuid ), cpuid ))
44+ pr_warn ("Failed to register CPU%d device" , cpuid );
45+
7446 /* create cpu topology mapping */
7547 cpuid_topo -> thread_id = -1 ;
7648 cpuid_topo -> core_id = 0 ;
@@ -86,25 +58,25 @@ void __init store_cpu_topology(unsigned int cpuid)
8658 cpuid_topo -> core_id = cpu_topology [cpu ].core_id ;
8759 if (p -> cpu_loc ) {
8860 cpuid_topo -> core_id ++ ;
89- cpuid_topo -> socket_id = cpu_topology [cpu ].socket_id ;
61+ cpuid_topo -> package_id = cpu_topology [cpu ].package_id ;
9062 dualcores_found = 1 ;
9163 continue ;
9264 }
9365 }
9466
95- if (cpuid_topo -> socket_id == -1 )
96- max_socket = max (max_socket , cpu_topology [cpu ].socket_id );
67+ if (cpuid_topo -> package_id == -1 )
68+ max_socket = max (max_socket , cpu_topology [cpu ].package_id );
9769 }
9870
99- if (cpuid_topo -> socket_id == -1 )
100- cpuid_topo -> socket_id = max_socket + 1 ;
71+ if (cpuid_topo -> package_id == -1 )
72+ cpuid_topo -> package_id = max_socket + 1 ;
10173
10274 update_siblings_masks (cpuid );
10375
10476 pr_info ("CPU%u: cpu core %d of socket %d\n" ,
10577 cpuid ,
10678 cpu_topology [cpuid ].core_id ,
107- cpu_topology [cpuid ].socket_id );
79+ cpu_topology [cpuid ].package_id );
10880}
10981
11082static struct sched_domain_topology_level parisc_mc_topology [] = {
@@ -122,20 +94,6 @@ static struct sched_domain_topology_level parisc_mc_topology[] = {
12294 */
/*
 * init_cpu_topology - install the parisc scheduler topology descriptor.
 *
 * If any dual-core CPUs were detected by store_cpu_topology()
 * (dualcores_found), switch the scheduler to the MC-aware topology
 * table; otherwise the default scheduler topology is kept.
 */
void __init init_cpu_topology(void)
{
	/* Set scheduler topology descriptor */
	if (dualcores_found)
		set_sched_topology(parisc_mc_topology);
}
0 commit comments