@@ -325,78 +325,3 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
325325 cpus_read_unlock ();
326326}
327327EXPORT_SYMBOL_GPL (smpboot_unregister_percpu_thread );
328-
#ifndef CONFIG_HOTPLUG_CORE_SYNC
/*
 * Per-CPU handshake state between an outgoing (dying) CPU and the CPU
 * waiting for its death.  Initialized to CPU_POST_DEAD, i.e. "previous
 * offline transition fully completed".
 */
static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(CPU_POST_DEAD);

#ifdef CONFIG_HOTPLUG_CPU
/**
 * cpu_wait_death - wait for the specified CPU to exit the idle loop and die
 * @cpu:     CPU whose death is being waited for
 * @seconds: rough timeout, in seconds, before giving up on @cpu
 *
 * Polls the per-CPU hotplug state until the outgoing CPU reports CPU_DEAD,
 * sleeping between polls for intervals that grow by roughly 10% per round.
 * On a normal death the state is advanced to CPU_POST_DEAD; on timeout it
 * is set to CPU_BROKEN so that a late cpu_report_death() can detect the
 * race (see CPU_DEAD_FROZEN there).
 *
 * Return: true if @cpu died normally, false if it failed to die within
 * the timeout.
 */
bool cpu_wait_death(unsigned int cpu, int seconds)
{
	int jf_left = seconds * HZ;	/* Remaining wait budget, in jiffies. */
	int oldstate;
	bool ret = true;
	int sleep_jf = 1;		/* Current per-poll sleep, in jiffies. */

	might_sleep();

	/* The outgoing CPU will normally get done quite quickly. */
	if (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) == CPU_DEAD)
		goto update_state_early;
	udelay(5);

	/* But if the outgoing CPU dawdles, wait increasingly long times. */
	while (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) != CPU_DEAD) {
		schedule_timeout_uninterruptible(sleep_jf);
		jf_left -= sleep_jf;
		if (jf_left <= 0)
			break;
		/* Back off: grow the sleep interval by ~10% (rounded up). */
		sleep_jf = DIV_ROUND_UP(sleep_jf * 11, 10);
	}
update_state_early:
	oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
update_state:
	if (oldstate == CPU_DEAD) {
		/* Outgoing CPU died normally, update state. */
		smp_mb(); /* atomic_read() before update. */
		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_POST_DEAD);
	} else {
		/* Outgoing CPU still hasn't died, set state accordingly. */
		if (!atomic_try_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
					&oldstate, CPU_BROKEN))
			/* Lost a race with the dying CPU; re-evaluate. */
			goto update_state;
		ret = false;
	}
	return ret;
}
374-
/*
 * Called by the outgoing CPU to report its successful death.  Return
 * false if this report follows the surviving CPU's timing out.
 *
 * A separate "CPU_DEAD_FROZEN" is used when the surviving CPU
 * timed out.  This approach allows architectures to omit calls to
 * cpu_check_up_prepare() and cpu_set_state_online() without defeating
 * the next cpu_wait_death()'s polling loop.
 */
bool cpu_report_death(void)
{
	int oldstate;
	int newstate;
	int cpu = smp_processor_id();	/* The dying CPU reports on itself. */

	oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
	do {
		/*
		 * If the waiter already timed out (state is CPU_BROKEN),
		 * record this late death as CPU_DEAD_FROZEN rather than
		 * CPU_DEAD.
		 */
		if (oldstate != CPU_BROKEN)
			newstate = CPU_DEAD;
		else
			newstate = CPU_DEAD_FROZEN;
	} while (!atomic_try_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
				     &oldstate, newstate));
	return newstate == CPU_DEAD;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */
#endif /* !CONFIG_HOTPLUG_CORE_SYNC */
0 commit comments