99
1010#include <linux/cpu.h>
1111#include <linux/stacktrace.h>
12+ #include <linux/static_call.h>
1213#include "core.h"
1314#include "patch.h"
1415#include "transition.h"
@@ -26,6 +27,25 @@ static int klp_target_state = KLP_UNDEFINED;
2627
2728static unsigned int klp_signals_cnt ;
2829
/*
 * When a livepatch is in progress, enable klp stack checking in
 * cond_resched().  This helps CPU-bound kthreads get patched.
 */
#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)

/*
 * With dynamic preemption via static calls, the scheduler can redirect
 * cond_resched() itself to a klp-aware variant while a transition is
 * in progress.
 */
#define klp_cond_resched_enable() sched_dynamic_klp_enable()
#define klp_cond_resched_disable() sched_dynamic_klp_disable()

#else /* !CONFIG_PREEMPT_DYNAMIC || !CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */

/*
 * Fallback: gate the klp check behind a static key.
 * NOTE(review): the key is presumably tested by an inline wrapper around
 * cond_resched() declared in a livepatch header — confirm at the consumer;
 * EXPORT_SYMBOL suggests modules reach it through inlined code.
 */
DEFINE_STATIC_KEY_FALSE(klp_sched_try_switch_key);
EXPORT_SYMBOL(klp_sched_try_switch_key);

#define klp_cond_resched_enable() static_branch_enable(&klp_sched_try_switch_key)
#define klp_cond_resched_disable() static_branch_disable(&klp_sched_try_switch_key)

#endif /* CONFIG_PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
2949/*
3050 * This work can be performed periodically to finish patching or unpatching any
3151 * "straggler" tasks which failed to transition in the first attempt.
@@ -174,8 +194,8 @@ void klp_update_patch_state(struct task_struct *task)
174194 * barrier (smp_rmb) for two cases:
175195 *
176196 * 1) Enforce the order of the TIF_PATCH_PENDING read and the
177- * klp_target_state read. The corresponding write barrier is in
178- * klp_init_transition().
197+ * klp_target_state read. The corresponding write barriers are in
198+ * klp_init_transition() and klp_reverse_transition() .
179199 *
180200 * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
181201 * of func->transition, if klp_ftrace_handler() is called later on
@@ -343,6 +363,44 @@ static bool klp_try_switch_task(struct task_struct *task)
343363 return !ret ;
344364}
345365
/*
 * __klp_sched_try_switch - attempt to switch current's patch state from
 * cond_resched().
 *
 * Called on the cond_resched() path (see klp_cond_resched_enable() above),
 * so it must be cheap when no patch is pending and must never take
 * klp_mutex.  The fast path is a single flag test.
 */
void __klp_sched_try_switch(void)
{
	/* Fast path: nothing pending for this task, bail immediately. */
	if (likely(!klp_patch_pending(current)))
		return;

	/*
	 * This function is called from cond_resched() which is called in many
	 * places throughout the kernel.  Using the klp_mutex here might
	 * deadlock.
	 *
	 * Instead, disable preemption to prevent racing with other callers of
	 * klp_try_switch_task().  Thanks to task_call_func() they won't be
	 * able to switch this task while it's running.
	 */
	preempt_disable();

	/*
	 * Make sure current didn't get patched between the above check and
	 * preempt_disable().
	 */
	if (unlikely(!klp_patch_pending(current)))
		goto out;

	/*
	 * Enforce the order of the TIF_PATCH_PENDING read above and the
	 * klp_target_state read in klp_try_switch_task().  The corresponding
	 * write barriers are in klp_init_transition() and
	 * klp_reverse_transition().
	 */
	smp_rmb();

	klp_try_switch_task(current);

out:
	preempt_enable();
}
EXPORT_SYMBOL(__klp_sched_try_switch);
346404/*
347405 * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
348406 * Kthreads with TIF_PATCH_PENDING set are woken up.
@@ -449,7 +507,8 @@ void klp_try_complete_transition(void)
449507 return ;
450508 }
451509
452- /* we're done, now cleanup the data structures */
510+ /* Done! Now cleanup the data structures. */
511+ klp_cond_resched_disable ();
453512 patch = klp_transition_patch ;
454513 klp_complete_transition ();
455514
@@ -501,6 +560,8 @@ void klp_start_transition(void)
501560 set_tsk_thread_flag (task , TIF_PATCH_PENDING );
502561 }
503562
563+ klp_cond_resched_enable ();
564+
504565 klp_signals_cnt = 0 ;
505566}
506567
@@ -556,8 +617,9 @@ void klp_init_transition(struct klp_patch *patch, int state)
556617 * see a func in transition with a task->patch_state of KLP_UNDEFINED.
557618 *
558619 * Also enforce the order of the klp_target_state write and future
559- * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
560- * set a task->patch_state to KLP_UNDEFINED.
620+ * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() and
621+ * __klp_sched_try_switch() don't set a task->patch_state to
622+ * KLP_UNDEFINED.
561623 */
562624 smp_wmb ();
563625
@@ -593,14 +655,10 @@ void klp_reverse_transition(void)
593655 klp_target_state == KLP_PATCHED ? "patching to unpatching" :
594656 "unpatching to patching" );
595657
596- klp_transition_patch -> enabled = !klp_transition_patch -> enabled ;
597-
598- klp_target_state = !klp_target_state ;
599-
600658 /*
601659 * Clear all TIF_PATCH_PENDING flags to prevent races caused by
602- * klp_update_patch_state() running in parallel with
603- * klp_start_transition() .
660+ * klp_update_patch_state() or __klp_sched_try_switch() running in
661+ * parallel with the reverse transition .
604662 */
605663 read_lock (& tasklist_lock );
606664 for_each_process_thread (g , task )
@@ -610,9 +668,28 @@ void klp_reverse_transition(void)
610668 for_each_possible_cpu (cpu )
611669 clear_tsk_thread_flag (idle_task (cpu ), TIF_PATCH_PENDING );
612670
613- /* Let any remaining calls to klp_update_patch_state() complete */
671+ /*
672+ * Make sure all existing invocations of klp_update_patch_state() and
673+ * __klp_sched_try_switch() see the cleared TIF_PATCH_PENDING before
674+ * starting the reverse transition.
675+ */
614676 klp_synchronize_transition ();
615677
678+ /*
679+ * All patching has stopped, now re-initialize the global variables to
680+ * prepare for the reverse transition.
681+ */
682+ klp_transition_patch -> enabled = !klp_transition_patch -> enabled ;
683+ klp_target_state = !klp_target_state ;
684+
685+ /*
686+ * Enforce the order of the klp_target_state write and the
687+ * TIF_PATCH_PENDING writes in klp_start_transition() to ensure
688+ * klp_update_patch_state() and __klp_sched_try_switch() don't set
689+ * task->patch_state to the wrong value.
690+ */
691+ smp_wmb ();
692+
616693 klp_start_transition ();
617694}
618695
@@ -626,9 +703,9 @@ void klp_copy_process(struct task_struct *child)
626703 * the task flag up to date with the parent here.
627704 *
628705 * The operation is serialized against all klp_*_transition()
629- * operations by the tasklist_lock. The only exception is
630- * klp_update_patch_state(current), but we cannot race with
631- * that because we are current.
706+ * operations by the tasklist_lock. The only exceptions are
707+ * klp_update_patch_state(current) and __klp_sched_try_switch() , but we
708+ * cannot race with them because we are current.
632709 */
633710 if (test_tsk_thread_flag (current , TIF_PATCH_PENDING ))
634711 set_tsk_thread_flag (child , TIF_PATCH_PENDING );
0 commit comments