|
29 | 29 | #define MAX_STACK_ENTRIES 100 |
30 | 30 | #define STACK_ERR_BUF_SIZE 128 |
31 | 31 |
|
| 32 | +#define SIGNALS_TIMEOUT 15 |
| 33 | + |
32 | 34 | struct klp_patch *klp_transition_patch; |
33 | 35 |
|
34 | 36 | static int klp_target_state = KLP_UNDEFINED; |
35 | 37 |
|
| 38 | +static unsigned int klp_signals_cnt; |
| 39 | + |
36 | 40 | /* |
37 | 41 | * This work can be performed periodically to finish patching or unpatching any |
38 | 42 | * "straggler" tasks which failed to transition in the first attempt. |
@@ -343,6 +347,47 @@ static bool klp_try_switch_task(struct task_struct *task) |
343 | 347 |
|
344 | 348 | } |
345 | 349 |
|
| 350 | +/* |
| 351 | + * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set. |
| 352 | + * Kthreads with TIF_PATCH_PENDING set are woken up. |
| 353 | + */ |
| 354 | +static void klp_send_signals(void) |
| 355 | +{ |
| 356 | + struct task_struct *g, *task; |
| 357 | + |
| 358 | + if (klp_signals_cnt == SIGNALS_TIMEOUT) |
| 359 | + pr_notice("signaling remaining tasks\n"); |
| 360 | + |
| 361 | + read_lock(&tasklist_lock); |
| 362 | + for_each_process_thread(g, task) { |
| 363 | + if (!klp_patch_pending(task)) |
| 364 | + continue; |
| 365 | + |
| 366 | + /* |
| 367 | + * There is a small race here. We could see TIF_PATCH_PENDING |
| 368 | + * set and decide to wake up a kthread or send a fake signal. |
| 369 | + * Meanwhile the task could migrate itself and the action |
| 370 | + * would be meaningless. It is not serious though. |
| 371 | + */ |
| 372 | + if (task->flags & PF_KTHREAD) { |
| 373 | + /* |
| 374 | +		 * Wake up a kthread which sleeps interruptibly and |
| 375 | + * still has not been migrated. |
| 376 | + */ |
| 377 | + wake_up_state(task, TASK_INTERRUPTIBLE); |
| 378 | + } else { |
| 379 | + /* |
| 380 | + * Send fake signal to all non-kthread tasks which are |
| 381 | + * still not migrated. |
| 382 | + */ |
| 383 | + spin_lock_irq(&task->sighand->siglock); |
| 384 | + signal_wake_up(task, 0); |
| 385 | + spin_unlock_irq(&task->sighand->siglock); |
| 386 | + } |
| 387 | + } |
| 388 | + read_unlock(&tasklist_lock); |
| 389 | +} |
| 390 | + |
346 | 391 | /* |
347 | 392 | * Try to switch all remaining tasks to the target patch state by walking the |
348 | 393 | * stacks of sleeping tasks and looking for any to-be-patched or |
@@ -393,6 +438,10 @@ void klp_try_complete_transition(void) |
393 | 438 | put_online_cpus(); |
394 | 439 |
|
395 | 440 | if (!complete) { |
| 441 | + if (klp_signals_cnt && !(klp_signals_cnt % SIGNALS_TIMEOUT)) |
| 442 | + klp_send_signals(); |
| 443 | + klp_signals_cnt++; |
| 444 | + |
396 | 445 | /* |
397 | 446 | * Some tasks weren't able to be switched over. Try again |
398 | 447 | * later and/or wait for other methods like kernel exit |
@@ -454,6 +503,8 @@ void klp_start_transition(void) |
454 | 503 | if (task->patch_state != klp_target_state) |
455 | 504 | set_tsk_thread_flag(task, TIF_PATCH_PENDING); |
456 | 505 | } |
| 506 | + |
| 507 | + klp_signals_cnt = 0; |
457 | 508 | } |
458 | 509 |
|
459 | 510 | /* |
@@ -576,47 +627,6 @@ void klp_copy_process(struct task_struct *child) |
576 | 627 | /* TIF_PATCH_PENDING gets copied in setup_thread_stack() */ |
577 | 628 | } |
578 | 629 |
|
579 | | -/* |
580 | | - * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set. |
581 | | - * Kthreads with TIF_PATCH_PENDING set are woken up. Only admin can request this |
582 | | - * action currently. |
583 | | - */ |
584 | | -void klp_send_signals(void) |
585 | | -{ |
586 | | - struct task_struct *g, *task; |
587 | | - |
588 | | - pr_notice("signaling remaining tasks\n"); |
589 | | - |
590 | | - read_lock(&tasklist_lock); |
591 | | - for_each_process_thread(g, task) { |
592 | | - if (!klp_patch_pending(task)) |
593 | | - continue; |
594 | | - |
595 | | - /* |
596 | | - * There is a small race here. We could see TIF_PATCH_PENDING |
597 | | - * set and decide to wake up a kthread or send a fake signal. |
598 | | - * Meanwhile the task could migrate itself and the action |
599 | | - * would be meaningless. It is not serious though. |
600 | | - */ |
601 | | - if (task->flags & PF_KTHREAD) { |
602 | | - /* |
603 | | - * Wake up a kthread which sleeps interruptedly and |
604 | | - * still has not been migrated. |
605 | | - */ |
606 | | - wake_up_state(task, TASK_INTERRUPTIBLE); |
607 | | - } else { |
608 | | - /* |
609 | | - * Send fake signal to all non-kthread tasks which are |
610 | | - * still not migrated. |
611 | | - */ |
612 | | - spin_lock_irq(&task->sighand->siglock); |
613 | | - signal_wake_up(task, 0); |
614 | | - spin_unlock_irq(&task->sighand->siglock); |
615 | | - } |
616 | | - } |
617 | | - read_unlock(&tasklist_lock); |
618 | | -} |
619 | | - |
620 | 630 | /* |
621 | 631 | * Drop TIF_PATCH_PENDING of all tasks on admin's request. This forces an |
622 | 632 | * existing transition to finish. |
|
0 commit comments