Skip to content

Commit b34111a

Browse files
committed
Merge tag 'smp-core-2025-07-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull smp updates from Thomas Gleixner:
 "A set of updates for SMP function calls:

   - Improve locality of smp_call_function_any() by utilizing
     sched_numa_find_nth_cpu() instead of picking a random CPU

   - Wait for work completion in smp_call_function_many_cond() only
     when there was actually work enqueued

   - Simplify functions by utilizing the appropriate cpumask_*()
     interfaces

   - Trivial cleanups"

* tag 'smp-core-2025-07-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  smp: Wait only if work was enqueued
  smp: Defer check for local execution in smp_call_function_many_cond()
  smp: Use cpumask_any_but() in smp_call_function_many_cond()
  smp: Improve locality in smp_call_function_any()
  smp: Fix typo in comment for raw_smp_processor_id()
2 parents dba3ec9 + 946a728 commit b34111a

2 files changed

Lines changed: 15 additions & 31 deletions

File tree

include/linux/smp.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -234,7 +234,7 @@ static inline int get_boot_cpu_id(void)
 #endif /* !SMP */

 /**
- * raw_processor_id() - get the current (unstable) CPU id
+ * raw_smp_processor_id() - get the current (unstable) CPU id
  *
  * For then you know what you are doing and need an unstable
  * CPU id.

kernel/smp.c

Lines changed: 14 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -741,32 +741,19 @@ EXPORT_SYMBOL_GPL(smp_call_function_single_async);
  *
  * Selection preference:
  *	1) current cpu if in @mask
- *	2) any cpu of current node if in @mask
- *	3) any other online cpu in @mask
+ *	2) nearest cpu in @mask, based on NUMA topology
  */
 int smp_call_function_any(const struct cpumask *mask,
 			  smp_call_func_t func, void *info, int wait)
 {
 	unsigned int cpu;
-	const struct cpumask *nodemask;
 	int ret;

 	/* Try for same CPU (cheapest) */
 	cpu = get_cpu();
-	if (cpumask_test_cpu(cpu, mask))
-		goto call;
-
-	/* Try for same node. */
-	nodemask = cpumask_of_node(cpu_to_node(cpu));
-	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
-	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
-		if (cpu_online(cpu))
-			goto call;
-	}
+	if (!cpumask_test_cpu(cpu, mask))
+		cpu = sched_numa_find_nth_cpu(mask, 0, cpu_to_node(cpu));

-	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
-	cpu = cpumask_any_and(mask, cpu_online_mask);
-call:
 	ret = smp_call_function_single(cpu, func, info, wait);
 	put_cpu();
 	return ret;
@@ -792,7 +779,6 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 	bool wait = scf_flags & SCF_WAIT;
 	int nr_cpus = 0;
 	bool run_remote = false;
-	bool run_local = false;

 	lockdep_assert_preemption_disabled();

@@ -814,19 +800,8 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 	 */
 	WARN_ON_ONCE(!in_task());

-	/* Check if we need local execution. */
-	if ((scf_flags & SCF_RUN_LOCAL) && cpumask_test_cpu(this_cpu, mask) &&
-	    (!cond_func || cond_func(this_cpu, info)))
-		run_local = true;
-
 	/* Check if we need remote execution, i.e., any CPU excluding this one. */
-	cpu = cpumask_first_and(mask, cpu_online_mask);
-	if (cpu == this_cpu)
-		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
-	if (cpu < nr_cpu_ids)
-		run_remote = true;
-
-	if (run_remote) {
+	if (cpumask_any_and_but(mask, cpu_online_mask, this_cpu) < nr_cpu_ids) {
 		cfd = this_cpu_ptr(&cfd_data);
 		cpumask_and(cfd->cpumask, mask, cpu_online_mask);
 		__cpumask_clear_cpu(this_cpu, cfd->cpumask);
@@ -840,6 +815,9 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 			continue;
 		}

+		/* Work is enqueued on a remote CPU. */
+		run_remote = true;
+
 		csd_lock(csd);
 		if (wait)
 			csd->node.u_flags |= CSD_TYPE_SYNC;
@@ -851,6 +829,10 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 #endif
 		trace_csd_queue_cpu(cpu, _RET_IP_, func, csd);

+		/*
+		 * Kick the remote CPU if this is the first work
+		 * item enqueued.
+		 */
 		if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) {
 			__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
 			nr_cpus++;
@@ -869,7 +851,9 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 		send_call_function_ipi_mask(cfd->cpumask_ipi);
 	}

-	if (run_local) {
+	/* Check if we need local execution. */
+	if ((scf_flags & SCF_RUN_LOCAL) && cpumask_test_cpu(this_cpu, mask) &&
+	    (!cond_func || cond_func(this_cpu, info))) {
 		unsigned long flags;

 		local_irq_save(flags);

0 commit comments

Comments
 (0)