Skip to content

Commit efce842

Browse files
srikardmpe
authored and committed
powerpc/paravirt: Improve vcpu_is_preempted
PowerVM Hypervisor dispatches on a whole core basis. In a shared LPAR, a CPU from a core that is CEDED or preempted may have a larger latency. In such a scenario, it's preferable to choose a different CPU to run. If one of the CPUs in the core is active, i.e. neither CEDED nor preempted, then consider this CPU as not preempted. Also if any of the CPUs in the core has yielded but OS has not requested CEDE or CONFER, then consider this CPU to be preempted. Correct detection of preempted CPUs is important for detecting idle CPUs/cores in task scheduler. Tested-by: Aboorva Devarajan <aboorvad@linux.vnet.ibm.com> Reviewed-by: Shrikanth Hegde <sshegde@linux.vnet.ibm.com> Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au> Link: https://msgid.link/20231019091452.95260-1-srikar@linux.vnet.ibm.com
1 parent e08c43e commit efce842

1 file changed

Lines changed: 44 additions & 3 deletions

File tree

arch/powerpc/include/asm/paravirt.h

Lines changed: 44 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -71,6 +71,11 @@ static inline void yield_to_any(void)
7171
{
7272
plpar_hcall_norets_notrace(H_CONFER, -1, 0);
7373
}
74+
75+
static inline bool is_vcpu_idle(int vcpu)
76+
{
77+
return lppaca_of(vcpu).idle;
78+
}
7479
#else
7580
static inline bool is_shared_processor(void)
7681
{
@@ -100,6 +105,10 @@ static inline void prod_cpu(int cpu)
100105
___bad_prod_cpu(); /* This would be a bug */
101106
}
102107

108+
/*
 * Stub for the non-shared-processor build: there is no lppaca idle
 * flag to consult, so no vCPU is ever reported idle.
 */
static inline bool is_vcpu_idle(int vcpu)
{
	(void)vcpu;
	return false;
}
103112
#endif
104113

105114
#define vcpu_is_preempted vcpu_is_preempted
@@ -121,9 +130,23 @@ static inline bool vcpu_is_preempted(int cpu)
121130
if (!is_shared_processor())
122131
return false;
123132

133+
/*
134+
* If the hypervisor has dispatched the target CPU on a physical
135+
* processor, then the target CPU is definitely not preempted.
136+
*/
137+
if (!(yield_count_of(cpu) & 1))
138+
return false;
139+
140+
/*
141+
* If the target CPU has yielded to Hypervisor but OS has not
142+
* requested idle then the target CPU is definitely preempted.
143+
*/
144+
if (!is_vcpu_idle(cpu))
145+
return true;
146+
124147
#ifdef CONFIG_PPC_SPLPAR
125148
if (!is_kvm_guest()) {
126-
int first_cpu;
149+
int first_cpu, i;
127150

128151
/*
129152
* The result of vcpu_is_preempted() is used in a
@@ -149,11 +172,29 @@ static inline bool vcpu_is_preempted(int cpu)
149172
*/
150173
if (cpu_first_thread_sibling(cpu) == first_cpu)
151174
return false;
175+
176+
/*
177+
* If any of the threads of the target CPU's core are not
178+
* preempted or ceded, then consider target CPU to be
179+
* non-preempted.
180+
*/
181+
first_cpu = cpu_first_thread_sibling(cpu);
182+
for (i = first_cpu; i < first_cpu + threads_per_core; i++) {
183+
if (i == cpu)
184+
continue;
185+
if (!(yield_count_of(i) & 1))
186+
return false;
187+
if (!is_vcpu_idle(i))
188+
return true;
189+
}
152190
}
153191
#endif
154192

155-
if (yield_count_of(cpu) & 1)
156-
return true;
193+
/*
194+
* None of the threads in target CPU's core are running but none of
195+
* them were preempted too. Hence assume the target CPU to be
196+
* non-preempted.
197+
*/
157198
return false;
158199
}
159200

0 commit comments

Comments
 (0)