@@ -198,171 +198,6 @@ SYSCALL_DEFINE0(ni_syscall)
198198 return - ENOSYS ;
199199}
200200
201- /**
202- * idtentry_enter - Handle state tracking on ordinary idtentries
203- * @regs: Pointer to pt_regs of interrupted context
204- *
205- * Invokes:
206- * - lockdep irqflag state tracking as low level ASM entry disabled
207- * interrupts.
208- *
209- * - Context tracking if the exception hit user mode.
210- *
211- * - The hardirq tracer to keep the state consistent as low level ASM
212- * entry disabled interrupts.
213- *
214- * As a precondition, this requires that the entry came from user mode,
215- * idle, or a kernel context in which RCU is watching.
216- *
217- * For kernel mode entries RCU handling is done conditionally. If RCU is
218- * watching then the only RCU requirement is to check whether the tick has
219- * to be restarted. If RCU is not watching then rcu_irq_enter() has to be
220- * invoked on entry and rcu_irq_exit() on exit.
221- *
222- * Avoiding the rcu_irq_enter/exit() calls is an optimization but also
223- * solves the problem of kernel mode pagefaults which can schedule, which
224- * is not possible after invoking rcu_irq_enter() without undoing it.
225- *
226- * For user mode entries irqentry_enter_from_user_mode() must be invoked to
227- * establish the proper context for NOHZ_FULL. Otherwise scheduling on exit
228- * would not be possible.
229- *
230- * Returns: An opaque object that must be passed to idtentry_exit()
231- *
232- * The return value must be fed into the state argument of
233- * idtentry_exit().
234- */
235- idtentry_state_t noinstr idtentry_enter (struct pt_regs * regs )
236- {
237- idtentry_state_t ret = {
238- .exit_rcu = false,
239- };
240-
241- if (user_mode (regs )) {
242- irqentry_enter_from_user_mode (regs );
243- return ret ;
244- }
245-
246- /*
247- * If this entry hit the idle task invoke rcu_irq_enter() whether
248- * RCU is watching or not.
249- *
250- * Interrupts can nest when the first interrupt invokes softirq
251- * processing on return which enables interrupts.
252- *
253- * Scheduler ticks in the idle task can mark quiescent state and
254- * terminate a grace period, if and only if the timer interrupt is
255- * not nested into another interrupt.
256- *
257- * Checking for __rcu_is_watching() here would prevent the nesting
258- * interrupt to invoke rcu_irq_enter(). If that nested interrupt is
259- * the tick then rcu_flavor_sched_clock_irq() would wrongfully
260- * assume that it is the first interrupt and eventually claim
261- * quiescent state and end grace periods prematurely.
262- *
263- * Unconditionally invoke rcu_irq_enter() so RCU state stays
264- * consistent.
265- *
266- * TINY_RCU does not support EQS, so let the compiler eliminate
267- * this part when enabled.
268- */
269- if (!IS_ENABLED (CONFIG_TINY_RCU ) && is_idle_task (current )) {
270- /*
271- * If RCU is not watching then the same careful
272- * sequence vs. lockdep and tracing is required
273- * as in irqentry_enter_from_user_mode().
274- */
275- lockdep_hardirqs_off (CALLER_ADDR0 );
276- rcu_irq_enter ();
277- instrumentation_begin ();
278- trace_hardirqs_off_finish ();
279- instrumentation_end ();
280-
281- ret .exit_rcu = true;
282- return ret ;
283- }
284-
285- /*
286- * If RCU is watching then RCU only wants to check whether it needs
287- * to restart the tick in NOHZ mode. rcu_irq_enter_check_tick()
288- * already contains a warning when RCU is not watching, so no point
289- * in having another one here.
290- */
291- instrumentation_begin ();
292- rcu_irq_enter_check_tick ();
293- /* Use the combo lockdep/tracing function */
294- trace_hardirqs_off ();
295- instrumentation_end ();
296-
297- return ret ;
298- }
299-
300- static void idtentry_exit_cond_resched (struct pt_regs * regs , bool may_sched )
301- {
302- if (may_sched && !preempt_count ()) {
303- /* Sanity check RCU and thread stack */
304- rcu_irq_exit_check_preempt ();
305- if (IS_ENABLED (CONFIG_DEBUG_ENTRY ))
306- WARN_ON_ONCE (!on_thread_stack ());
307- if (need_resched ())
308- preempt_schedule_irq ();
309- }
310- /* Covers both tracing and lockdep */
311- trace_hardirqs_on ();
312- }
313-
314- /**
315- * idtentry_exit - Handle return from exception that used idtentry_enter()
316- * @regs: Pointer to pt_regs (exception entry regs)
317- * @state: Return value from matching call to idtentry_enter()
318- *
319- * Depending on the return target (kernel/user) this runs the necessary
320- * preemption and work checks if possible and required and returns to
321- * the caller with interrupts disabled and no further work pending.
322- *
323- * This is the last action before returning to the low level ASM code which
324- * just needs to return to the appropriate context.
325- *
326- * Counterpart to idtentry_enter(). The return value of the entry
327- * function must be fed into the @state argument.
328- */
329- void noinstr idtentry_exit (struct pt_regs * regs , idtentry_state_t state )
330- {
331- lockdep_assert_irqs_disabled ();
332-
333- /* Check whether this returns to user mode */
334- if (user_mode (regs )) {
335- irqentry_exit_to_user_mode (regs );
336- } else if (regs -> flags & X86_EFLAGS_IF ) {
337- /*
338- * If RCU was not watching on entry this needs to be done
339- * carefully and needs the same ordering of lockdep/tracing
340- * and RCU as the return to user mode path.
341- */
342- if (state .exit_rcu ) {
343- instrumentation_begin ();
344- /* Tell the tracer that IRET will enable interrupts */
345- trace_hardirqs_on_prepare ();
346- lockdep_hardirqs_on_prepare (CALLER_ADDR0 );
347- instrumentation_end ();
348- rcu_irq_exit ();
349- lockdep_hardirqs_on (CALLER_ADDR0 );
350- return ;
351- }
352-
353- instrumentation_begin ();
354- idtentry_exit_cond_resched (regs , IS_ENABLED (CONFIG_PREEMPTION ));
355- instrumentation_end ();
356- } else {
357- /*
358- * IRQ flags state is correct already. Just tell RCU if it
359- * was not watching on entry.
360- */
361- if (state .exit_rcu )
362- rcu_irq_exit ();
363- }
364- }
365-
366201#ifdef CONFIG_XEN_PV
367202#ifndef CONFIG_PREEMPTION
368203/*
@@ -427,7 +262,7 @@ __visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
427262 inhcall = get_and_clear_inhcall ();
428263 if (inhcall && !WARN_ON_ONCE (state .exit_rcu )) {
429264 instrumentation_begin ();
430- idtentry_exit_cond_resched ( regs , true );
265+ irqentry_exit_cond_resched ( );
431266 instrumentation_end ();
432267 restore_inhcall (inhcall );
433268 } else {
0 commit comments