  * from scratch.
  */
 
+/* Counter of active nbcon emergency contexts. */
+static atomic_t nbcon_cpu_emergency_cnt = ATOMIC_INIT(0);
+
 /**
  * nbcon_state_set - Helper function to set the console state
  * @con: Console to update
@@ -1163,6 +1166,17 @@ static bool nbcon_kthread_should_wakeup(struct console *con, struct nbcon_contex
         if (kthread_should_stop())
                 return true;
 
+        /*
+         * Block the kthread when the system is in an emergency or panic
+         * mode. This increases the chance that these contexts are able to
+         * show the messages directly. It also reduces the risk of
+         * interrupted writes, where a context with a higher priority takes
+         * over the nbcon console ownership in the middle of a message.
+         */
+        if (unlikely(atomic_read(&nbcon_cpu_emergency_cnt)) ||
+            unlikely(panic_in_progress()))
+                return false;
+
         cookie = console_srcu_read_lock();
 
         flags = console_srcu_read_flags(con);
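
For illustration, a minimal userspace sketch of the gating predicate added above, using C11 atomics. The names emergency_cnt, panic_flag, and worker_should_run() are invented for this sketch; they are not the kernel's API.

/* Sketch only: the worker stays parked while any emergency or a panic
 * is active, mirroring the check added to nbcon_kthread_should_wakeup(). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int emergency_cnt = 0;   /* analogous to nbcon_cpu_emergency_cnt */
static atomic_bool panic_flag = false; /* analogous to panic_in_progress() */

static bool worker_should_run(void)
{
        if (atomic_load(&emergency_cnt) > 0 || atomic_load(&panic_flag))
                return false;
        return true;
}

int main(void)
{
        printf("run? %d\n", worker_should_run()); /* 1: no emergency */
        atomic_fetch_add(&emergency_cnt, 1);
        printf("run? %d\n", worker_should_run()); /* 0: emergency active */
        atomic_fetch_sub(&emergency_cnt, 1);
        printf("run? %d\n", worker_should_run()); /* 1 again */
        return 0;
}
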
@@ -1214,6 +1228,14 @@ static int nbcon_kthread_func(void *__console)
         if (kthread_should_stop())
                 return 0;
 
+        /*
+         * Block the kthread when the system is in an emergency or panic
+         * mode. See nbcon_kthread_should_wakeup() for more details.
+         */
+        if (unlikely(atomic_read(&nbcon_cpu_emergency_cnt)) ||
+            unlikely(panic_in_progress()))
+                goto wait_for_event;
+
         backlog = false;
 
         /*
@@ -1505,10 +1527,10 @@ static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
         ctxt->prio = nbcon_get_default_prio();
         ctxt->allow_unsafe_takeover = allow_unsafe_takeover;
 
-        if (!nbcon_context_try_acquire(ctxt, false))
-                return -EPERM;
-
         while (nbcon_seq_read(con) < stop_seq) {
+                if (!nbcon_context_try_acquire(ctxt, false))
+                        return -EPERM;
+
                 /*
                  * nbcon_emit_next_record() returns false when the console was
                  * handed over or taken over. In both cases the context is no
@@ -1517,6 +1539,8 @@ static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
                 if (!nbcon_emit_next_record(&wctxt, true))
                         return -EAGAIN;
 
+                nbcon_context_release(ctxt);
+
                 if (!ctxt->backlog) {
                         /* Are there reserved but not yet finalized records? */
                         if (nbcon_seq_read(con) < stop_seq)
@@ -1525,7 +1549,6 @@ static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
                 }
         }
 
-        nbcon_context_release(ctxt);
         return err;
 }
 
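
The two hunks above move console ownership inside the flush loop: it is now acquired and released per record instead of being held across the whole backlog. A rough userspace analogue of that loop shape, with a pthread mutex standing in for nbcon ownership (record_lock, emit_one_record(), and flush_pending() are invented names, and a mutex is only an approximation of the takeover semantics):

/* Sketch only: drop the lock between records so a higher-priority
 * context waits for at most one record, not the whole backlog. */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t record_lock = PTHREAD_MUTEX_INITIALIZER;

static bool emit_one_record(int seq)
{
        /* pretend to print record 'seq'; always succeeds in this sketch */
        (void)seq;
        return true;
}

static int flush_pending(int seq, int stop_seq)
{
        while (seq < stop_seq) {
                if (pthread_mutex_lock(&record_lock) != 0)
                        return -1;      /* could not acquire ownership */

                if (!emit_one_record(seq)) {
                        pthread_mutex_unlock(&record_lock);
                        return -1;      /* lost ownership mid-record */
                }
                seq++;

                pthread_mutex_unlock(&record_lock); /* re-acquired next loop */
        }
        return 0;
}

int main(void)
{
        return flush_pending(0, 3);
}
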
@@ -1655,6 +1678,8 @@ void nbcon_cpu_emergency_enter(void)
 
         preempt_disable();
 
+        atomic_inc(&nbcon_cpu_emergency_cnt);
+
         cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();
         (*cpu_emergency_nesting)++;
 }
@@ -1669,10 +1694,24 @@ void nbcon_cpu_emergency_exit(void)
         unsigned int *cpu_emergency_nesting;
 
         cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();
-
         if (!WARN_ON_ONCE(*cpu_emergency_nesting == 0))
                 (*cpu_emergency_nesting)--;
 
+        /*
+         * Wake up the kthreads because there might be pending messages
+         * added by other CPUs with normal priority since the last flush
+         * in the emergency context.
+         */
+        if (!WARN_ON_ONCE(atomic_read(&nbcon_cpu_emergency_cnt) == 0)) {
+                if (atomic_dec_return(&nbcon_cpu_emergency_cnt) == 0) {
+                        struct console_flush_type ft;
+
+                        printk_get_console_flush_type(&ft);
+                        if (ft.nbcon_offload)
+                                nbcon_kthreads_wake();
+                }
+        }
+
         preempt_enable();
 }
 
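
The enter/exit pairing above means only the last exit (the counter dropping to zero) wakes the offloaded printer kthreads. A minimal userspace sketch of that pattern, with invented names (emergency_enter(), emergency_exit(), wake_printer_thread() are not the kernel API):

/* Sketch only: atomic_fetch_sub() returns the old value, so old == 1
 * identifies the last exiting context, which performs the wakeup. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int emergency_cnt = 0;

static void wake_printer_thread(void)
{
        printf("printer thread woken\n");
}

static void emergency_enter(void)
{
        atomic_fetch_add(&emergency_cnt, 1);
}

static void emergency_exit(void)
{
        if (atomic_fetch_sub(&emergency_cnt, 1) == 1)
                wake_printer_thread();
}

int main(void)
{
        emergency_enter();
        emergency_enter();      /* a second emergency context, say on another CPU */
        emergency_exit();       /* not last: no wake */
        emergency_exit();       /* last exit: wakes the printer */
        return 0;
}
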