summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPetr Mladek <pmladek@suse.com>2025-12-01 14:17:04 +0100
committerPetr Mladek <pmladek@suse.com>2025-12-01 14:17:04 +0100
commit5cae92e622300d04ff23cc721dc67d8f5788e51c (patch)
tree7993aeede5230cb853f2e95c419cd2b5a9c71a4e
parent4f132d81f9f8e47a8866d162698adb7df7184466 (diff)
parent187de7c212e5fa87779e1026bf949337bca0cdaa (diff)
Merge branch 'rework/write_atomic-unsafe' into for-linus
-rw-r--r--include/linux/console.h19
-rw-r--r--kernel/printk/nbcon.c47
2 files changed, 48 insertions(+), 18 deletions(-)
diff --git a/include/linux/console.h b/include/linux/console.h
index d17f1f525bec..5f17321ed962 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -186,6 +186,8 @@ static inline void con_debug_leave(void) { }
* printing callbacks must not be called.
* @CON_NBCON: Console can operate outside of the legacy style console_lock
* constraints.
+ * @CON_NBCON_ATOMIC_UNSAFE: The write_atomic() callback is not safe and is
+ * therefore only used by nbcon_atomic_flush_unsafe().
*/
enum cons_flags {
CON_PRINTBUFFER = BIT(0),
@@ -197,6 +199,7 @@ enum cons_flags {
CON_EXTENDED = BIT(6),
CON_SUSPENDED = BIT(7),
CON_NBCON = BIT(8),
+ CON_NBCON_ATOMIC_UNSAFE = BIT(9),
};
/**
@@ -608,6 +611,7 @@ extern void nbcon_write_context_set_buf(struct nbcon_write_context *wctxt,
extern bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt);
extern bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt);
extern void nbcon_reacquire_nobuf(struct nbcon_write_context *wctxt);
+extern bool nbcon_allow_unsafe_takeover(void);
extern bool nbcon_kdb_try_acquire(struct console *con,
struct nbcon_write_context *wctxt);
extern void nbcon_kdb_release(struct nbcon_write_context *wctxt);
@@ -627,9 +631,18 @@ static inline bool console_is_usable(struct console *con, short flags, bool use_
return false;
if (flags & CON_NBCON) {
- /* The write_atomic() callback is optional. */
- if (use_atomic && !con->write_atomic)
- return false;
+ if (use_atomic) {
+ /* The write_atomic() callback is optional. */
+ if (!con->write_atomic)
+ return false;
+
+ /*
+ * An unsafe write_atomic() callback is only usable
+ * when unsafe takeovers are allowed.
+ */
+ if ((flags & CON_NBCON_ATOMIC_UNSAFE) && !nbcon_allow_unsafe_takeover())
+ return false;
+ }
/*
* For the !use_atomic case, @printk_kthreads_running is not
diff --git a/kernel/printk/nbcon.c b/kernel/printk/nbcon.c
index 0bbb03fde35d..3fa403f9831f 100644
--- a/kernel/printk/nbcon.c
+++ b/kernel/printk/nbcon.c
@@ -1437,6 +1437,26 @@ enum nbcon_prio nbcon_get_default_prio(void)
return NBCON_PRIO_NORMAL;
}
+/*
+ * Track if it is allowed to perform unsafe hostile takeovers of console
+ * ownership. When true, console drivers might perform unsafe actions while
+ * printing. It is externally available via nbcon_allow_unsafe_takeover().
+ */
+static bool panic_nbcon_allow_unsafe_takeover;
+
+/**
+ * nbcon_allow_unsafe_takeover - Check if unsafe console takeovers are allowed
+ *
+ * Return: True, when it is permitted to perform unsafe console printing
+ *
+ * This is also used by console_is_usable() to determine if it is allowed to
+ * call write_atomic() callbacks flagged as unsafe (CON_NBCON_ATOMIC_UNSAFE).
+ */
+bool nbcon_allow_unsafe_takeover(void)
+{
+ return panic_on_this_cpu() && panic_nbcon_allow_unsafe_takeover;
+}
+
/**
* nbcon_legacy_emit_next_record - Print one record for an nbcon console
* in legacy contexts
@@ -1507,7 +1527,6 @@ bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
* write_atomic() callback
* @con: The nbcon console to flush
* @stop_seq: Flush up until this record
- * @allow_unsafe_takeover: True, to allow unsafe hostile takeovers
*
* Return: 0 if @con was flushed up to @stop_seq. Otherwise, error code on
* failure.
@@ -1526,8 +1545,7 @@ bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
* returned, it cannot be expected that the unfinalized record will become
* available.
*/
-static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
- bool allow_unsafe_takeover)
+static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
{
struct nbcon_write_context wctxt = { };
struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
@@ -1536,7 +1554,7 @@ static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
ctxt->console = con;
ctxt->spinwait_max_us = 2000;
ctxt->prio = nbcon_get_default_prio();
- ctxt->allow_unsafe_takeover = allow_unsafe_takeover;
+ ctxt->allow_unsafe_takeover = nbcon_allow_unsafe_takeover();
while (nbcon_seq_read(con) < stop_seq) {
if (!nbcon_context_try_acquire(ctxt, false))
@@ -1568,15 +1586,13 @@ static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
* write_atomic() callback
* @con: The nbcon console to flush
* @stop_seq: Flush up until this record
- * @allow_unsafe_takeover: True, to allow unsafe hostile takeovers
*
* This will stop flushing before @stop_seq if another context has ownership.
* That context is then responsible for the flushing. Likewise, if new records
* are added while this context was flushing and there is no other context
* to handle the printing, this context must also flush those records.
*/
-static void nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
- bool allow_unsafe_takeover)
+static void nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
{
struct console_flush_type ft;
unsigned long flags;
@@ -1591,7 +1607,7 @@ again:
*/
local_irq_save(flags);
- err = __nbcon_atomic_flush_pending_con(con, stop_seq, allow_unsafe_takeover);
+ err = __nbcon_atomic_flush_pending_con(con, stop_seq);
local_irq_restore(flags);
@@ -1623,9 +1639,8 @@ again:
* __nbcon_atomic_flush_pending - Flush all nbcon consoles using their
* write_atomic() callback
* @stop_seq: Flush up until this record
- * @allow_unsafe_takeover: True, to allow unsafe hostile takeovers
*/
-static void __nbcon_atomic_flush_pending(u64 stop_seq, bool allow_unsafe_takeover)
+static void __nbcon_atomic_flush_pending(u64 stop_seq)
{
struct console *con;
int cookie;
@@ -1643,7 +1658,7 @@ static void __nbcon_atomic_flush_pending(u64 stop_seq, bool allow_unsafe_takeove
if (nbcon_seq_read(con) >= stop_seq)
continue;
- nbcon_atomic_flush_pending_con(con, stop_seq, allow_unsafe_takeover);
+ nbcon_atomic_flush_pending_con(con, stop_seq);
}
console_srcu_read_unlock(cookie);
}
@@ -1659,7 +1674,7 @@ static void __nbcon_atomic_flush_pending(u64 stop_seq, bool allow_unsafe_takeove
*/
void nbcon_atomic_flush_pending(void)
{
- __nbcon_atomic_flush_pending(prb_next_reserve_seq(prb), false);
+ __nbcon_atomic_flush_pending(prb_next_reserve_seq(prb));
}
/**
@@ -1671,7 +1686,9 @@ void nbcon_atomic_flush_pending(void)
*/
void nbcon_atomic_flush_unsafe(void)
{
- __nbcon_atomic_flush_pending(prb_next_reserve_seq(prb), true);
+ panic_nbcon_allow_unsafe_takeover = true;
+ __nbcon_atomic_flush_pending(prb_next_reserve_seq(prb));
+ panic_nbcon_allow_unsafe_takeover = false;
}
/**
@@ -1894,7 +1911,7 @@ void nbcon_device_release(struct console *con)
* using the legacy loop.
*/
if (ft.nbcon_atomic) {
- __nbcon_atomic_flush_pending_con(con, prb_next_reserve_seq(prb), false);
+ __nbcon_atomic_flush_pending_con(con, prb_next_reserve_seq(prb));
} else if (ft.legacy_direct) {
if (console_trylock())
console_unlock();
@@ -1964,5 +1981,5 @@ void nbcon_kdb_release(struct nbcon_write_context *wctxt)
* The console was locked only when the write_atomic() callback
* was usable.
*/
- __nbcon_atomic_flush_pending_con(ctxt->console, prb_next_reserve_seq(prb), false);
+ __nbcon_atomic_flush_pending_con(ctxt->console, prb_next_reserve_seq(prb));
}