Diffstat (limited to 'kernel/printk/printk_ringbuffer.c')
-rw-r--r--  kernel/printk/printk_ringbuffer.c  |  126
1 file changed, 94 insertions, 32 deletions
diff --git a/kernel/printk/printk_ringbuffer.c b/kernel/printk/printk_ringbuffer.c
index 88e8f3a61922..56c8e3d031f4 100644
--- a/kernel/printk/printk_ringbuffer.c
+++ b/kernel/printk/printk_ringbuffer.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
+#include <kunit/visibility.h>
#include <linux/kernel.h>
#include <linux/irqflags.h>
#include <linux/string.h>
@@ -393,25 +394,38 @@ static unsigned int to_blk_size(unsigned int size)
* Sanity checker for reserve size. The ringbuffer code assumes that a data
* block does not exceed the maximum possible size that could fit within the
* ringbuffer. This function provides that basic size check so that the
- * assumption is safe.
+ * assumption is safe. In particular, it guarantees that data_push_tail() will
+ * never attempt to push the tail beyond the head.
*/
static bool data_check_size(struct prb_data_ring *data_ring, unsigned int size)
{
- struct prb_data_block *db = NULL;
-
+ /* Data-less blocks take no space. */
if (size == 0)
return true;
/*
- * Ensure the alignment padded size could possibly fit in the data
- * array. The largest possible data block must still leave room for
- * at least the ID of the next block.
+ * If data blocks were allowed to be larger than half the data ring
+ * size, a wrapping data block could require more space than the full
+ * ringbuffer.
*/
- size = to_blk_size(size);
- if (size > DATA_SIZE(data_ring) - sizeof(db->id))
- return false;
+ return to_blk_size(size) <= DATA_SIZE(data_ring) / 2;
+}
- return true;
+/*
+ * Compare the current and requested logical positions and decide
+ * whether more space is needed.
+ *
+ * Return false when @lpos_current is already at or beyond @lpos_target.
+ *
+ * Also return false when the difference between the positions is bigger
+ * than the size of the data buffer. This can happen only when the caller
+ * raced with other CPUs that have already made and used the space.
+ */
+static bool need_more_space(struct prb_data_ring *data_ring,
+ unsigned long lpos_current,
+ unsigned long lpos_target)
+{
+ return lpos_target - lpos_current - 1 < DATA_SIZE(data_ring);
}
/* Query the state of a descriptor. */
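
For illustration, a minimal user-space sketch of the unsigned-wraparound comparison used by need_more_space(); the 64-byte ring size and the sample positions are made up, and this is not kernel code:

    #include <stdbool.h>
    #include <stdio.h>

    #define DATA_SIZE 64UL    /* hypothetical ring size (power of 2) */

    static bool need_more_space(unsigned long lpos_current,
                                unsigned long lpos_target)
    {
            /*
             * True only while lpos_current trails lpos_target by at least
             * one byte and by no more than DATA_SIZE. "Already at/beyond
             * the target" and "the target is stale because other CPUs
             * already made and used the space" both fall outside that
             * window thanks to the unsigned subtraction wrapping around.
             */
            return lpos_target - lpos_current - 1 < DATA_SIZE;
    }

    int main(void)
    {
            printf("%d\n", need_more_space(10, 30)); /* 1: 20 bytes still missing */
            printf("%d\n", need_more_space(30, 30)); /* 0: already at the target  */
            printf("%d\n", need_more_space(40, 30)); /* 0: already beyond it      */
            return 0;
    }
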
@@ -580,7 +594,7 @@ static bool data_make_reusable(struct printk_ringbuffer *rb,
unsigned long id;
/* Loop until @lpos_begin has advanced to or beyond @lpos_end. */
- while ((lpos_end - lpos_begin) - 1 < DATA_SIZE(data_ring)) {
+ while (need_more_space(data_ring, lpos_begin, lpos_end)) {
blk = to_block(data_ring, lpos_begin);
/*
@@ -671,7 +685,7 @@ static bool data_push_tail(struct printk_ringbuffer *rb, unsigned long lpos)
* sees the new tail lpos, any descriptor states that transitioned to
* the reusable state must already be visible.
*/
- while ((lpos - tail_lpos) - 1 < DATA_SIZE(data_ring)) {
+ while (need_more_space(data_ring, tail_lpos, lpos)) {
/*
* Make all descriptors reusable that are associated with
* data blocks before @lpos.
@@ -1002,6 +1016,17 @@ static bool desc_reserve(struct printk_ringbuffer *rb, unsigned long *id_out)
return true;
}
+static bool is_blk_wrapped(struct prb_data_ring *data_ring,
+ unsigned long begin_lpos, unsigned long next_lpos)
+{
+ /*
+ * Subtract one from next_lpos since it's not actually part of this data
+ * block. This way, perfectly fitting records are not treated as wrapped.
+ */
+ return DATA_WRAPS(data_ring, begin_lpos) !=
+ DATA_WRAPS(data_ring, next_lpos - 1);
+}
+
/* Determine the end of a data block. */
static unsigned long get_next_lpos(struct prb_data_ring *data_ring,
unsigned long lpos, unsigned int size)
@@ -1013,7 +1038,7 @@ static unsigned long get_next_lpos(struct prb_data_ring *data_ring,
next_lpos = lpos + size;
/* First check if the data block does not wrap. */
- if (DATA_WRAPS(data_ring, begin_lpos) == DATA_WRAPS(data_ring, next_lpos))
+ if (!is_blk_wrapped(data_ring, begin_lpos, next_lpos))
return next_lpos;
/* Wrapping data blocks store their data at the beginning. */
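
For illustration, a stand-alone sketch (not kernel code) of the wrap test behind is_blk_wrapped(); it assumes DATA_WRAPS() is the logical position shifted down by the ring's size bits, and the 6-bit (64-byte) ring is made up:

    #include <stdbool.h>
    #include <stdio.h>

    #define SIZE_BITS 6UL                         /* hypothetical 64-byte data ring */
    #define DATA_WRAPS(lpos) ((lpos) >> SIZE_BITS)

    static bool is_blk_wrapped(unsigned long begin_lpos, unsigned long next_lpos)
    {
            /*
             * next_lpos is the first byte after the block, so look at
             * next_lpos - 1: a block ending exactly on the ring boundary
             * is not counted as wrapped.
             */
            return DATA_WRAPS(begin_lpos) != DATA_WRAPS(next_lpos - 1);
    }

    int main(void)
    {
            printf("%d\n", is_blk_wrapped(40, 56)); /* 0: fits within this wrap        */
            printf("%d\n", is_blk_wrapped(40, 64)); /* 0: ends exactly on the boundary */
            printf("%d\n", is_blk_wrapped(40, 72)); /* 1: spills into the next wrap    */
            return 0;
    }
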
@@ -1051,8 +1076,17 @@ static char *data_alloc(struct printk_ringbuffer *rb, unsigned int size,
do {
next_lpos = get_next_lpos(data_ring, begin_lpos, size);
- if (!data_push_tail(rb, next_lpos - DATA_SIZE(data_ring))) {
- /* Failed to allocate, specify a data-less block. */
+ /*
+ * data_check_size() prevents data block allocation that could
+ * cause illegal ringbuffer states. But double check that the
+ * used space will not be bigger than the ring buffer. Wrapped
+ * messages need to reserve more space, see get_next_lpos().
+ *
+ * Specify a data-less block when the check or the allocation
+ * fails.
+ */
+ if (WARN_ON_ONCE(next_lpos - begin_lpos > DATA_SIZE(data_ring)) ||
+ !data_push_tail(rb, next_lpos - DATA_SIZE(data_ring))) {
blk_lpos->begin = FAILED_LPOS;
blk_lpos->next = FAILED_LPOS;
return NULL;
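
For illustration, a worked user-space sketch (not kernel code) of why capping blocks at half the data ring in data_check_size() keeps the double check above from triggering; the simplified get_next_lpos() model, the 64-byte ring and the sample block are made up:

    #include <stdio.h>

    #define DATA_SIZE      64UL                         /* hypothetical ring size */
    #define WRAP_START(l)  ((l) & ~(DATA_SIZE - 1))     /* start of the wrap holding l */

    /* Simplified model: a wrapping block restarts at the next wrap boundary. */
    static unsigned long get_next_lpos(unsigned long lpos, unsigned long size)
    {
            unsigned long next_lpos = lpos + size;

            if (WRAP_START(lpos) == WRAP_START(next_lpos - 1))
                    return next_lpos;                   /* block does not wrap */
            return WRAP_START(next_lpos) + size;        /* skip the unusable tail */
    }

    int main(void)
    {
            /* A 32-byte block starting at index 48 wraps: 16 bytes are skipped. */
            unsigned long begin = 48, size = 32;
            unsigned long next = get_next_lpos(begin, size);

            /* Prints "used = 48": still <= DATA_SIZE because size <= DATA_SIZE / 2. */
            printf("used = %lu\n", next - begin);
            return 0;
    }
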
@@ -1081,7 +1115,7 @@ static char *data_alloc(struct printk_ringbuffer *rb, unsigned int size,
blk = to_block(data_ring, begin_lpos);
blk->id = id; /* LMM(data_alloc:B) */
- if (DATA_WRAPS(data_ring, begin_lpos) != DATA_WRAPS(data_ring, next_lpos)) {
+ if (is_blk_wrapped(data_ring, begin_lpos, next_lpos)) {
/* Wrapping data blocks store their data at the beginning. */
blk = to_block(data_ring, 0);
@@ -1125,14 +1159,21 @@ static char *data_realloc(struct printk_ringbuffer *rb, unsigned int size,
return NULL;
/* Keep track if @blk_lpos was a wrapping data block. */
- wrapped = (DATA_WRAPS(data_ring, blk_lpos->begin) != DATA_WRAPS(data_ring, blk_lpos->next));
+ wrapped = is_blk_wrapped(data_ring, blk_lpos->begin, blk_lpos->next);
size = to_blk_size(size);
next_lpos = get_next_lpos(data_ring, blk_lpos->begin, size);
- /* If the data block does not increase, there is nothing to do. */
- if (head_lpos - next_lpos < DATA_SIZE(data_ring)) {
+ /*
+ * Use the current data block when the size does not increase, i.e.
+ * when @head_lpos is already able to accommodate the new @next_lpos.
+ *
+ * Note that need_more_space() can never return false here merely
+ * because the difference between the positions is bigger than the data
+ * buffer size. The data block is reopened and can't get reused.
+ */
+ if (!need_more_space(data_ring, head_lpos, next_lpos)) {
if (wrapped)
blk = to_block(data_ring, 0);
else
@@ -1140,8 +1181,18 @@ static char *data_realloc(struct printk_ringbuffer *rb, unsigned int size,
return &blk->data[0];
}
- if (!data_push_tail(rb, next_lpos - DATA_SIZE(data_ring)))
+ /*
+ * data_check_size() prevents data block reallocation that could
+ * cause illegal ringbuffer states. But double check that the
+ * new used space will not be bigger than the ring buffer. Wrapped
+ * messages need to reserve more space, see get_next_lpos().
+ *
+ * Return NULL when the check or the allocation fails.
+ */
+ if (WARN_ON_ONCE(next_lpos - blk_lpos->begin > DATA_SIZE(data_ring)) ||
+ !data_push_tail(rb, next_lpos - DATA_SIZE(data_ring))) {
return NULL;
+ }
/* The memory barrier involvement is the same as data_alloc:A. */
if (!atomic_long_try_cmpxchg(&data_ring->head_lpos, &head_lpos,
@@ -1151,7 +1202,7 @@ static char *data_realloc(struct printk_ringbuffer *rb, unsigned int size,
blk = to_block(data_ring, blk_lpos->begin);
- if (DATA_WRAPS(data_ring, blk_lpos->begin) != DATA_WRAPS(data_ring, next_lpos)) {
+ if (is_blk_wrapped(data_ring, blk_lpos->begin, next_lpos)) {
struct prb_data_block *old_blk = blk;
/* Wrapping data blocks store their data at the beginning. */
@@ -1187,7 +1238,7 @@ static unsigned int space_used(struct prb_data_ring *data_ring,
if (BLK_DATALESS(blk_lpos))
return 0;
- if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next)) {
+ if (!is_blk_wrapped(data_ring, blk_lpos->begin, blk_lpos->next)) {
/* Data block does not wrap. */
return (DATA_INDEX(data_ring, blk_lpos->next) -
DATA_INDEX(data_ring, blk_lpos->begin));
@@ -1233,15 +1284,15 @@ static const char *get_data(struct prb_data_ring *data_ring,
return NULL;
}
- /* Regular data block: @begin less than @next and in same wrap. */
- if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next) &&
- blk_lpos->begin < blk_lpos->next) {
+ /* Regular data block: @begin and @next in the same wrap. */
+ if (!is_blk_wrapped(data_ring, blk_lpos->begin, blk_lpos->next)) {
db = to_block(data_ring, blk_lpos->begin);
*data_size = blk_lpos->next - blk_lpos->begin;
/* Wrapping data block: @begin is one wrap behind @next. */
- } else if (DATA_WRAPS(data_ring, blk_lpos->begin + DATA_SIZE(data_ring)) ==
- DATA_WRAPS(data_ring, blk_lpos->next)) {
+ } else if (!is_blk_wrapped(data_ring,
+ blk_lpos->begin + DATA_SIZE(data_ring),
+ blk_lpos->next)) {
db = to_block(data_ring, 0);
*data_size = DATA_INDEX(data_ring, blk_lpos->next);
@@ -1251,6 +1302,10 @@ static const char *get_data(struct prb_data_ring *data_ring,
return NULL;
}
+ /* Sanity check. Data-less blocks were handled earlier. */
+ if (WARN_ON_ONCE(!data_check_size(data_ring, *data_size) || !*data_size))
+ return NULL;
+
/* A valid data block will always be aligned to the ID size. */
if (WARN_ON_ONCE(blk_lpos->begin != ALIGN(blk_lpos->begin, sizeof(db->id))) ||
WARN_ON_ONCE(blk_lpos->next != ALIGN(blk_lpos->next, sizeof(db->id)))) {
@@ -1685,6 +1740,7 @@ fail:
memset(r, 0, sizeof(*r));
return false;
}
+EXPORT_SYMBOL_IF_KUNIT(prb_reserve);
/* Commit the data (possibly finalizing it) and restore interrupts. */
static void _prb_commit(struct prb_reserved_entry *e, unsigned long state_val)
@@ -1759,6 +1815,7 @@ void prb_commit(struct prb_reserved_entry *e)
if (head_id != e->id)
desc_make_final(e->rb, e->id);
}
+EXPORT_SYMBOL_IF_KUNIT(prb_commit);
/**
* prb_final_commit() - Commit and finalize (previously reserved) data to
@@ -2133,9 +2190,9 @@ static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq,
* there may be other finalized records beyond that
* need to be printed for a panic situation. If this
* is the panic CPU, skip this
- * non-existent/non-finalized record unless it is
- * at or beyond the head, in which case it is not
- * possible to continue.
+ * non-existent/non-finalized record unless non-panic
+ * CPUs are still running and their debugging is
+ * explicitly enabled.
*
* Note that new messages printed on panic CPU are
* finalized when we are here. The only exception
@@ -2143,10 +2200,13 @@ static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq,
* But it would have the sequence number returned
* by "prb_next_reserve_seq() - 1".
*/
- if (this_cpu_in_panic() && ((*seq + 1) < prb_next_reserve_seq(rb)))
+ if (panic_on_this_cpu() &&
+ (!debug_non_panic_cpus || legacy_allow_panic_sync) &&
+ ((*seq + 1) < prb_next_reserve_seq(rb))) {
(*seq)++;
- else
+ } else {
return false;
+ }
}
}
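
For illustration, a tiny stand-alone sketch (not kernel code) of the skip condition added above, with the kernel flags modeled as plain booleans:

    #include <stdbool.h>
    #include <stdio.h>

    /* Plain-boolean model of the condition in _prb_read_valid() above. */
    static bool skip_nonfinalized(bool panic_on_this_cpu, bool debug_non_panic_cpus,
                                  bool legacy_allow_panic_sync, bool behind_next_reserve)
    {
            return panic_on_this_cpu &&
                   (!debug_non_panic_cpus || legacy_allow_panic_sync) &&
                   behind_next_reserve;
    }

    int main(void)
    {
            /* Panic CPU, debugging of non-panic CPUs off: skip and keep printing. */
            printf("%d\n", skip_nonfinalized(true, false, false, true));  /* 1 */
            /* Debugging of non-panic CPUs explicitly enabled: stop instead. */
            printf("%d\n", skip_nonfinalized(true, true, false, true));   /* 0 */
            return 0;
    }
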
@@ -2181,6 +2241,7 @@ bool prb_read_valid(struct printk_ringbuffer *rb, u64 seq,
{
return _prb_read_valid(rb, &seq, r, NULL);
}
+EXPORT_SYMBOL_IF_KUNIT(prb_read_valid);
/**
* prb_read_valid_info() - Non-blocking read of meta data for a requested
@@ -2330,6 +2391,7 @@ void prb_init(struct printk_ringbuffer *rb,
infos[0].seq = -(u64)_DESCS_COUNT(descbits);
infos[_DESCS_COUNT(descbits) - 1].seq = 0;
}
+EXPORT_SYMBOL_IF_KUNIT(prb_init);
/**
* prb_record_text_space() - Query the full actual used ringbuffer space for