39 files changed, 678 insertions, 387 deletions
diff --git a/Documentation/core-api/printk-formats.rst b/Documentation/core-api/printk-formats.rst
index 7f2f11b48286..c0b1b6089307 100644
--- a/Documentation/core-api/printk-formats.rst
+++ b/Documentation/core-api/printk-formats.rst
@@ -547,11 +547,13 @@ Time and date
 	%pt[RT]s	YYYY-mm-dd HH:MM:SS
 	%pt[RT]d	YYYY-mm-dd
 	%pt[RT]t	HH:MM:SS
-	%pt[RT][dt][r][s]
+	%ptSp		<seconds>.<nanoseconds>
+	%pt[RST][dt][r][s]

 For printing date and time as represented by::

-	R  struct rtc_time structure
+	R  content of struct rtc_time
+	S  content of struct timespec64
 	T  time64_t type

 in human readable format.
@@ -563,6 +565,11 @@ The %pt[RT]s (space) will override ISO 8601 separator by using ' ' (space)
 instead of 'T' (Capital T) between date and time. It won't have any effect
 when date or time is omitted.

+The %ptSp is equivalent to %lld.%09ld for the content of a struct timespec64.
+When the other specifiers are given, it becomes the respective equivalent of
+%ptT[dt][r][s].%09ld. In other words, the seconds are printed in the
+human readable format, followed by a dot and the nanoseconds.
+
 Passed by reference.

 struct clk
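For illustration, a minimal usage sketch of the new specifier (not part of the
diff; the function and its device argument are hypothetical, while the
timekeeping and printk APIs are the existing kernel ones):

	static void example_log_timestamp(struct device *dev)
	{
		struct timespec64 ts;

		ktime_get_real_ts64(&ts);

		/* open-coded before this series */
		dev_dbg(dev, "event at %lld.%09ld\n", (s64)ts.tv_sec, ts.tv_nsec);

		/* with this series: pass the struct by reference */
		dev_dbg(dev, "event at %ptSp\n", &ts);
	}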
diff --git a/MAINTAINERS b/MAINTAINERS
index 618e23f99dfa..2507a59f6572 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -20608,6 +20608,7 @@ R:	John Ogness <john.ogness@linutronix.de>
 R:	Sergey Senozhatsky <senozhatsky@chromium.org>
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/printk/linux.git
+F:	Documentation/core-api/printk-basics.rst
 F:	include/linux/printk.h
 F:	kernel/printk/
diff --git a/arch/um/kernel/kmsg_dump.c b/arch/um/kernel/kmsg_dump.c
index 419021175272..fc0f543d1d8e 100644
--- a/arch/um/kernel/kmsg_dump.c
+++ b/arch/um/kernel/kmsg_dump.c
@@ -31,7 +31,7 @@ static void kmsg_dumper_stdout(struct kmsg_dumper *dumper,
		 * expected to output the crash information.
		 */
		if (strcmp(con->name, "ttynull") != 0 &&
-		    (console_srcu_read_flags(con) & CON_ENABLED)) {
+		    console_is_usable(con, console_srcu_read_flags(con), true)) {
			break;
		}
	}
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 70e55f5ff85e..5459ffdde8dc 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -275,8 +275,7 @@ void debug_timestamp(struct smi_info *smi_info, char *msg)
	struct timespec64 t;

	ktime_get_ts64(&t);
-	dev_dbg(smi_info->io.dev, "**%s: %lld.%9.9ld\n",
-		msg, t.tv_sec, t.tv_nsec);
+	dev_dbg(smi_info->io.dev, "**%s: %ptSp\n", msg, &t);
 }
 #else
 #define debug_timestamp(smi_info, x)
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 1b63f7d2fcda..ef1582a029f4 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -1083,10 +1083,8 @@ static int sender(void *send_info, struct ipmi_smi_msg *msg)
		struct timespec64 t;

		ktime_get_real_ts64(&t);
-		dev_dbg(&ssif_info->client->dev,
-			"**Enqueue %02x %02x: %lld.%6.6ld\n",
-			msg->data[0], msg->data[1],
-			(long long)t.tv_sec, (long)t.tv_nsec / NSEC_PER_USEC);
+		dev_dbg(&ssif_info->client->dev, "**Enqueue %02x %02x: %ptSp\n",
+			msg->data[0], msg->data[1], &t);
	}
	return IPMI_CC_NO_ERROR;
 }
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
index 67cd69551e42..9e5d662cd4e8 100644
--- a/drivers/dma-buf/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -59,7 +59,7 @@ static void sync_print_fence(struct seq_file *s,
		struct timespec64 ts64 =
			ktime_to_timespec64(fence->timestamp);

-		seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec);
+		seq_printf(s, "@%ptSp", &ts64);
	}

	seq_printf(s, ": %lld", fence->seqno);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
index 8a026bc9ea44..4e2fe6674db8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
@@ -217,8 +217,7 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
	drm_printf(&p, "version: " AMDGPU_COREDUMP_VERSION "\n");
	drm_printf(&p, "kernel: " UTS_RELEASE "\n");
	drm_printf(&p, "module: " KBUILD_MODNAME "\n");
-	drm_printf(&p, "time: %lld.%09ld\n", coredump->reset_time.tv_sec,
-		   coredump->reset_time.tv_nsec);
+	drm_printf(&p, "time: %ptSp\n", &coredump->reset_time);
	if (coredump->reset_task_info.task.pid)
		drm_printf(&p, "process_name: %s PID: %d\n",
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 46f59883183d..67eae477cbd7 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -794,10 +794,8 @@ drm_crtc_vblank_helper_get_vblank_timestamp_internal(
	ts_vblank_time = ktime_to_timespec64(*vblank_time);

	drm_dbg_vbl(dev,
-		    "crtc %u : v p(%d,%d)@ %lld.%06ld -> %lld.%06ld [e %d us, %d rep]\n",
-		    pipe, hpos, vpos,
-		    (u64)ts_etime.tv_sec, ts_etime.tv_nsec / 1000,
-		    (u64)ts_vblank_time.tv_sec, ts_vblank_time.tv_nsec / 1000,
+		    "crtc %u : v p(%d,%d)@ %ptSp -> %ptSp [e %d us, %d rep]\n",
+		    pipe, hpos, vpos, &ts_etime, &ts_vblank_time,
		    duration_ns / 1000, i);

	return true;
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
index 071bcdea80f7..19b470968f4d 100644
--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
@@ -82,8 +82,7 @@ void msm_disp_state_print(struct msm_disp_state *state, struct drm_printer *p)
"\n"); drm_printf(p, "module: " KBUILD_MODNAME "\n"); drm_printf(p, "dpu devcoredump\n"); - drm_printf(p, "time: %lld.%09ld\n", - state->time.tv_sec, state->time.tv_nsec); + drm_printf(p, "time: %ptSp\n", &state->time); list_for_each_entry_safe(block, tmp, &state->blocks, node) { drm_printf(p, "====================%s================\n", block->name); diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 17759abc46d7..a4251afe4541 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -197,8 +197,7 @@ static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset, drm_printf(&p, "---\n"); drm_printf(&p, "kernel: " UTS_RELEASE "\n"); drm_printf(&p, "module: " KBUILD_MODNAME "\n"); - drm_printf(&p, "time: %lld.%09ld\n", - state->time.tv_sec, state->time.tv_nsec); + drm_printf(&p, "time: %ptSp\n", &state->time); if (state->comm) drm_printf(&p, "comm: %s\n", state->comm); if (state->cmd) diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c index 203e3038cc81..d444eda65ca6 100644 --- a/drivers/gpu/drm/xe/xe_devcoredump.c +++ b/drivers/gpu/drm/xe/xe_devcoredump.c @@ -106,9 +106,9 @@ static ssize_t __xe_devcoredump_read(char *buffer, ssize_t count, drm_puts(&p, "module: " KBUILD_MODNAME "\n"); ts = ktime_to_timespec64(ss->snapshot_time); - drm_printf(&p, "Snapshot time: %lld.%09ld\n", ts.tv_sec, ts.tv_nsec); + drm_printf(&p, "Snapshot time: %ptSp\n", &ts); ts = ktime_to_timespec64(ss->boot_time); - drm_printf(&p, "Uptime: %lld.%09ld\n", ts.tv_sec, ts.tv_nsec); + drm_printf(&p, "Uptime: %ptSp\n", &ts); drm_printf(&p, "Process: %s [%d]\n", ss->process_name, ss->pid); xe_device_snapshot_print(xe, &p); diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c index 67d4a301895c..d59b5e5a7b45 100644 --- a/drivers/mmc/core/mmc_test.c +++ b/drivers/mmc/core/mmc_test.c @@ -586,14 +586,11 @@ static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes, rate = mmc_test_rate(tot, &ts); iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */ - pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took " - "%llu.%09u seconds (%u kB/s, %u KiB/s, " - "%u.%02u IOPS, sg_len %d)\n", - mmc_hostname(test->card->host), count, sectors, count, - sectors >> 1, (sectors & 1 ? ".5" : ""), - (u64)ts.tv_sec, (u32)ts.tv_nsec, - rate / 1000, rate / 1024, iops / 100, iops % 100, - test->area.sg_len); + pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took %ptSp seconds (%u kB/s, %u KiB/s, %u.%02u IOPS, sg_len %d)\n", + mmc_hostname(test->card->host), count, sectors, count, + sectors >> 1, (sectors & 1 ? 
".5" : ""), &ts, + rate / 1000, rate / 1024, iops / 100, iops % 100, + test->area.sg_len); mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops); } @@ -3074,10 +3071,9 @@ static int mtf_test_show(struct seq_file *sf, void *data) seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result); list_for_each_entry(tr, &gr->tr_lst, link) { - seq_printf(sf, "%u %d %llu.%09u %u %u.%02u\n", - tr->count, tr->sectors, - (u64)tr->ts.tv_sec, (u32)tr->ts.tv_nsec, - tr->rate, tr->iops / 100, tr->iops % 100); + seq_printf(sf, "%u %d %ptSp %u %u.%02u\n", + tr->count, tr->sectors, &tr->ts, tr->rate, + tr->iops / 100, tr->iops % 100); } } diff --git a/drivers/net/dsa/sja1105/sja1105_tas.c b/drivers/net/dsa/sja1105/sja1105_tas.c index d7818710bc02..d5949d2c3e71 100644 --- a/drivers/net/dsa/sja1105/sja1105_tas.c +++ b/drivers/net/dsa/sja1105/sja1105_tas.c @@ -775,9 +775,8 @@ static void sja1105_tas_state_machine(struct work_struct *work) base_time_ts = ns_to_timespec64(base_time); now_ts = ns_to_timespec64(now); - dev_dbg(ds->dev, "OPER base time %lld.%09ld (now %lld.%09ld)\n", - base_time_ts.tv_sec, base_time_ts.tv_nsec, - now_ts.tv_sec, now_ts.tv_nsec); + dev_dbg(ds->dev, "OPER base time %ptSp (now %ptSp)\n", + &base_time_ts, &now_ts); break; @@ -798,8 +797,7 @@ static void sja1105_tas_state_machine(struct work_struct *work) if (now < tas_data->oper_base_time) { /* TAS has not started yet */ diff = ns_to_timespec64(tas_data->oper_base_time - now); - dev_dbg(ds->dev, "time to start: [%lld.%09ld]", - diff.tv_sec, diff.tv_nsec); + dev_dbg(ds->dev, "time to start: [%ptSp]", &diff); break; } diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c index ea3c3eb2ef20..ec39e35f3857 100644 --- a/drivers/net/ethernet/intel/e1000e/ptp.c +++ b/drivers/net/ethernet/intel/e1000e/ptp.c @@ -229,14 +229,11 @@ static void e1000e_systim_overflow_work(struct work_struct *work) systim_overflow_work.work); struct e1000_hw *hw = &adapter->hw; struct timespec64 ts; - u64 ns; /* Update the timecounter */ - ns = timecounter_read(&adapter->tc); + ts = ns_to_timespec64(timecounter_read(&adapter->tc)); - ts = ns_to_timespec64(ns); - e_dbg("SYSTIM overflow check at %lld.%09lu\n", - (long long) ts.tv_sec, ts.tv_nsec); + e_dbg("SYSTIM overflow check at %ptSp\n", &ts); schedule_delayed_work(&adapter->systim_overflow_work, E1000_SYSTIM_OVERFLOW_PERIOD); diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index a7876882aeaf..bd85d02ecadd 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -840,14 +840,11 @@ static void igb_ptp_overflow_check(struct work_struct *work) struct igb_adapter *igb = container_of(work, struct igb_adapter, ptp_overflow_work.work); struct timespec64 ts; - u64 ns; /* Update the timecounter */ - ns = timecounter_read(&igb->tc); + ts = ns_to_timespec64(timecounter_read(&igb->tc)); - ts = ns_to_timespec64(ns); - pr_debug("igb overflow check at %lld.%09lu\n", - (long long) ts.tv_sec, ts.tv_nsec); + pr_debug("igb overflow check at %ptSp\n", &ts); schedule_delayed_work(&igb->ptp_overflow_work, IGB_SYSTIM_OVERFLOW_PERIOD); diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c index 31617772ad51..0686f7ecbe91 100644 --- a/drivers/pci/endpoint/functions/pci-epf-test.c +++ b/drivers/pci/endpoint/functions/pci-epf-test.c @@ -331,9 +331,8 @@ static void pci_epf_test_print_rate(struct pci_epf_test *epf_test, rate = div64_u64(size * 
	dev_info(&epf_test->epf->dev,
-		 "%s => Size: %llu B, DMA: %s, Time: %llu.%09u s, Rate: %llu KB/s\n",
-		 op, size, dma ? "YES" : "NO",
-		 (u64)ts.tv_sec, (u32)ts.tv_nsec, rate);
+		 "%s => Size: %llu B, DMA: %s, Time: %ptSp s, Rate: %llu KB/s\n",
+		 op, size, dma ? "YES" : "NO", &ts, rate);
 }

 static void pci_epf_test_copy(struct pci_epf_test *epf_test,
diff --git a/drivers/pps/generators/pps_gen_parport.c b/drivers/pps/generators/pps_gen_parport.c
index f5eeb4dd01ad..05bbf8d30ef1 100644
--- a/drivers/pps/generators/pps_gen_parport.c
+++ b/drivers/pps/generators/pps_gen_parport.c
@@ -80,8 +80,7 @@ static enum hrtimer_restart hrtimer_event(struct hrtimer *timer)
	/* check if we are late */
	if (expire_time.tv_sec != ts1.tv_sec || ts1.tv_nsec > lim) {
		local_irq_restore(flags);
-		pr_err("we are late this time %lld.%09ld\n",
-		       (s64)ts1.tv_sec, ts1.tv_nsec);
+		pr_err("we are late this time %ptSp\n", &ts1);
		goto done;
	}
diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c
index e9389876229e..6985c34de2ce 100644
--- a/drivers/pps/kapi.c
+++ b/drivers/pps/kapi.c
@@ -163,8 +163,7 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event,
	/* check event type */
	BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0);

-	dev_dbg(&pps->dev, "PPS event at %lld.%09ld\n",
-		(s64)ts->ts_real.tv_sec, ts->ts_real.tv_nsec);
+	dev_dbg(&pps->dev, "PPS event at %ptSp\n", &ts->ts_real);

	timespec_to_pps_ktime(&ts_real, ts->ts_real);
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index a5c363252986..258169df0ff8 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -4287,11 +4287,9 @@ ptp_ocp_summary_show(struct seq_file *s, void *data)
		ns += (s64)bp->utc_tai_offset * NSEC_PER_SEC;
		sys_ts = ns_to_timespec64(ns);

-		seq_printf(s, "%7s: %lld.%ld == %ptT TAI\n", "PHC",
-			   ts.tv_sec, ts.tv_nsec, &ts);
-		seq_printf(s, "%7s: %lld.%ld == %ptT UTC offset %d\n", "SYS",
-			   sys_ts.tv_sec, sys_ts.tv_nsec, &sys_ts,
-			   bp->utc_tai_offset);
+		seq_printf(s, "%7s: %ptSp == %ptS TAI\n", "PHC", &ts, &ts);
+		seq_printf(s, "%7s: %ptSp == %ptS UTC offset %d\n", "SYS",
+			   &sys_ts, &sys_ts, bp->utc_tai_offset);
		seq_printf(s, "%7s: PHC:SYS offset: %lld window: %lld\n", "",
			   timespec64_to_ns(&ts) - ns, post_ns - pre_ns);

@@ -4499,9 +4497,8 @@ ptp_ocp_phc_info(struct ptp_ocp *bp)
		 ptp_clock_index(bp->ptp));
	if (!ptp_ocp_gettimex(&bp->ptp_info, &ts, NULL))
-		dev_info(&bp->pdev->dev, "Time: %lld.%ld, %s\n",
-			 ts.tv_sec, ts.tv_nsec,
-			 bp->sync ? "in-sync" : "UNSYNCED");
+		dev_info(&bp->pdev->dev, "Time: %ptSp, %s\n",
"in-sync" : "UNSYNCED"); } static void diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 2f3039cca6f2..9a5c6a73511f 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -974,8 +974,7 @@ static void dasd_stats_array(struct seq_file *m, unsigned int *array) static void dasd_stats_seq_print(struct seq_file *m, struct dasd_profile_info *data) { - seq_printf(m, "start_time %lld.%09ld\n", - (s64)data->starttod.tv_sec, data->starttod.tv_nsec); + seq_printf(m, "start_time %ptSp\n", &data->starttod); seq_printf(m, "total_requests %u\n", data->dasd_io_reqs); seq_printf(m, "total_sectors %u\n", data->dasd_io_sects); seq_printf(m, "total_pav %u\n", data->dasd_io_alias); diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c index cdc6b12b1ec2..4ed57ea1f854 100644 --- a/drivers/scsi/fnic/fnic_trace.c +++ b/drivers/scsi/fnic/fnic_trace.c @@ -138,9 +138,8 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt) */ len += scnprintf(fnic_dbgfs_prt->buffer + len, (trace_max_pages * PAGE_SIZE * 3) - len, - "%16llu.%09lu %-50s %8x %8x %16llx %16llx " - "%16llx %16llx %16llx\n", (u64)val.tv_sec, - val.tv_nsec, str, tbp->host_no, tbp->tag, + "%ptSp %-50s %8x %8x %16llx %16llx %16llx %16llx %16llx\n", + &val, str, tbp->host_no, tbp->tag, tbp->data[0], tbp->data[1], tbp->data[2], tbp->data[3], tbp->data[4]); rd_idx++; @@ -180,9 +179,8 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt) */ len += scnprintf(fnic_dbgfs_prt->buffer + len, (trace_max_pages * PAGE_SIZE * 3) - len, - "%16llu.%09lu %-50s %8x %8x %16llx %16llx " - "%16llx %16llx %16llx\n", (u64)val.tv_sec, - val.tv_nsec, str, tbp->host_no, tbp->tag, + "%ptSp %-50s %8x %8x %16llx %16llx %16llx %16llx %16llx\n", + &val, str, tbp->host_no, tbp->tag, tbp->data[0], tbp->data[1], tbp->data[2], tbp->data[3], tbp->data[4]); rd_idx++; @@ -215,32 +213,29 @@ int fnic_get_stats_data(struct stats_debug_info *debug, { int len = 0; int buf_size = debug->buf_size; - struct timespec64 val1, val2; + struct timespec64 val, val1, val2; int i = 0; - ktime_get_real_ts64(&val1); + ktime_get_real_ts64(&val); len = scnprintf(debug->debug_buffer + len, buf_size - len, "------------------------------------------\n" "\t\tTime\n" "------------------------------------------\n"); + val1 = timespec64_sub(val, stats->stats_timestamps.last_reset_time); + val2 = timespec64_sub(val, stats->stats_timestamps.last_read_time); len += scnprintf(debug->debug_buffer + len, buf_size - len, - "Current time : [%lld:%ld]\n" - "Last stats reset time: [%lld:%09ld]\n" - "Last stats read time: [%lld:%ld]\n" - "delta since last reset: [%lld:%ld]\n" - "delta since last read: [%lld:%ld]\n", - (s64)val1.tv_sec, val1.tv_nsec, - (s64)stats->stats_timestamps.last_reset_time.tv_sec, - stats->stats_timestamps.last_reset_time.tv_nsec, - (s64)stats->stats_timestamps.last_read_time.tv_sec, - stats->stats_timestamps.last_read_time.tv_nsec, - (s64)timespec64_sub(val1, stats->stats_timestamps.last_reset_time).tv_sec, - timespec64_sub(val1, stats->stats_timestamps.last_reset_time).tv_nsec, - (s64)timespec64_sub(val1, stats->stats_timestamps.last_read_time).tv_sec, - timespec64_sub(val1, stats->stats_timestamps.last_read_time).tv_nsec); - - stats->stats_timestamps.last_read_time = val1; + "Current time : [%ptSp]\n" + "Last stats reset time: [%ptSp]\n" + "Last stats read time: [%ptSp]\n" + "delta since last reset: [%ptSp]\n" + "delta since last read: [%ptSp]\n", + &val, + &stats->stats_timestamps.last_reset_time, + 
+		&stats->stats_timestamps.last_read_time,
+		&val1, &val2);
+
+	stats->stats_timestamps.last_read_time = val;

	len += scnprintf(debug->debug_buffer + len, buf_size - len,
		"------------------------------------------\n"
@@ -416,8 +411,8 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
	jiffies_to_timespec64(stats->misc_stats.last_ack_time, &val2);

	len += scnprintf(debug->debug_buffer + len, buf_size - len,
-		  "Last ISR time: %llu (%8llu.%09lu)\n"
-		  "Last ACK time: %llu (%8llu.%09lu)\n"
+		  "Last ISR time: %llu (%ptSp)\n"
+		  "Last ACK time: %llu (%ptSp)\n"
		  "Max ISR jiffies: %llu\n"
		  "Max ISR time (ms) (0 denotes < 1 ms): %llu\n"
		  "Corr. work done: %llu\n"
@@ -437,10 +432,8 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
		  "Number of rport not ready: %lld\n"
		  "Number of receive frame errors: %lld\n"
		  "Port speed (in Mbps): %lld\n",
-		  (u64)stats->misc_stats.last_isr_time,
-		  (s64)val1.tv_sec, val1.tv_nsec,
-		  (u64)stats->misc_stats.last_ack_time,
-		  (s64)val2.tv_sec, val2.tv_nsec,
+		  (u64)stats->misc_stats.last_isr_time, &val1,
+		  (u64)stats->misc_stats.last_ack_time, &val2,
		  (u64)atomic64_read(&stats->misc_stats.max_isr_jiffies),
		  (u64)atomic64_read(&stats->misc_stats.max_isr_time_ms),
		  (u64)atomic64_read(&stats->misc_stats.corr_work_done),
@@ -857,8 +850,8 @@ void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
	len = *orig_len;

	len += scnprintf(fnic_dbgfs_prt->buffer + len, max_size - len,
-			 "%ptTs.%09lu ns%8x %c%8x\t",
-			 &tdata->time_stamp.tv_sec, tdata->time_stamp.tv_nsec,
+			 "%ptSs ns%8x %c%8x\t",
+			 &tdata->time_stamp,
			 tdata->host_no, tdata->frame_type, tdata->frame_len);

	fc_trace = (char *)FC_TRACE_ADDRESS(tdata);
diff --git a/drivers/scsi/snic/snic_debugfs.c b/drivers/scsi/snic/snic_debugfs.c
index 9dd975b36b5b..edf3e5ef28a6 100644
--- a/drivers/scsi/snic/snic_debugfs.c
+++ b/drivers/scsi/snic/snic_debugfs.c
@@ -282,8 +282,8 @@ snic_stats_show(struct seq_file *sfp, void *data)
	jiffies_to_timespec64(stats->misc.last_ack_time, &last_ack_tms);

	seq_printf(sfp,
-		   "Last ISR Time : %llu (%8llu.%09lu)\n"
-		   "Last Ack Time : %llu (%8llu.%09lu)\n"
+		   "Last ISR Time : %llu (%ptSp)\n"
+		   "Last Ack Time : %llu (%ptSp)\n"
		   "Ack ISRs : %llu\n"
		   "IO Cmpl ISRs : %llu\n"
		   "Err Notify ISRs : %llu\n"
@@ -298,10 +298,8 @@ snic_stats_show(struct seq_file *sfp, void *data)
		   "Queue Ramp Down : %lld\n"
		   "Queue Last Queue Depth : %lld\n"
		   "Target Not Ready : %lld\n",
-		   (u64) stats->misc.last_isr_time,
-		   last_isr_tms.tv_sec, last_isr_tms.tv_nsec,
-		   (u64)stats->misc.last_ack_time,
-		   last_ack_tms.tv_sec, last_ack_tms.tv_nsec,
+		   (u64) stats->misc.last_isr_time, &last_isr_tms,
+		   (u64) stats->misc.last_ack_time, &last_ack_tms,
		   (u64) atomic64_read(&stats->misc.ack_isr_cnt),
		   (u64) atomic64_read(&stats->misc.cmpl_isr_cnt),
		   (u64) atomic64_read(&stats->misc.errnotify_isr_cnt),
diff --git a/drivers/scsi/snic/snic_trc.c b/drivers/scsi/snic/snic_trc.c
index c2e5ab7e976c..6bad1ea9a6a7 100644
--- a/drivers/scsi/snic/snic_trc.c
+++ b/drivers/scsi/snic/snic_trc.c
@@ -56,9 +56,8 @@ snic_fmt_trc_data(struct snic_trc_data *td, char *buf, int buf_sz)
	jiffies_to_timespec64(td->ts, &tmspec);

	len += snprintf(buf, buf_sz,
-			"%llu.%09lu %-25s %3d %4x %16llx %16llx %16llx %16llx %16llx\n",
-			tmspec.tv_sec,
-			tmspec.tv_nsec,
+			"%ptSp %-25s %3d %4x %16llx %16llx %16llx %16llx %16llx\n",
+			&tmspec,
			td->fn,
			td->hno,
			td->tag,
diff --git a/drivers/staging/media/av7110/av7110.c b/drivers/staging/media/av7110/av7110.c
index bc9a2a40afcb..602342d1174f 100644
--- a/drivers/staging/media/av7110/av7110.c
+++ b/drivers/staging/media/av7110/av7110.c
@@ -321,7 +321,7 @@ static inline void print_time(char *s)
	struct timespec64 ts;

	ktime_get_real_ts64(&ts);
-	pr_info("%s(): %lld.%09ld\n", s, (s64)ts.tv_sec, ts.tv_nsec);
+	pr_info("%s(): %ptSp\n", s, &ts);
 #endif
 }
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
index 85f6c5a76e0f..5a955c80a853 100644
--- a/drivers/tty/serial/kgdboc.c
+++ b/drivers/tty/serial/kgdboc.c
@@ -577,7 +577,6 @@ static int __init kgdboc_earlycon_init(char *opt)
	console_list_lock();
	for_each_console(con) {
		if (con->write && con->read &&
-		    (con->flags & (CON_BOOT | CON_ENABLED)) &&
		    (!opt || !opt[0] || strcmp(con->name, opt) == 0))
			break;
	}
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index d18c0eaef9b7..bf50c6e7a029 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -2155,7 +2155,7 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
			" rfiles: %20lld\n"
			" rsubdirs: %20lld\n"
			"rbytes: %20lld\n"
-			"rctime: %10lld.%09ld\n",
+			"rctime: %ptSp\n",
			ci->i_files + ci->i_subdirs,
			ci->i_files,
			ci->i_subdirs,
@@ -2163,8 +2163,7 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
			ci->i_rfiles,
			ci->i_rsubdirs,
			ci->i_rbytes,
-			ci->i_rctime.tv_sec,
-			ci->i_rctime.tv_nsec);
+			&ci->i_rctime);
	}

	if (*ppos >= dfi->dir_info_len)
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 37d3a2477c17..a596cb53f1ac 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -879,7 +879,9 @@ void ceph_fill_file_time(struct inode *inode, int issued,
 {
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
+	struct timespec64 iatime = inode_get_atime(inode);
	struct timespec64 ictime = inode_get_ctime(inode);
+	struct timespec64 imtime = inode_get_mtime(inode);
	int warn = 0;

	if (issued & (CEPH_CAP_FILE_EXCL|
@@ -889,39 +891,26 @@ void ceph_fill_file_time(struct inode *inode, int issued,
		      CEPH_CAP_XATTR_EXCL)) {
		if (ci->i_version == 0 ||
		    timespec64_compare(ctime, &ictime) > 0) {
-			doutc(cl, "ctime %lld.%09ld -> %lld.%09ld inc w/ cap\n",
-			      ictime.tv_sec, ictime.tv_nsec,
-			      ctime->tv_sec, ctime->tv_nsec);
+			doutc(cl, "ctime %ptSp -> %ptSp inc w/ cap\n", &ictime, ctime);
			inode_set_ctime_to_ts(inode, *ctime);
		}
		if (ci->i_version == 0 ||
		    ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
			/* the MDS did a utimes() */
-			doutc(cl, "mtime %lld.%09ld -> %lld.%09ld tw %d -> %d\n",
-			      inode_get_mtime_sec(inode),
-			      inode_get_mtime_nsec(inode),
-			      mtime->tv_sec, mtime->tv_nsec,
-			      ci->i_time_warp_seq, (int)time_warp_seq);
+			doutc(cl, "mtime %ptSp -> %ptSp tw %d -> %d\n", &imtime, mtime,
+			      ci->i_time_warp_seq, (int)time_warp_seq);
			inode_set_mtime_to_ts(inode, *mtime);
			inode_set_atime_to_ts(inode, *atime);
			ci->i_time_warp_seq = time_warp_seq;
		} else if (time_warp_seq == ci->i_time_warp_seq) {
-			struct timespec64 ts;
-
			/* nobody did utimes(); take the max */
-			ts = inode_get_mtime(inode);
-			if (timespec64_compare(mtime, &ts) > 0) {
-				doutc(cl, "mtime %lld.%09ld -> %lld.%09ld inc\n",
-				      ts.tv_sec, ts.tv_nsec,
-				      mtime->tv_sec, mtime->tv_nsec);
+			if (timespec64_compare(mtime, &imtime) > 0) {
+				doutc(cl, "mtime %ptSp -> %ptSp inc\n", &imtime, mtime);
				inode_set_mtime_to_ts(inode, *mtime);
			}
-			ts = inode_get_atime(inode);
-			if (timespec64_compare(atime, &ts) > 0) {
-				doutc(cl, "atime %lld.%09ld -> %lld.%09ld inc\n",
-				      ts.tv_sec, ts.tv_nsec,
-				      atime->tv_sec, atime->tv_nsec);
+			if (timespec64_compare(atime, &iatime) > 0) {
+				doutc(cl, "atime %ptSp -> %ptSp inc\n", &iatime, atime);
				inode_set_atime_to_ts(inode, *atime);
			}
		} else if (issued & CEPH_CAP_FILE_EXCL) {
@@ -2703,10 +2692,8 @@ retry:
	if (ia_valid & ATTR_ATIME) {
		struct timespec64 atime = inode_get_atime(inode);

-		doutc(cl, "%p %llx.%llx atime %lld.%09ld -> %lld.%09ld\n",
-		      inode, ceph_vinop(inode),
-		      atime.tv_sec, atime.tv_nsec,
-		      attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
+		doutc(cl, "%p %llx.%llx atime %ptSp -> %ptSp\n",
+		      inode, ceph_vinop(inode), &atime, &attr->ia_atime);
		if (!do_sync && (issued & CEPH_CAP_FILE_EXCL)) {
			ci->i_time_warp_seq++;
			inode_set_atime_to_ts(inode, attr->ia_atime);
@@ -2780,10 +2767,8 @@ retry:
	if (ia_valid & ATTR_MTIME) {
		struct timespec64 mtime = inode_get_mtime(inode);

-		doutc(cl, "%p %llx.%llx mtime %lld.%09ld -> %lld.%09ld\n",
-		      inode, ceph_vinop(inode),
-		      mtime.tv_sec, mtime.tv_nsec,
-		      attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
+		doutc(cl, "%p %llx.%llx mtime %ptSp -> %ptSp\n",
+		      inode, ceph_vinop(inode), &mtime, &attr->ia_mtime);
		if (!do_sync && (issued & CEPH_CAP_FILE_EXCL)) {
			ci->i_time_warp_seq++;
			inode_set_mtime_to_ts(inode, attr->ia_mtime);
@@ -2804,13 +2789,11 @@ retry:
	/* these do nothing */
	if (ia_valid & ATTR_CTIME) {
+		struct timespec64 ictime = inode_get_ctime(inode);
		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
-		doutc(cl, "%p %llx.%llx ctime %lld.%09ld -> %lld.%09ld (%s)\n",
-		      inode, ceph_vinop(inode),
-		      inode_get_ctime_sec(inode),
-		      inode_get_ctime_nsec(inode),
-		      attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
+		doutc(cl, "%p %llx.%llx ctime %ptSp -> %ptSp (%s)\n",
+		      inode, ceph_vinop(inode), &ictime, &attr->ia_ctime,
		      only ? "ctime only" : "ignored");
		if (only) {
			/*
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 537165db4519..ad1f30bea175 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -249,8 +249,7 @@ static ssize_t ceph_vxattrcb_dir_rbytes(struct ceph_inode_info *ci, char *val,
 static ssize_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val,
					size_t size)
 {
-	return ceph_fmt_xattr(val, size, "%lld.%09ld", ci->i_rctime.tv_sec,
-			      ci->i_rctime.tv_nsec);
+	return ceph_fmt_xattr(val, size, "%ptSp", &ci->i_rctime);
 }

 /* dir pin */
@@ -307,8 +306,7 @@ static bool ceph_vxattrcb_snap_btime_exists(struct ceph_inode_info *ci)
 static ssize_t ceph_vxattrcb_snap_btime(struct ceph_inode_info *ci, char *val,
					size_t size)
 {
-	return ceph_fmt_xattr(val, size, "%lld.%09ld", ci->i_snap_btime.tv_sec,
-			      ci->i_snap_btime.tv_nsec);
+	return ceph_fmt_xattr(val, size, "%ptSp", &ci->i_snap_btime);
 }

 static ssize_t ceph_vxattrcb_cluster_fsid(struct ceph_inode_info *ci,
diff --git a/include/linux/console.h b/include/linux/console.h
index 031a58dc2b91..fc9f5c5c1b04 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -19,6 +19,7 @@
 #include <linux/irq_work.h>
 #include <linux/rculist.h>
 #include <linux/rcuwait.h>
+#include <linux/smp.h>
 #include <linux/types.h>
 #include <linux/vesa.h>

@@ -185,6 +186,8 @@ static inline void con_debug_leave(void) { }
  *			printing callbacks must not be called.
  * @CON_NBCON:		Console can operate outside of the legacy style console_lock
  *			constraints.
+ * @CON_NBCON_ATOMIC_UNSAFE: The write_atomic() callback is not safe and is
+ *			therefore only used by nbcon_atomic_flush_unsafe().
  */
 enum cons_flags {
	CON_PRINTBUFFER		= BIT(0),
@@ -196,6 +199,7 @@ enum cons_flags {
	CON_EXTENDED		= BIT(6),
	CON_SUSPENDED		= BIT(7),
	CON_NBCON		= BIT(8),
+	CON_NBCON_ATOMIC_UNSAFE	= BIT(9),
 };

 /**
@@ -602,16 +606,80 @@ static inline bool console_is_registered(const struct console *con)
 extern void nbcon_cpu_emergency_enter(void);
 extern void nbcon_cpu_emergency_exit(void);
 extern bool nbcon_can_proceed(struct nbcon_write_context *wctxt);
+extern void nbcon_write_context_set_buf(struct nbcon_write_context *wctxt,
+					char *buf, unsigned int len);
 extern bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt);
 extern bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt);
 extern void nbcon_reacquire_nobuf(struct nbcon_write_context *wctxt);
+extern bool nbcon_allow_unsafe_takeover(void);
+extern bool nbcon_kdb_try_acquire(struct console *con,
+				  struct nbcon_write_context *wctxt);
+extern void nbcon_kdb_release(struct nbcon_write_context *wctxt);
+
+/*
+ * Check if the given console is currently capable and allowed to print
+ * records. Note that this function does not consider the current context,
+ * which can also play a role in deciding if @con can be used to print
+ * records.
+ */
+static inline bool console_is_usable(struct console *con, short flags, bool use_atomic)
+{
+	if (!(flags & CON_ENABLED))
+		return false;
+
+	if ((flags & CON_SUSPENDED))
+		return false;
+
+	if (flags & CON_NBCON) {
+		if (use_atomic) {
+			/* The write_atomic() callback is optional. */
+			if (!con->write_atomic)
+				return false;
+
+			/*
+			 * An unsafe write_atomic() callback is only usable
+			 * when unsafe takeovers are allowed.
+			 */
+			if ((flags & CON_NBCON_ATOMIC_UNSAFE) && !nbcon_allow_unsafe_takeover())
+				return false;
+		}
+
+		/*
+		 * For the !use_atomic case, @printk_kthreads_running is not
+		 * checked because the write_thread() callback is also used
+		 * via the legacy loop when the printer threads are not
+		 * available.
+		 */
+	} else {
+		if (!con->write)
+			return false;
+	}
+
+	/*
+	 * Console drivers may assume that per-cpu resources have been
+	 * allocated. So unless they're explicitly marked as being able to
+	 * cope (CON_ANYTIME) don't call them until this CPU is officially up.
+	 */
+	if (!cpu_online(raw_smp_processor_id()) && !(flags & CON_ANYTIME))
+		return false;
+
+	return true;
+}
+
 #else
 static inline void nbcon_cpu_emergency_enter(void) { }
 static inline void nbcon_cpu_emergency_exit(void) { }
 static inline bool nbcon_can_proceed(struct nbcon_write_context *wctxt) { return false; }
+static inline void nbcon_write_context_set_buf(struct nbcon_write_context *wctxt,
+					       char *buf, unsigned int len) { }
 static inline bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt) { return false; }
 static inline bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt) { return false; }
 static inline void nbcon_reacquire_nobuf(struct nbcon_write_context *wctxt) { }
+static inline bool nbcon_kdb_try_acquire(struct console *con,
+					 struct nbcon_write_context *wctxt) { return false; }
+static inline void nbcon_kdb_release(struct nbcon_write_context *wctxt) { }
+static inline bool console_is_usable(struct console *con, short flags,
+				     bool use_atomic) { return false; }
 #endif

 extern int console_set_on_cmdline;
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index ecbf819deeca..741c58e86431 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -14,6 +14,7 @@
  */

 #include <linux/list.h>
+#include <linux/smp.h>

 /* Shifted versions of the command enable bits are be used if the command
  * has no arguments (see kdb_check_flags). This allows commands, such as
@@ -207,11 +208,26 @@ static inline const char *kdb_walk_kallsyms(loff_t *pos)
 /* Dynamic kdb shell command registration */
 extern int kdb_register(kdbtab_t *cmd);
 extern void kdb_unregister(kdbtab_t *cmd);
+
+/* Return true when KDB has locked for printing a message on this CPU. */
+static inline
+bool kdb_printf_on_this_cpu(void)
+{
+	/*
+	 * We can use raw_smp_processor_id() here because the task cannot
+	 * get migrated while KDB has locked for printing on this CPU.
+	 */
+	return unlikely(READ_ONCE(kdb_printf_cpu) == raw_smp_processor_id());
+}
+
 #else /* ! CONFIG_KGDB_KDB */
 static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
 static inline void kdb_init(int level) {}
 static inline int kdb_register(kdbtab_t *cmd) { return 0; }
 static inline void kdb_unregister(kdbtab_t *cmd) {}
+
+static inline bool kdb_printf_on_this_cpu(void) { return false; }
+
 #endif	/* CONFIG_KGDB_KDB */
 enum {
	KDB_NOT_INITIALIZED,
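Taken together, the moved console_is_usable() and the new
kdb_printf_on_this_cpu() helper are consumed by the kdb change below. A
condensed sketch of the SRCU iteration pattern they assume (not part of the
diff; the emit step is elided):

	int cookie;
	struct console *con;

	cookie = console_srcu_read_lock();
	for_each_console_srcu(con) {
		short flags = console_srcu_read_flags(con);

		/* true: the caller intends to use con->write_atomic() */
		if (!console_is_usable(con, flags, true))
			continue;

		/* ... emit the message through this console ... */
	}
	console_srcu_read_unlock(cookie);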
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index b12b9db75c1d..61c1690058ed 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -589,24 +589,41 @@ static void kdb_msg_write(const char *msg, int msg_len)
	 */
	cookie = console_srcu_read_lock();
	for_each_console_srcu(c) {
-		if (!(console_srcu_read_flags(c) & CON_ENABLED))
+		short flags = console_srcu_read_flags(c);
+
+		if (!console_is_usable(c, flags, true))
			continue;
		if (c == dbg_io_ops->cons)
			continue;
-		if (!c->write)
-			continue;
-		/*
-		 * Set oops_in_progress to encourage the console drivers to
-		 * disregard their internal spin locks: in the current calling
-		 * context the risk of deadlock is a bigger problem than risks
-		 * due to re-entering the console driver. We operate directly on
-		 * oops_in_progress rather than using bust_spinlocks() because
-		 * the calls bust_spinlocks() makes on exit are not appropriate
-		 * for this calling context.
-		 */
-		++oops_in_progress;
-		c->write(c, msg, msg_len);
-		--oops_in_progress;
+
+		if (flags & CON_NBCON) {
+			struct nbcon_write_context wctxt = { };
+
+			/*
+			 * Do not continue if the console is NBCON and the
+			 * context can't be acquired.
+			 */
+			if (!nbcon_kdb_try_acquire(c, &wctxt))
+				continue;
+
+			nbcon_write_context_set_buf(&wctxt, (char *)msg, msg_len);
+
+			c->write_atomic(c, &wctxt);
+			nbcon_kdb_release(&wctxt);
+		} else {
+			/*
+			 * Set oops_in_progress to encourage the console drivers to
+			 * disregard their internal spin locks: in the current calling
+			 * context the risk of deadlock is a bigger problem than risks
+			 * due to re-entering the console driver. We operate directly on
+			 * oops_in_progress rather than using bust_spinlocks() because
+			 * the calls bust_spinlocks() makes on exit are not appropriate
+			 * for this calling context.
+			 */
+			++oops_in_progress;
+			c->write(c, msg, msg_len);
+			--oops_in_progress;
+		}
		touch_nmi_watchdog();
	}
	console_srcu_read_unlock(cookie);
diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h
index f72bbfa266d6..5f5f626f4279 100644
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
@@ -3,7 +3,6 @@
  * internal.h - printk internal definitions
  */
 #include <linux/console.h>
-#include <linux/percpu.h>
 #include <linux/types.h>

 #if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL)
@@ -112,47 +111,6 @@ bool nbcon_kthread_create(struct console *con);
 void nbcon_kthread_stop(struct console *con);
 void nbcon_kthreads_wake(void);

-/*
- * Check if the given console is currently capable and allowed to print
- * records. Note that this function does not consider the current context,
- * which can also play a role in deciding if @con can be used to print
- * records.
- */
-static inline bool console_is_usable(struct console *con, short flags, bool use_atomic)
-{
-	if (!(flags & CON_ENABLED))
-		return false;
-
-	if ((flags & CON_SUSPENDED))
-		return false;
-
-	if (flags & CON_NBCON) {
-		/* The write_atomic() callback is optional. */
-		if (use_atomic && !con->write_atomic)
-			return false;
-
-		/*
-		 * For the !use_atomic case, @printk_kthreads_running is not
-		 * checked because the write_thread() callback is also used
-		 * via the legacy loop when the printer threads are not
-		 * available.
-		 */
-	} else {
-		if (!con->write)
-			return false;
-	}
-
-	/*
-	 * Console drivers may assume that per-cpu resources have been
-	 * allocated. So unless they're explicitly marked as being able to
-	 * cope (CON_ANYTIME) don't call them until this CPU is officially up.
-	 */
-	if (!cpu_online(raw_smp_processor_id()) && !(flags & CON_ANYTIME))
-		return false;
-
-	return true;
-}
-
 /**
  * nbcon_kthread_wake - Wake up a console printing thread
  * @con:	Console to operate on
@@ -204,9 +162,6 @@ static inline bool nbcon_legacy_emit_next_record(struct console *con, bool *hand
 static inline void nbcon_kthread_wake(struct console *con) { }
 static inline void nbcon_kthreads_wake(void) { }

-static inline bool console_is_usable(struct console *con, short flags,
-				     bool use_atomic) { return false; }
-
 #endif /* CONFIG_PRINTK */

 extern bool have_boot_console;
@@ -230,6 +185,8 @@ struct console_flush_type {
	bool legacy_offload;
 };

+extern bool console_irqwork_blocked;
+
 /*
  * Identify which console flushing methods should be used in the context of
  * the caller.
@@ -241,7 +198,7 @@ static inline void printk_get_console_flush_type(struct console_flush_type *ft)
	switch (nbcon_get_default_prio()) {
	case NBCON_PRIO_NORMAL:
		if (have_nbcon_console && !have_boot_console) {
-			if (printk_kthreads_running)
+			if (printk_kthreads_running && !console_irqwork_blocked)
				ft->nbcon_offload = true;
			else
				ft->nbcon_atomic = true;
@@ -251,7 +208,7 @@ static inline void printk_get_console_flush_type(struct console_flush_type *ft)
		if (have_legacy_console || have_boot_console) {
			if (!is_printk_legacy_deferred())
				ft->legacy_direct = true;
-			else
+			else if (!console_irqwork_blocked)
				ft->legacy_offload = true;
		}
		break;
@@ -264,7 +221,7 @@ static inline void printk_get_console_flush_type(struct console_flush_type *ft)
		if (have_legacy_console || have_boot_console) {
			if (!is_printk_legacy_deferred())
				ft->legacy_direct = true;
-			else
+			else if (!console_irqwork_blocked)
				ft->legacy_offload = true;
		}
		break;
diff --git a/kernel/printk/nbcon.c b/kernel/printk/nbcon.c
index 558ef3177976..3fa403f9831f 100644
--- a/kernel/printk/nbcon.c
+++ b/kernel/printk/nbcon.c
@@ -10,6 +10,7 @@
 #include <linux/export.h>
 #include <linux/init.h>
 #include <linux/irqflags.h>
+#include <linux/kdb.h>
 #include <linux/kthread.h>
 #include <linux/minmax.h>
 #include <linux/panic.h>
@@ -118,6 +119,9 @@
  *	from scratch.
  */

+/* Counter of active nbcon emergency contexts. */
+static atomic_t nbcon_cpu_emergency_cnt = ATOMIC_INIT(0);
+
 /**
  * nbcon_state_set - Helper function to set the console state
  * @con:	Console to update
@@ -249,13 +253,16 @@ static int nbcon_context_try_acquire_direct(struct nbcon_context *ctxt,
	 * since all non-panic CPUs are stopped during panic(), it
	 * is safer to have them avoid gaining console ownership.
	 *
-	 * If this acquire is a reacquire (and an unsafe takeover
+	 * One exception is when kdb has locked for printing on this CPU.
+	 *
+	 * Second exception is a reacquire (and an unsafe takeover
	 * has not previously occurred) then it is allowed to attempt
	 * a direct acquire in panic. This gives console drivers an
	 * opportunity to perform any necessary cleanup if they were
	 * interrupted by the panic CPU while printing.
	 */
	if (panic_on_other_cpu() &&
+	    !kdb_printf_on_this_cpu() &&
	    (!is_reacquire || cur->unsafe_takeover)) {
		return -EPERM;
	}
@@ -850,8 +857,8 @@ out:
	return nbcon_context_can_proceed(ctxt, &cur);
 }

-static void nbcon_write_context_set_buf(struct nbcon_write_context *wctxt,
-					char *buf, unsigned int len)
+void nbcon_write_context_set_buf(struct nbcon_write_context *wctxt,
+				 char *buf, unsigned int len)
 {
	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
	struct console *con = ctxt->console;
@@ -1163,6 +1170,17 @@ static bool nbcon_kthread_should_wakeup(struct console *con, struct nbcon_contex
	if (kthread_should_stop())
		return true;

+	/*
+	 * Block the kthread when the system is in an emergency or panic
+	 * mode. It increases the chance that these contexts are able to
+	 * show the messages directly. And it reduces the risk of
+	 * interrupted writes, where a context with a higher priority takes
+	 * over the nbcon console ownership in the middle of a message.
+	 */
+	if (unlikely(atomic_read(&nbcon_cpu_emergency_cnt)) ||
+	    unlikely(panic_in_progress()))
+		return false;
+
	cookie = console_srcu_read_lock();
	flags = console_srcu_read_flags(con);

@@ -1214,6 +1232,14 @@ wait_for_event:
	if (kthread_should_stop())
		return 0;

+	/*
+	 * Block the kthread when the system is in an emergency or panic
+	 * mode. See nbcon_kthread_should_wakeup() for more details.
+	 */
+	if (unlikely(atomic_read(&nbcon_cpu_emergency_cnt)) ||
+	    unlikely(panic_in_progress()))
+		goto wait_for_event;
+
	backlog = false;

	/*
@@ -1276,6 +1302,13 @@ void nbcon_kthreads_wake(void)
	if (!printk_kthreads_running)
		return;

+	/*
+	 * It is not allowed to call this function when console irq_work
+	 * is blocked.
+	 */
+	if (WARN_ON_ONCE(console_irqwork_blocked))
+		return;
+
	cookie = console_srcu_read_lock();
	for_each_console_srcu(con) {
		if (!(console_srcu_read_flags(con) & CON_NBCON))
@@ -1404,6 +1437,26 @@ enum nbcon_prio nbcon_get_default_prio(void)
	return NBCON_PRIO_NORMAL;
 }

+/*
+ * Track if it is allowed to perform unsafe hostile takeovers of console
+ * ownership. When true, console drivers might perform unsafe actions while
+ * printing. It is externally available via nbcon_allow_unsafe_takeover().
+ */
+static bool panic_nbcon_allow_unsafe_takeover;
+
+/**
+ * nbcon_allow_unsafe_takeover - Check if unsafe console takeovers are allowed
+ *
+ * Return:	True, when it is permitted to perform unsafe console printing
+ *
+ * This is also used by console_is_usable() to determine if it is allowed to
+ * call write_atomic() callbacks flagged as unsafe (CON_NBCON_ATOMIC_UNSAFE).
+ */
+bool nbcon_allow_unsafe_takeover(void)
+{
+	return panic_on_this_cpu() && panic_nbcon_allow_unsafe_takeover;
+}
+
 /**
  * nbcon_legacy_emit_next_record - Print one record for an nbcon console
  *					in legacy contexts
@@ -1474,7 +1527,6 @@ bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
  *					write_atomic() callback
  * @con:	The nbcon console to flush
  * @stop_seq:	Flush up until this record
- * @allow_unsafe_takeover: True, to allow unsafe hostile takeovers
  *
  * Return:	0 if @con was flushed up to @stop_seq Otherwise, error code on
  *		failure.
@@ -1493,8 +1545,7 @@ bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
  * returned, it cannot be expected that the unfinalized record will become
  * available.
  */
-static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
-					    bool allow_unsafe_takeover)
+static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
 {
	struct nbcon_write_context wctxt = { };
	struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
@@ -1503,12 +1554,12 @@ static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
	ctxt->console			= con;
	ctxt->spinwait_max_us		= 2000;
	ctxt->prio			= nbcon_get_default_prio();
-	ctxt->allow_unsafe_takeover	= allow_unsafe_takeover;
-
-	if (!nbcon_context_try_acquire(ctxt, false))
-		return -EPERM;
+	ctxt->allow_unsafe_takeover	= nbcon_allow_unsafe_takeover();

	while (nbcon_seq_read(con) < stop_seq) {
+		if (!nbcon_context_try_acquire(ctxt, false))
+			return -EPERM;
+
		/*
		 * nbcon_emit_next_record() returns false when the console was
		 * handed over or taken over. In both cases the context is no
		 * longer valid.
		 */
		if (!nbcon_emit_next_record(&wctxt, true))
			return -EAGAIN;

+		nbcon_context_release(ctxt);
+
		if (!ctxt->backlog) {
			/* Are there reserved but not yet finalized records? */
			if (nbcon_seq_read(con) < stop_seq)
				err = -ENOENT;
			break;
		}
	}

-	nbcon_context_release(ctxt);
	return err;
 }

@@ -1534,15 +1586,13 @@ static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
  *					write_atomic() callback
  * @con:	The nbcon console to flush
  * @stop_seq:	Flush up until this record
- * @allow_unsafe_takeover: True, to allow unsafe hostile takeovers
  *
  * This will stop flushing before @stop_seq if another context has ownership.
  * That context is then responsible for the flushing. Likewise, if new records
  * are added while this context was flushing and there is no other context
  * to handle the printing, this context must also flush those records.
  */
-static void nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
-					   bool allow_unsafe_takeover)
+static void nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
 {
	struct console_flush_type ft;
	unsigned long flags;
@@ -1557,7 +1607,7 @@ again:
	 */
	local_irq_save(flags);

-	err = __nbcon_atomic_flush_pending_con(con, stop_seq, allow_unsafe_takeover);
+	err = __nbcon_atomic_flush_pending_con(con, stop_seq);

	local_irq_restore(flags);

@@ -1589,9 +1639,8 @@ again:
 * __nbcon_atomic_flush_pending - Flush all nbcon consoles using their
 *					write_atomic() callback
 * @stop_seq:	Flush up until this record
- * @allow_unsafe_takeover: True, to allow unsafe hostile takeovers
 */
-static void __nbcon_atomic_flush_pending(u64 stop_seq, bool allow_unsafe_takeover)
+static void __nbcon_atomic_flush_pending(u64 stop_seq)
 {
	struct console *con;
	int cookie;
@@ -1609,7 +1658,7 @@ static void __nbcon_atomic_flush_pending(u64 stop_seq, bool allow_unsafe_takeove
		if (nbcon_seq_read(con) >= stop_seq)
			continue;

-		nbcon_atomic_flush_pending_con(con, stop_seq, allow_unsafe_takeover);
+		nbcon_atomic_flush_pending_con(con, stop_seq);
	}
	console_srcu_read_unlock(cookie);
 }
@@ -1625,7 +1674,7 @@ static void __nbcon_atomic_flush_pending(u64 stop_seq, bool allow_unsafe_takeove
 */
 void nbcon_atomic_flush_pending(void)
 {
-	__nbcon_atomic_flush_pending(prb_next_reserve_seq(prb), false);
+	__nbcon_atomic_flush_pending(prb_next_reserve_seq(prb));
 }

 /**
@@ -1637,7 +1686,9 @@ void nbcon_atomic_flush_pending(void)
 */
 void nbcon_atomic_flush_unsafe(void)
 {
-	__nbcon_atomic_flush_pending(prb_next_reserve_seq(prb), true);
+	panic_nbcon_allow_unsafe_takeover = true;
+	__nbcon_atomic_flush_pending(prb_next_reserve_seq(prb));
+	panic_nbcon_allow_unsafe_takeover = false;
 }

 /**
@@ -1655,6 +1706,8 @@ void nbcon_cpu_emergency_enter(void)

	preempt_disable();

+	atomic_inc(&nbcon_cpu_emergency_cnt);
+
	cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();
	(*cpu_emergency_nesting)++;
 }
@@ -1669,10 +1722,24 @@ void nbcon_cpu_emergency_exit(void)
	unsigned int *cpu_emergency_nesting;

	cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();
-	if (!WARN_ON_ONCE(*cpu_emergency_nesting == 0))
		(*cpu_emergency_nesting)--;

+	/*
+	 * Wake up kthreads because there might be some pending messages
+	 * added by other CPUs with normal priority since the last flush
+	 * in the emergency context.
+	 */
+	if (!WARN_ON_ONCE(atomic_read(&nbcon_cpu_emergency_cnt) == 0)) {
+		if (atomic_dec_return(&nbcon_cpu_emergency_cnt) == 0) {
+			struct console_flush_type ft;
+
+			printk_get_console_flush_type(&ft);
+			if (ft.nbcon_offload)
+				nbcon_kthreads_wake();
+		}
+	}
+
	preempt_enable();
 }

@@ -1844,14 +1911,75 @@ void nbcon_device_release(struct console *con)
	 * using the legacy loop.
	 */
	if (ft.nbcon_atomic) {
-		__nbcon_atomic_flush_pending_con(con, prb_next_reserve_seq(prb), false);
+		__nbcon_atomic_flush_pending_con(con, prb_next_reserve_seq(prb));
	} else if (ft.legacy_direct) {
		if (console_trylock())
			console_unlock();
	} else if (ft.legacy_offload) {
-		printk_trigger_flush();
+		defer_console_output();
	}
	}
	console_srcu_read_unlock(cookie);
 }
 EXPORT_SYMBOL_GPL(nbcon_device_release);
+
+/**
+ * nbcon_kdb_try_acquire - Try to acquire nbcon console and enter unsafe
+ *				section
+ * @con:	The nbcon console to acquire
+ * @wctxt:	The nbcon write context to be used on success
+ *
+ * Context:	Under console_srcu_read_lock() for emitting a single kdb message
+ *		using the given con->write_atomic() callback. Can be called
+ *		only when the console is usable at the moment.
+ *
+ * Return:	True if the console was acquired. False otherwise.
+ *
+ * kdb emits messages on consoles registered for printk() without
+ * storing them into the ring buffer. It has to acquire the console
+ * ownership so that it can call the con->write_atomic() callback in a
+ * safe way.
+ *
+ * This function acquires the nbcon console using priority NBCON_PRIO_EMERGENCY
+ * and marks it unsafe for handover/takeover.
+ */
+bool nbcon_kdb_try_acquire(struct console *con,
+			   struct nbcon_write_context *wctxt)
+{
+	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
+
+	memset(ctxt, 0, sizeof(*ctxt));
+	ctxt->console	= con;
+	ctxt->prio	= NBCON_PRIO_EMERGENCY;
+
+	if (!nbcon_context_try_acquire(ctxt, false))
+		return false;
+
+	if (!nbcon_context_enter_unsafe(ctxt))
+		return false;
+
+	return true;
+}
+
+/**
+ * nbcon_kdb_release - Exit unsafe section and release the nbcon console
+ *
+ * @wctxt:	The nbcon write context initialized by a successful
+ *		nbcon_kdb_try_acquire()
+ */
+void nbcon_kdb_release(struct nbcon_write_context *wctxt)
+{
+	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
+
+	if (!nbcon_context_exit_unsafe(ctxt))
+		return;
+
+	nbcon_context_release(ctxt);
+
+	/*
+	 * Flush any new printk() messages added when the console was blocked.
+	 * Only the console used by the given write context was blocked.
+	 * The console was locked only when the write_atomic() callback
+	 * was usable.
+	 */
+	__nbcon_atomic_flush_pending_con(ctxt->console, prb_next_reserve_seq(prb));
+}
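A compact view of the new API's calling pattern, distilled from the
kdb_msg_write() hunk earlier in this series (a sketch, not part of the diff;
msg/msg_len stand in for the kdb message buffer):

	struct nbcon_write_context wctxt = { };

	/* called under console_srcu_read_lock(), console known usable */
	if (nbcon_kdb_try_acquire(con, &wctxt)) {
		nbcon_write_context_set_buf(&wctxt, (char *)msg, msg_len);
		con->write_atomic(con, &wctxt);
		nbcon_kdb_release(&wctxt);
	}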
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 5aee9ffb16b9..7394f1b6033b 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -462,6 +462,9 @@ bool have_boot_console;
 /* See printk_legacy_allow_panic_sync() for details. */
 bool legacy_allow_panic_sync;

+/* Avoid using irq_work when suspending. */
+bool console_irqwork_blocked;
+
 #ifdef CONFIG_PRINTK
 DECLARE_WAIT_QUEUE_HEAD(log_wait);
 static DECLARE_WAIT_QUEUE_HEAD(legacy_wait);
@@ -2390,7 +2393,7 @@ asmlinkage int vprintk_emit(int facility, int level,
	/* If called from the scheduler, we can not call up(). */
	if (level == LOGLEVEL_SCHED) {
		level = LOGLEVEL_DEFAULT;
-		ft.legacy_offload |= ft.legacy_direct;
+		ft.legacy_offload |= ft.legacy_direct && !console_irqwork_blocked;
		ft.legacy_direct = false;
	}

@@ -2426,7 +2429,7 @@ asmlinkage int vprintk_emit(int facility, int level,
	if (ft.legacy_offload)
		defer_console_output();
-	else
+	else if (!console_irqwork_blocked)
		wake_up_klogd();

	return printed_len;
@@ -2730,10 +2733,20 @@ void console_suspend_all(void)
 {
	struct console *con;

+	if (console_suspend_enabled)
+		pr_info("Suspending console(s) (use no_console_suspend to debug)\n");
+
+	/*
+	 * Flush any console backlog and then avoid queueing irq_work until
+	 * console_resume_all(). Until then deferred printing is no longer
+	 * triggered, NBCON consoles transition to atomic flushing, and
+	 * any klogd waiters are not triggered.
+	 */
+	pr_flush(1000, true);
+	console_irqwork_blocked = true;
+
	if (!console_suspend_enabled)
		return;
-	pr_info("Suspending console(s) (use no_console_suspend to debug)\n");
-	pr_flush(1000, true);

	console_list_lock();
	for_each_console(con)
@@ -2754,26 +2767,34 @@ void console_resume_all(void)
	struct console_flush_type ft;
	struct console *con;

-	if (!console_suspend_enabled)
-		return;
-
-	console_list_lock();
-	for_each_console(con)
-		console_srcu_write_flags(con, con->flags & ~CON_SUSPENDED);
-	console_list_unlock();
-
	/*
-	 * Ensure that all SRCU list walks have completed. All printing
-	 * contexts must be able to see they are no longer suspended so
-	 * that they are guaranteed to wake up and resume printing.
+	 * Allow queueing irq_work. After restoring console state, deferred
+	 * printing and any klogd waiters need to be triggered in case there
+	 * is now a console backlog.
	 */
-	synchronize_srcu(&console_srcu);
+	console_irqwork_blocked = false;
+
+	if (console_suspend_enabled) {
+		console_list_lock();
+		for_each_console(con)
+			console_srcu_write_flags(con, con->flags & ~CON_SUSPENDED);
+		console_list_unlock();
+
+		/*
+		 * Ensure that all SRCU list walks have completed. All printing
+		 * contexts must be able to see they are no longer suspended so
+		 * that they are guaranteed to wake up and resume printing.
+		 */
+		synchronize_srcu(&console_srcu);
+	}

	printk_get_console_flush_type(&ft);
	if (ft.nbcon_offload)
		nbcon_kthreads_wake();
	if (ft.legacy_offload)
		defer_console_output();
+	else
+		wake_up_klogd();

	pr_flush(1000, true);
 }
@@ -3002,21 +3023,18 @@ out:
 }

 /*
- * Legacy console printing from printk() caller context does not respect
- * raw_spinlock/spinlock nesting. For !PREEMPT_RT the lockdep warning is a
- * false positive. For PREEMPT_RT the false positive condition does not
- * occur.
- *
- * This map is used to temporarily establish LD_WAIT_SLEEP context for the
- * console write() callback when legacy printing to avoid false positive
- * lockdep complaints, thus allowing lockdep to continue to function for
- * real issues.
+ * The legacy console always acquires a spinlock_t from its printing
+ * callback. This violates lock nesting if the caller acquired an always
+ * spinning lock (raw_spinlock_t) while invoking printk(). This is not a
+ * problem on PREEMPT_RT because legacy consoles always print from a
+ * dedicated thread and never from within printk(). Therefore we tell
+ * lockdep that a sleeping spin lock (spinlock_t) is valid here.
 */
 #ifdef CONFIG_PREEMPT_RT
 static inline void printk_legacy_allow_spinlock_enter(void) { }
 static inline void printk_legacy_allow_spinlock_exit(void) { }
 #else
-static DEFINE_WAIT_OVERRIDE_MAP(printk_legacy_map, LD_WAIT_SLEEP);
+static DEFINE_WAIT_OVERRIDE_MAP(printk_legacy_map, LD_WAIT_CONFIG);

 static inline void printk_legacy_allow_spinlock_enter(void)
 {
@@ -3134,104 +3152,147 @@ static inline void printk_kthreads_check_locked(void) { }

 #endif /* CONFIG_PRINTK */

+
 /*
- * Print out all remaining records to all consoles.
+ * Print out one record for each console.
 *
 * @do_cond_resched is set by the caller. It can be true only in schedulable
 * context.
 *
 * @next_seq is set to the sequence number after the last available record.
- * The value is valid only when this function returns true. It means that all
- * usable consoles are completely flushed.
+ * The value is valid only when all usable consoles have been flushed, i.e.
+ * when the function returns true (it could do the job) and the @try_again
+ * parameter is set to false, see below.
 *
 * @handover will be set to true if a printk waiter has taken over the
 * console_lock, in which case the caller is no longer holding the
 * console_lock. Otherwise it is set to false.
 *
- * Returns true when there was at least one usable console and all messages
- * were flushed to all usable consoles. A returned false informs the caller
- * that everything was not flushed (either there were no usable consoles or
- * another context has taken over printing or it is a panic situation and this
- * is not the panic CPU). Regardless the reason, the caller should assume it
- * is not useful to immediately try again.
+ * @try_again will be set to true when it still makes sense to call this
+ * function again: the function could do the job (see the return value) and
+ * some console still made progress.
+ *
+ * Returns true when the function could do the job: some consoles are usable,
+ * and there was no takeover and no panic_on_other_cpu().
 *
 * Requires the console_lock.
 */
-static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover)
+static bool console_flush_one_record(bool do_cond_resched, u64 *next_seq, bool *handover,
+				     bool *try_again)
 {
	struct console_flush_type ft;
	bool any_usable = false;
	struct console *con;
-	bool any_progress;
	int cookie;
-	*next_seq = 0;
-	*handover = false;
-
-	do {
-		any_progress = false;
-
-		printk_get_console_flush_type(&ft);
-
-		cookie = console_srcu_read_lock();
-		for_each_console_srcu(con) {
-			short flags = console_srcu_read_flags(con);
-			u64 printk_seq;
-			bool progress;
-
-			/*
-			 * console_flush_all() is only responsible for nbcon
-			 * consoles when the nbcon consoles cannot print via
-			 * their atomic or threaded flushing.
-			 */
-			if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload))
-				continue;
-
-			if (!console_is_usable(con, flags, !do_cond_resched))
-				continue;
-			any_usable = true;
-
-			if (flags & CON_NBCON) {
-				progress = nbcon_legacy_emit_next_record(con, handover, cookie,
-									 !do_cond_resched);
-				printk_seq = nbcon_seq_read(con);
-			} else {
-				progress = console_emit_next_record(con, handover, cookie);
-				printk_seq = con->seq;
-			}
-
-			/*
-			 * If a handover has occurred, the SRCU read lock
-			 * is already released.
-			 */
-			if (*handover)
-				return false;
-
-			/* Track the next of the highest seq flushed. */
-			if (printk_seq > *next_seq)
-				*next_seq = printk_seq;
-
-			if (!progress)
-				continue;
-			any_progress = true;
-
-			/* Allow panic_cpu to take over the consoles safely. */
-			if (panic_on_other_cpu())
-				goto abandon;
-
-			if (do_cond_resched)
-				cond_resched();
-		}
-		console_srcu_read_unlock(cookie);
-	} while (any_progress);
+	*try_again = false;
+
+	printk_get_console_flush_type(&ft);
+
+	cookie = console_srcu_read_lock();
+	for_each_console_srcu(con) {
+		short flags = console_srcu_read_flags(con);
+		u64 printk_seq;
+		bool progress;
+
+		/*
+		 * console_flush_one_record() is only responsible for
+		 * nbcon consoles when the nbcon consoles cannot print via
+		 * their atomic or threaded flushing.
+		 */
+		if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload))
+			continue;
+
+		if (!console_is_usable(con, flags, !do_cond_resched))
+			continue;
+		any_usable = true;
+
+		if (flags & CON_NBCON) {
+			progress = nbcon_legacy_emit_next_record(con, handover, cookie,
+								 !do_cond_resched);
+			printk_seq = nbcon_seq_read(con);
+		} else {
+			progress = console_emit_next_record(con, handover, cookie);
+			printk_seq = con->seq;
+		}
+
+		/*
+		 * If a handover has occurred, the SRCU read lock
+		 * is already released.
+		 */
+		if (*handover)
+			goto fail;
+
+		/* Track the next of the highest seq flushed. */
+		if (printk_seq > *next_seq)
+			*next_seq = printk_seq;
+
+		if (!progress)
+			continue;
+
+		/*
+		 * A usable console made progress. There might still be
+		 * pending messages.
+		 */
+		*try_again = true;
+
+		/* Allow panic_cpu to take over the consoles safely. */
+		if (panic_on_other_cpu())
+			goto fail_srcu;
+
+		if (do_cond_resched)
+			cond_resched();
+	}
+	console_srcu_read_unlock(cookie);

	return any_usable;

-abandon:
+fail_srcu:
	console_srcu_read_unlock(cookie);
+fail:
+	*try_again = false;
	return false;
 }

+/*
+ * Print out all remaining records to all consoles.
+ *
+ * @do_cond_resched is set by the caller. It can be true only in schedulable
+ * context.
+ *
+ * @next_seq is set to the sequence number after the last available record.
+ * The value is valid only when this function returns true. It means that all
+ * usable consoles are completely flushed.
+ *
+ * @handover will be set to true if a printk waiter has taken over the
+ * console_lock, in which case the caller is no longer holding the
+ * console_lock. Otherwise it is set to false.
+ *
+ * Returns true when there was at least one usable console and all messages
+ * were flushed to all usable consoles. A returned false informs the caller
+ * that everything was not flushed (either there were no usable consoles or
+ * another context has taken over printing or it is a panic situation and this
+ * is not the panic CPU). Regardless the reason, the caller should assume it
+ * is not useful to immediately try again.
+ *
+ * Requires the console_lock.
+/*
+ * Print out all remaining records to all consoles.
+ *
+ * @do_cond_resched is set by the caller. It can be true only in schedulable
+ * context.
+ *
+ * @next_seq is set to the sequence number after the last available record.
+ * The value is valid only when this function returns true. It means that all
+ * usable consoles are completely flushed.
+ *
+ * @handover will be set to true if a printk waiter has taken over the
+ * console_lock, in which case the caller is no longer holding the
+ * console_lock. Otherwise it is set to false.
+ *
+ * Returns true when there was at least one usable console and all messages
+ * were flushed to all usable consoles. A returned false informs the caller
+ * that everything was not flushed (either there were no usable consoles or
+ * another context has taken over printing or it is a panic situation and this
+ * is not the panic CPU). Regardless of the reason, the caller should assume
+ * it is not useful to immediately try again.
+ *
+ * Requires the console_lock.
+ */
+static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover)
+{
+	bool try_again;
+	bool ret;
+
+	*next_seq = 0;
+	*handover = false;
+
+	do {
+		ret = console_flush_one_record(do_cond_resched, next_seq,
+					       handover, &try_again);
+	} while (try_again);
+
+	return ret;
+}
+
 static void __console_flush_and_unlock(void)
 {
 	bool do_cond_resched;
@@ -3331,12 +3392,10 @@ void console_unblank(void)
 	 */
 	cookie = console_srcu_read_lock();
 	for_each_console_srcu(c) {
-		short flags = console_srcu_read_flags(c);
-
-		if (flags & CON_SUSPENDED)
+		if (!console_is_usable(c, console_srcu_read_flags(c), true))
 			continue;

-		if ((flags & CON_ENABLED) && c->unblank) {
+		if (c->unblank) {
 			found_unblank = true;
 			break;
 		}
@@ -3373,12 +3432,10 @@ void console_unblank(void)

 	cookie = console_srcu_read_lock();
 	for_each_console_srcu(c) {
-		short flags = console_srcu_read_flags(c);
-
-		if (flags & CON_SUSPENDED)
+		if (!console_is_usable(c, console_srcu_read_flags(c), true))
 			continue;

-		if ((flags & CON_ENABLED) && c->unblank)
+		if (c->unblank)
 			c->unblank();
 	}
 	console_srcu_read_unlock(cookie);
@@ -3601,17 +3658,26 @@ static bool legacy_kthread_should_wakeup(void)

 static int legacy_kthread_func(void *unused)
 {
-	for (;;) {
-		wait_event_interruptible(legacy_wait, legacy_kthread_should_wakeup());
+	bool try_again;
+
+wait_for_event:
+	wait_event_interruptible(legacy_wait, legacy_kthread_should_wakeup());
+
+	do {
+		bool handover = false;
+		u64 next_seq = 0;

 		if (kthread_should_stop())
-			break;
+			return 0;

 		console_lock();
-		__console_flush_and_unlock();
-	}
+		console_flush_one_record(true, &next_seq, &handover, &try_again);
+		if (!handover)
+			__console_unlock();

-	return 0;
+	} while (try_again);
+
+	goto wait_for_event;
 }

 static bool legacy_kthread_create(void)
@@ -4511,6 +4577,13 @@ static void __wake_up_klogd(int val)
 	if (!printk_percpu_data_ready())
 		return;

+	/*
+	 * This function must not be called while console irq_work
+	 * is blocked.
+	 */
+	if (WARN_ON_ONCE(console_irqwork_blocked))
+		return;
+
 	preempt_disable();
 	/*
 	 * Guarantee any new records can be seen by tasks preparing to wait
@@ -4567,9 +4640,30 @@ void defer_console_output(void)
 	__wake_up_klogd(PRINTK_PENDING_WAKEUP | PRINTK_PENDING_OUTPUT);
 }

+/**
+ * printk_trigger_flush - Attempt to flush the printk buffer to consoles.
+ *
+ * If possible, flush the printk buffer to all consoles in the caller's
+ * context. If offloading is available, trigger deferred printing.
+ *
+ * This is best effort. Depending on the system state, console states,
+ * and caller context, no actual flushing may result from this call.
+ */
 void printk_trigger_flush(void)
 {
-	defer_console_output();
+	struct console_flush_type ft;
+
+	printk_get_console_flush_type(&ft);
+	if (ft.nbcon_atomic)
+		nbcon_atomic_flush_pending();
+	if (ft.nbcon_offload)
+		nbcon_kthreads_wake();
+	if (ft.legacy_direct) {
+		if (console_trylock())
+			console_unlock();
+	}
+	if (ft.legacy_offload)
+		defer_console_output();
 }

 int vprintk_deferred(const char *fmt, va_list args)
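The legacy_direct branch of the new printk_trigger_flush() builds on the fact that console_unlock() itself flushes pending records, so a successful console_trylock() means the caller can flush in its own context. The trylock-or-defer idiom can be sketched in userspace with POSIX threads (build with cc -pthread); flush_pending() and defer_flush() are hypothetical placeholders for the real flush and offload paths.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t console_mutex = PTHREAD_MUTEX_INITIALIZER;

static void flush_pending(void)
{
	printf("flushing in the caller's context\n");
}

static void defer_flush(void)
{
	printf("deferring to another context\n");
}

/* Best effort: flush directly only when nobody else holds the lock. */
static void trigger_flush(void)
{
	if (pthread_mutex_trylock(&console_mutex) == 0) {
		flush_pending();
		pthread_mutex_unlock(&console_mutex);
	} else {
		defer_flush();
	}
}

int main(void)
{
	trigger_flush();
	return 0;
}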
diff --git a/kernel/printk/printk_ringbuffer.c b/kernel/printk/printk_ringbuffer.c
index 40198bffb7d0..56c8e3d031f4 100644
--- a/kernel/printk/printk_ringbuffer.c
+++ b/kernel/printk/printk_ringbuffer.c
@@ -411,6 +411,23 @@ static bool data_check_size(struct prb_data_ring *data_ring, unsigned int size)
 	return to_blk_size(size) <= DATA_SIZE(data_ring) / 2;
 }

+/*
+ * Compare the current and requested logical positions and decide
+ * whether more space is needed.
+ *
+ * Return false when @lpos_current is already at or beyond @lpos_target.
+ *
+ * Also return false when the difference between the positions is bigger
+ * than the size of the data buffer. This can happen only when the caller
+ * raced with other CPUs that already made and used the space.
+ */
+static bool need_more_space(struct prb_data_ring *data_ring,
+			    unsigned long lpos_current,
+			    unsigned long lpos_target)
+{
+	return lpos_target - lpos_current - 1 < DATA_SIZE(data_ring);
+}
+
 /* Query the state of a descriptor. */
 static enum desc_state get_desc_state(unsigned long id,
 				      unsigned long state_val)
@@ -577,7 +594,7 @@ static bool data_make_reusable(struct printk_ringbuffer *rb,
 		unsigned long id;

 	/* Loop until @lpos_begin has advanced to or beyond @lpos_end. */
-	while ((lpos_end - lpos_begin) - 1 < DATA_SIZE(data_ring)) {
+	while (need_more_space(data_ring, lpos_begin, lpos_end)) {
 		blk = to_block(data_ring, lpos_begin);

 		/*
@@ -668,7 +685,7 @@ static bool data_push_tail(struct printk_ringbuffer *rb, unsigned long lpos)
 	 * sees the new tail lpos, any descriptor states that transitioned to
 	 * the reusable state must already be visible.
 	 */
-	while ((lpos - tail_lpos) - 1 < DATA_SIZE(data_ring)) {
+	while (need_more_space(data_ring, tail_lpos, lpos)) {
 		/*
 		 * Make all descriptors reusable that are associated with
 		 * data blocks before @lpos.
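The single comparison in need_more_space() leans on unsigned wraparound, and it is easy to verify in isolation. The standalone check below substitutes a hypothetical 16-byte ring for DATA_SIZE(data_ring).

#include <assert.h>
#include <stdbool.h>

#define DATA_SIZE 16UL	/* stand-in for DATA_SIZE(data_ring) */

static bool need_more_space(unsigned long lpos_current,
			    unsigned long lpos_target)
{
	return lpos_target - lpos_current - 1 < DATA_SIZE;
}

int main(void)
{
	assert(need_more_space(0, 8));		/* target ahead, within the ring */
	assert(need_more_space(0, 16));		/* target exactly one full ring ahead */
	assert(!need_more_space(8, 8));		/* already at the target */
	assert(!need_more_space(9, 8));		/* already beyond the target */
	assert(!need_more_space(0, 17));	/* raced: gap larger than the ring */
	return 0;
}

Both the "at or beyond the target" case and the "gap larger than the ring" case fall out of the same subtraction, which is why the two open-coded while conditions above collapse into one helper.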
@@ -999,6 +1016,17 @@ static bool desc_reserve(struct printk_ringbuffer *rb, unsigned long *id_out)
 	return true;
 }

+static bool is_blk_wrapped(struct prb_data_ring *data_ring,
+			   unsigned long begin_lpos, unsigned long next_lpos)
+{
+	/*
+	 * Subtract one from next_lpos since it's not actually part of this data
+	 * block. This allows perfectly fitting records to not wrap.
+	 */
+	return DATA_WRAPS(data_ring, begin_lpos) !=
+	       DATA_WRAPS(data_ring, next_lpos - 1);
+}
+
 /* Determine the end of a data block. */
 static unsigned long get_next_lpos(struct prb_data_ring *data_ring,
 				   unsigned long lpos, unsigned int size)
@@ -1010,7 +1038,7 @@ static unsigned long get_next_lpos(struct prb_data_ring *data_ring,
 	next_lpos = lpos + size;

 	/* First check if the data block does not wrap. */
-	if (DATA_WRAPS(data_ring, begin_lpos) == DATA_WRAPS(data_ring, next_lpos))
+	if (!is_blk_wrapped(data_ring, begin_lpos, next_lpos))
 		return next_lpos;

 	/* Wrapping data blocks store their data at the beginning. */
@@ -1087,7 +1115,7 @@ static char *data_alloc(struct printk_ringbuffer *rb, unsigned int size,
 	blk = to_block(data_ring, begin_lpos);
 	blk->id = id; /* LMM(data_alloc:B) */

-	if (DATA_WRAPS(data_ring, begin_lpos) != DATA_WRAPS(data_ring, next_lpos)) {
+	if (is_blk_wrapped(data_ring, begin_lpos, next_lpos)) {
 		/* Wrapping data blocks store their data at the beginning. */
 		blk = to_block(data_ring, 0);

@@ -1131,14 +1159,21 @@ static char *data_realloc(struct printk_ringbuffer *rb, unsigned int size,
 		return NULL;

 	/* Keep track if @blk_lpos was a wrapping data block. */
-	wrapped = (DATA_WRAPS(data_ring, blk_lpos->begin) != DATA_WRAPS(data_ring, blk_lpos->next));
+	wrapped = is_blk_wrapped(data_ring, blk_lpos->begin, blk_lpos->next);

 	size = to_blk_size(size);

 	next_lpos = get_next_lpos(data_ring, blk_lpos->begin, size);

-	/* If the data block does not increase, there is nothing to do. */
-	if (head_lpos - next_lpos < DATA_SIZE(data_ring)) {
+	/*
+	 * Use the current data block when the size does not increase, i.e.
+	 * when @head_lpos is already able to accommodate the new @next_lpos.
+	 *
+	 * Note that need_more_space() can never return false here due to
+	 * the difference between the positions being bigger than the data
+	 * buffer size: the data block is reopened and cannot get reused.
+	 */
+	if (!need_more_space(data_ring, head_lpos, next_lpos)) {
 		if (wrapped)
 			blk = to_block(data_ring, 0);
 		else
@@ -1167,7 +1202,7 @@ static char *data_realloc(struct printk_ringbuffer *rb, unsigned int size,

 	blk = to_block(data_ring, blk_lpos->begin);

-	if (DATA_WRAPS(data_ring, blk_lpos->begin) != DATA_WRAPS(data_ring, next_lpos)) {
+	if (is_blk_wrapped(data_ring, blk_lpos->begin, next_lpos)) {
 		struct prb_data_block *old_blk = blk;

 		/* Wrapping data blocks store their data at the beginning. */
@@ -1203,7 +1238,7 @@ static unsigned int space_used(struct prb_data_ring *data_ring,
 	if (BLK_DATALESS(blk_lpos))
 		return 0;

-	if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next)) {
+	if (!is_blk_wrapped(data_ring, blk_lpos->begin, blk_lpos->next)) {
 		/* Data block does not wrap. */
 		return (DATA_INDEX(data_ring, blk_lpos->next) -
 			DATA_INDEX(data_ring, blk_lpos->begin));
@@ -1249,15 +1284,15 @@ static const char *get_data(struct prb_data_ring *data_ring,
 		return NULL;
 	}

-	/* Regular data block: @begin less than @next and in same wrap. */
-	if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next) &&
-	    blk_lpos->begin < blk_lpos->next) {
+	/* Regular data block: @begin and @next in the same wrap. */
+	if (!is_blk_wrapped(data_ring, blk_lpos->begin, blk_lpos->next)) {
 		db = to_block(data_ring, blk_lpos->begin);
 		*data_size = blk_lpos->next - blk_lpos->begin;

 	/* Wrapping data block: @begin is one wrap behind @next. */
-	} else if (DATA_WRAPS(data_ring, blk_lpos->begin + DATA_SIZE(data_ring)) ==
-		   DATA_WRAPS(data_ring, blk_lpos->next)) {
+	} else if (!is_blk_wrapped(data_ring,
+				   blk_lpos->begin + DATA_SIZE(data_ring),
+				   blk_lpos->next)) {
 		db = to_block(data_ring, 0);
 		*data_size = DATA_INDEX(data_ring, blk_lpos->next);

@@ -1267,6 +1302,10 @@ static const char *get_data(struct prb_data_ring *data_ring,
 		return NULL;
 	}

+	/* Sanity check. Data-less blocks were handled earlier. */
+	if (WARN_ON_ONCE(!data_check_size(data_ring, *data_size) || !*data_size))
+		return NULL;
+
 	/* A valid data block will always be aligned to the ID size. */
 	if (WARN_ON_ONCE(blk_lpos->begin != ALIGN(blk_lpos->begin, sizeof(db->id))) ||
 	    WARN_ON_ONCE(blk_lpos->next != ALIGN(blk_lpos->next, sizeof(db->id)))) {
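The subtract-one detail in is_blk_wrapped() is what lets a perfectly fitting record stay in its wrap. Another standalone check, again with a hypothetical 16-byte ring where DATA_WRAPS() is plain division rather than the kernel's shift-and-mask arithmetic:

#include <assert.h>
#include <stdbool.h>

#define DATA_SIZE 16UL
#define DATA_WRAPS(lpos) ((lpos) / DATA_SIZE)	/* simplified stand-in */

static bool is_blk_wrapped(unsigned long begin_lpos, unsigned long next_lpos)
{
	/* next_lpos is one past the block, hence the subtraction. */
	return DATA_WRAPS(begin_lpos) != DATA_WRAPS(next_lpos - 1);
}

int main(void)
{
	assert(!is_blk_wrapped(0, 8));	/* block inside the first wrap */
	assert(!is_blk_wrapped(8, 16));	/* ends exactly on the boundary: no wrap */
	assert(is_blk_wrapped(8, 17));	/* spills into the next wrap */
	return 0;
}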
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 97db0b0ccf3e..14f86f0a8bc7 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -1467,12 +1467,12 @@ trace_hwlat_print(struct trace_iterator *iter, int flags,

 	trace_assign_type(field, entry);

-	trace_seq_printf(s, "#%-5u inner/outer(us): %4llu/%-5llu ts:%lld.%09ld count:%d",
+	trace_seq_printf(s, "#%-5u inner/outer(us): %4llu/%-5llu ts:%ptSp count:%d",
 			 field->seqnum,
 			 field->duration,
 			 field->outer_duration,
-			 (long long)field->timestamp.tv_sec,
-			 field->timestamp.tv_nsec, field->count);
+			 &field->timestamp,
+			 field->count);

 	if (field->nmi_count) {
 		/*
diff --git a/lib/tests/printf_kunit.c b/lib/tests/printf_kunit.c
index bc54cca2d7a6..7617e5b8b02c 100644
--- a/lib/tests/printf_kunit.c
+++ b/lib/tests/printf_kunit.c
@@ -504,6 +504,7 @@ time_and_date(struct kunit *kunittest)
 	};
 	/* 2019-01-04T15:32:23 */
 	time64_t t = 1546615943;
+	struct timespec64 ts = { .tv_sec = t, .tv_nsec = 11235813 };

 	test("(%pt?)", "%pt", &tm);
 	test("2018-11-26T05:35:43", "%ptR", &tm);
@@ -522,6 +523,9 @@ time_and_date(struct kunit *kunittest)
 	test("0119-00-04 15:32:23", "%ptTsr", &t);
 	test("15:32:23|2019-01-04", "%ptTts|%ptTds", &t, &t);
 	test("15:32:23|0119-00-04", "%ptTtrs|%ptTdrs", &t, &t);
+
+	test("2019-01-04T15:32:23.011235813", "%ptS", &ts);
+	test("1546615943.011235813", "%ptSp", &ts);
 }

 static void
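The expected strings in the kunit additions can be cross-checked in userspace, since %ptSp is documented as the equivalent of %lld.%09ld applied to tv_sec and tv_nsec:

#include <assert.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	long long tv_sec = 1546615943;	/* 2019-01-04T15:32:23 */
	long tv_nsec = 11235813;
	char buf[32];

	snprintf(buf, sizeof(buf), "%lld.%09ld", tv_sec, tv_nsec);
	assert(strcmp(buf, "1546615943.011235813") == 0);
	return 0;
}

Note how the zero-padded nine-digit field turns 11235813 into ".011235813"; without the 09 width the fraction would silently lose its leading zeros.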
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index eb0cb11d0d12..a3790c43a0ab 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -582,17 +582,18 @@ char *number(char *buf, char *end, unsigned long long num,
 	return buf;
 }

+#define special_hex_spec(size)					\
+(struct printf_spec) {						\
+	.field_width = 2 + 2 * (size),	/* 0x + hex */		\
+	.flags = SPECIAL | SMALL | ZEROPAD,			\
+	.base = 16,						\
+	.precision = -1,					\
+}
+
 static noinline_for_stack
 char *special_hex_number(char *buf, char *end, unsigned long long num, int size)
 {
-	struct printf_spec spec;
-
-	spec.field_width = 2 + 2 * size;	/* 0x + hex */
-	spec.flags = SPECIAL | SMALL | ZEROPAD;
-	spec.base = 16;
-	spec.precision = -1;
-
-	return number(buf, end, num, spec);
+	return number(buf, end, num, special_hex_spec(size));
 }

 static void move_right(char *buf, char *end, unsigned len, unsigned spaces)
@@ -1164,18 +1165,11 @@ char *range_string(char *buf, char *end, const struct range *range,
 	char sym[sizeof("[range 0x0123456789abcdef-0x0123456789abcdef]")];
 	char *p = sym, *pend = sym + sizeof(sym);

-	struct printf_spec range_spec = {
-		.field_width = 2 + 2 * sizeof(range->start),	/* 0x + 2 * 8 */
-		.flags = SPECIAL | SMALL | ZEROPAD,
-		.base = 16,
-		.precision = -1,
-	};
-
 	if (check_pointer(&buf, end, range, spec))
 		return buf;

 	p = string_nocheck(p, pend, "[range ", default_str_spec);
-	p = hex_range(p, pend, range->start, range->end, range_spec);
+	p = hex_range(p, pend, range->start, range->end, special_hex_spec(sizeof(range->start)));
 	*p++ = ']';
 	*p = '\0';

@@ -1928,9 +1922,6 @@ char *rtc_str(char *buf, char *end, const struct rtc_time *tm,
 	bool found = true;
 	int count = 2;

-	if (check_pointer(&buf, end, tm, spec))
-		return buf;
-
 	switch (fmt[count]) {
 	case 'd':
 		have_t = false;
@@ -1993,12 +1984,39 @@ char *time64_str(char *buf, char *end, const time64_t time,
 }

 static noinline_for_stack
+char *timespec64_str(char *buf, char *end, const struct timespec64 *ts,
+		     struct printf_spec spec, const char *fmt)
+{
+	static const struct printf_spec default_dec09_spec = {
+		.base = 10,
+		.field_width = 9,
+		.precision = -1,
+		.flags = ZEROPAD,
+	};
+
+	if (fmt[2] == 'p')
+		buf = number(buf, end, ts->tv_sec, default_dec_spec);
+	else
+		buf = time64_str(buf, end, ts->tv_sec, spec, fmt);
+	if (buf < end)
+		*buf = '.';
+	buf++;
+
+	return number(buf, end, ts->tv_nsec, default_dec09_spec);
+}
+
+static noinline_for_stack
 char *time_and_date(char *buf, char *end, void *ptr, struct printf_spec spec,
 		    const char *fmt)
 {
+	if (check_pointer(&buf, end, ptr, spec))
+		return buf;
+
 	switch (fmt[1]) {
 	case 'R':
 		return rtc_str(buf, end, (const struct rtc_time *)ptr, spec, fmt);
+	case 'S':
+		return timespec64_str(buf, end, (const struct timespec64 *)ptr, spec, fmt);
 	case 'T':
 		return time64_str(buf, end, *(const time64_t *)ptr, spec, fmt);
 	default:
@@ -2462,9 +2480,11 @@ early_param("no_hash_pointers", no_hash_pointers_enable);
 *	- 'd[234]' For a dentry name (optionally 2-4 last components)
 *	- 'D[234]' Same as 'd' but for a struct file
 *	- 'g' For block_device name (gendisk + partition number)
- *	- 't[RT][dt][r][s]' For time and date as represented by:
+ *	- 't[RST][dt][r][s]' For time and date as represented by:
 *		R struct rtc_time
+ *		S struct timespec64
 *		T time64_t
+ *	- 'tSp' For time represented by struct timespec64 printed as <seconds>.<nanoseconds>
 *	- 'C' For a clock, it prints the name (Common Clock Framework) or address
 *	      (legacy clock framework) of the clock
 *	- 'G' For flags to be printed as a collection of symbolic strings that would
@@ -2883,10 +2903,11 @@ int vsnprintf(char *buf, size_t size, const char *fmt_str, va_list args)
 		case FORMAT_STATE_NUM: {
 			unsigned long long num;
-			if (fmt.size <= sizeof(int))
-				num = convert_num_spec(va_arg(args, int), fmt.size, spec);
-			else
+
+			if (fmt.size > sizeof(int))
 				num = va_arg(args, long long);
+			else
+				num = convert_num_spec(va_arg(args, int), fmt.size, spec);
 			str = number(str, end, num, spec);
 			continue;
 		}
@@ -3054,8 +3075,8 @@ EXPORT_SYMBOL(scnprintf);
 * @fmt: The format string to use
 * @args: Arguments for the format string
 *
- * The function returns the number of characters written
- * into @buf. Use vsnprintf() or vscnprintf() in order to avoid
+ * The return value is the number of characters written into @buf not including
+ * the trailing '\0'. Use vsnprintf() or vscnprintf() in order to avoid
 * buffer overflows.
 *
 * If you're not already dealing with a va_list consider using sprintf().
@@ -3074,8 +3095,8 @@ EXPORT_SYMBOL(vsprintf);
 * @fmt: The format string to use
 * @...: Arguments for the format string
 *
- * The function returns the number of characters written
- * into @buf. Use snprintf() or scnprintf() in order to avoid
+ * The return value is the number of characters written into @buf not including
+ * the trailing '\0'. Use snprintf() or scnprintf() in order to avoid
 * buffer overflows.
 *
 * See the vsnprintf() documentation for format string extensions over C99.
@@ -3394,11 +3415,10 @@ int bstr_printf(char *buf, size_t size, const char *fmt_str, const u32 *bin_buf)
 		goto out;

 	case FORMAT_STATE_NUM:
-		if (fmt.size > sizeof(int)) {
+		if (fmt.size > sizeof(int))
 			num = get_arg(long long);
-		} else {
+		else
 			num = convert_num_spec(get_arg(int), fmt.size, spec);
-		}
 		str = number(str, end, num, spec);
 		continue;
 	}
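special_hex_spec() works because a C99 compound literal produces a fully initialized struct value at each use site, so both callers can pass it by value without declaring a named temporary. A minimal illustration follows; struct spec is a hypothetical stand-in for the kernel's printf_spec.

#include <assert.h>

struct spec {
	int field_width;
	int base;
	int precision;
};

/* Expands to a struct value, like special_hex_spec() in vsprintf.c. */
#define hex_spec(size)				\
(struct spec) {					\
	.field_width = 2 + 2 * (size),		\
	.base = 16,				\
	.precision = -1,			\
}

int main(void)
{
	/* Passed by value, exactly like number(..., special_hex_spec(size)). */
	struct spec s = hex_spec(sizeof(unsigned long));

	assert(s.field_width == 2 + 2 * (int)sizeof(unsigned long));
	assert(s.base == 16 && s.precision == -1);
	return 0;
}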
diff --git a/net/ceph/messenger_v2.c b/net/ceph/messenger_v2.c
index 9e48623018a3..833e57849c1d 100644
--- a/net/ceph/messenger_v2.c
+++ b/net/ceph/messenger_v2.c
@@ -1538,8 +1538,7 @@ static int prepare_keepalive2(struct ceph_connection *con)
 	struct timespec64 now;

 	ktime_get_real_ts64(&now);
-	dout("%s con %p timestamp %lld.%09ld\n", __func__, con, now.tv_sec,
-	     now.tv_nsec);
+	dout("%s con %p timestamp %ptSp\n", __func__, con, &now);

 	ceph_encode_timespec64(ts, &now);

@@ -2732,8 +2731,7 @@ static int process_keepalive2_ack(struct ceph_connection *con,
 	ceph_decode_need(&p, end, sizeof(struct ceph_timespec), bad);
 	ceph_decode_timespec64(&con->last_keepalive_ack, p);
-	dout("%s con %p timestamp %lld.%09ld\n", __func__, con,
-	     con->last_keepalive_ack.tv_sec, con->last_keepalive_ack.tv_nsec);
+	dout("%s con %p timestamp %ptSp\n", __func__, con, &con->last_keepalive_ack);

 	return 0;