| field | value | date |
|---|---|---|
| author | Marco Elver <elver@google.com> | 2024-11-04 16:43:08 +0100 |
| committer | Peter Zijlstra <peterz@infradead.org> | 2024-11-05 12:55:35 +0100 |
| commit | 93190bc35d6d4364a4d8c38ac8961dabecbff4ed | |
| tree | 6a48139a0d9385b879072145c5a94a5f0a69e837 /kernel/time/sched_clock.c | |
| parent | 5c1806c41ce0a0110db5dd4c483cf2dc28b3ddf0 | |
seqlock, treewide: Switch to non-raw seqcount_latch interface
Switch all instrumentable users of the seqcount_latch interface over to
the non-raw interface.
Co-developed-by: "Peter Zijlstra (Intel)" <peterz@infradead.org>
Signed-off-by: "Peter Zijlstra (Intel)" <peterz@infradead.org>
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20241104161910.780003-5-elver@google.com
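For context, the read side of a latched seqcount follows the pattern below. This is a minimal sketch only, assuming the post-series non-raw API; the struct, variable, and function names (example_data, latch_seq, example_read) are illustrative and not part of this patch:

```c
#include <linux/seqlock.h>

/*
 * Illustrative data guarded by a latched seqcount: two copies are kept,
 * and readers pick one based on the low bit of the sequence count.
 */
struct example_data {
	u64 val;
};

static struct example_data copies[2];
static seqcount_latch_t latch_seq = SEQCNT_LATCH_ZERO(latch_seq);

static u64 example_read(void)
{
	unsigned int seq;
	u64 val;

	do {
		/* Non-raw variant: instrumentable, e.g. visible to KCSAN. */
		seq = read_seqcount_latch(&latch_seq);
		val = copies[seq & 1].val;
	} while (read_seqcount_latch_retry(&latch_seq, seq));

	return val;
}
```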
Diffstat (limited to 'kernel/time/sched_clock.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/time/sched_clock.c | 12 |

1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index 29bdf309dae8..fcca4e72f1ef 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -71,13 +71,13 @@ static __always_inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
 
 notrace struct clock_read_data *sched_clock_read_begin(unsigned int *seq)
 {
-	*seq = raw_read_seqcount_latch(&cd.seq);
+	*seq = read_seqcount_latch(&cd.seq);
 	return cd.read_data + (*seq & 1);
 }
 
 notrace int sched_clock_read_retry(unsigned int seq)
 {
-	return raw_read_seqcount_latch_retry(&cd.seq, seq);
+	return read_seqcount_latch_retry(&cd.seq, seq);
 }
 
 static __always_inline unsigned long long __sched_clock(void)
@@ -132,16 +132,18 @@ unsigned long long notrace sched_clock(void)
 static void update_clock_read_data(struct clock_read_data *rd)
 {
 	/* steer readers towards the odd copy */
-	raw_write_seqcount_latch(&cd.seq);
+	write_seqcount_latch_begin(&cd.seq);
 
 	/* now its safe for us to update the normal (even) copy */
 	cd.read_data[0] = *rd;
 
 	/* switch readers back to the even copy */
-	raw_write_seqcount_latch(&cd.seq);
+	write_seqcount_latch(&cd.seq);
 
 	/* update the backup (odd) copy with the new data */
 	cd.read_data[1] = *rd;
+
+	write_seqcount_latch_end(&cd.seq);
 }
 
 /*
@@ -279,7 +281,7 @@ void __init generic_sched_clock_init(void)
  */
 static u64 notrace suspended_sched_clock_read(void)
 {
-	unsigned int seq = raw_read_seqcount_latch(&cd.seq);
+	unsigned int seq = read_seqcount_latch(&cd.seq);
 
 	return cd.read_data[seq & 1].epoch_cyc;
 }
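The write side, as updated in update_clock_read_data() above, brackets the two copy updates with the new begin/end pair: readers are steered to the odd copy, the even copy is updated, readers are flipped back, and the odd copy is brought up to date. Again a sketch only, using the same illustrative names as the read-side example:

```c
/* Sketch of the writer pairing with example_read() above. */
static void example_update(const struct example_data *new)
{
	/* Steer readers to the odd copy while the even copy is rewritten. */
	write_seqcount_latch_begin(&latch_seq);
	copies[0] = *new;

	/* Switch readers back to the (now updated) even copy. */
	write_seqcount_latch(&latch_seq);
	copies[1] = *new;

	/* End the write section; pairs with write_seqcount_latch_begin(). */
	write_seqcount_latch_end(&latch_seq);
}
```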