seqlock: Introduce raw_read_seqcount_latch()

Because with latches there is a strict data dependency on the seq load,
we can avoid the rmb in favour of a read_barrier_depends.

Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
commit 7fc26327b7 (parent 0a04b01669)
Author: Peter Zijlstra, 2015-05-27 11:09:36 +09:30
Committed by: Rusty Russell
2 changed files with 8 additions and 3 deletions
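The primitive doing the work here is lockless_dereference() from <linux/compiler.h> (hence the new include in the first hunk below): a READ_ONCE() load followed by smp_read_barrier_depends(), which is a no-op on every architecture except DEC Alpha. As a rough sketch of its definition in that era's compiler.h (quoted from memory, so the exact spelling may differ):

#define lockless_dereference(p) \
({ \
	typeof(p) _________p1 = READ_ONCE(p); \
	smp_read_barrier_depends();	/* Dependency order vs. p above. */ \
	(_________p1); \
})

Since every subsequent use of the loaded value (the index computation, the array access) carries a data dependency on the load, the dependency barrier suffices; a full smp_rmb() would be stronger than required.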

include/linux/seqlock.h

@@ -35,6 +35,7 @@
 #include <linux/spinlock.h>
 #include <linux/preempt.h>
 #include <linux/lockdep.h>
+#include <linux/compiler.h>
 #include <asm/processor.h>
 
 /*
@@ -233,6 +234,11 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
 	s->sequence++;
 }
 
+static inline int raw_read_seqcount_latch(seqcount_t *s)
+{
+	return lockless_dereference(s->sequence);
+}
+
 /**
  * raw_write_seqcount_latch - redirect readers to even/odd copy
  * @s: pointer to seqcount_t
@@ -284,8 +290,7 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
  *	unsigned seq, idx;
  *
  *	do {
- *		seq = latch->seq;
- *		smp_rmb();
+ *		seq = lockless_dereference(latch->seq);
  *
  *		idx = seq & 0x01;
  *		entry = data_query(latch->data[idx], ...);
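
To make the pairing concrete, here is a minimal sketch of a latch writer and reader built on the new helper, following the documentation comment above; latch_struct, data_struct, and the single long payload are illustrative assumptions, not an existing kernel API, and the writer is assumed to be serialized externally:

struct data_struct {
	long			val;
};

struct latch_struct {
	seqcount_t		seq;
	struct data_struct	data[2];
};

/* Writer (serialized externally): flip readers to the idle copy,
 * update the other, then flip back and update the second copy. */
static void latch_modify(struct latch_struct *latch, long val)
{
	raw_write_seqcount_latch(&latch->seq);	/* readers move to data[1] */
	latch->data[0].val = val;
	raw_write_seqcount_latch(&latch->seq);	/* readers move to data[0] */
	latch->data[1].val = val;
}

/* Reader: the dependent load of seq orders the data[] access
 * without an explicit smp_rmb() before it. */
static long latch_query(struct latch_struct *latch)
{
	unsigned seq, idx;
	long val;

	do {
		seq = raw_read_seqcount_latch(&latch->seq);
		idx = seq & 0x01;
		val = latch->data[idx].val;
		smp_rmb();	/* order the data read vs. the seq re-check */
	} while (seq != latch->seq);

	return val;
}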

kernel/time/timekeeping.c

@@ -393,7 +393,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
 	u64 now;
 
 	do {
-		seq = raw_read_seqcount(&tkf->seq);
+		seq = raw_read_seqcount_latch(&tkf->seq);
 		tkr = tkf->base + (seq & 0x01);
 		now = ktime_to_ns(tkr->base) + timekeeping_get_ns(tkr);
 	} while (read_seqcount_retry(&tkf->seq, seq));
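
For readers who think in the C11 memory model rather than kernel barriers, the same read-side idea can be sketched in plain userspace C: memory_order_consume plays the role of lockless_dereference(), and an acquire fence stands in for the smp_rmb() guarding the retry check. All names here are illustrative:

#include <stdatomic.h>

struct data { long val; };

struct latch {
	atomic_uint	seq;		/* even: data[0] stable, odd: data[1] */
	struct data	data[2];
};

static long latch_read(struct latch *l)
{
	unsigned int seq, idx;
	long val;

	do {
		/* Dependent load: idx and the data[] access both depend
		 * on seq, so no read barrier is needed before them. */
		seq = atomic_load_explicit(&l->seq, memory_order_consume);
		idx = seq & 0x01;
		val = l->data[idx].val;

		/* Order the data read before re-checking seq (smp_rmb()
		 * analogue). */
		atomic_thread_fence(memory_order_acquire);
	} while (seq != atomic_load_explicit(&l->seq, memory_order_relaxed));

	return val;
}

(In practice compilers promote memory_order_consume to memory_order_acquire, which remains correct, just stronger than the data dependency strictly requires.)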