dmaengine: idxd: Use local64_try_cmpxchg in perfmon_pmu_event_update
Use local64_try_cmpxchg instead of local64_cmpxchg(*ptr, old, new) == old in perfmon_pmu_event_update. The x86 CMPXCHG instruction reports success in the ZF flag, so this change saves a compare after cmpxchg (and the related move instruction in front of cmpxchg). Also, try_cmpxchg implicitly assigns the old *ptr value to "old" when cmpxchg fails, so there is no need to re-read the value in the loop.

No functional change intended.

Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Vinod Koul <vkoul@kernel.org>
Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Reviewed-by: Tom Zanussi <tom.zanussi@linux.intel.com>
Link: https://lore.kernel.org/r/20230703145346.5206-1-ubizjak@gmail.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
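For illustration only, here is a minimal userspace sketch of the loop shape the patch moves to, written with C11 atomics rather than the kernel's local64_* API; atomic_compare_exchange_weak() has the same semantics the message relies on (it returns a bool and, on failure, writes the current value back into the expected-value argument). The read_counter() helper is a made-up stand-in for perfmon_pmu_read_counter():

#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic int64_t prev_count;	/* analogue of hwc->prev_count */

/* Hypothetical stand-in for perfmon_pmu_read_counter(). */
static int64_t read_counter(void)
{
	return atomic_load(&prev_count) + 3;
}

static int64_t event_update(void)
{
	int64_t prev, new;

	/* Read the expected value once, before the retry loop. */
	prev = atomic_load(&prev_count);
	do {
		new = read_counter();
		/*
		 * On failure, compare_exchange refreshes "prev" with the
		 * current value, so there is no explicit re-read and no
		 * "!= prev" comparison -- the same simplification the patch
		 * gets from local64_try_cmpxchg().
		 */
	} while (!atomic_compare_exchange_weak(&prev_count, &prev, new));

	return new - prev;	/* the delta between the two snapshots */
}

int main(void)
{
	printf("delta = %" PRId64 "\n", event_update());
	return 0;
}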
parent f1de55ff7c
commit cae701b9cc
@@ -245,12 +245,11 @@ static void perfmon_pmu_event_update(struct perf_event *event)
 	int shift = 64 - idxd->idxd_pmu->counter_width;
 	struct hw_perf_event *hwc = &event->hw;
 
+	prev_raw_count = local64_read(&hwc->prev_count);
 	do {
-		prev_raw_count = local64_read(&hwc->prev_count);
 		new_raw_count = perfmon_pmu_read_counter(event);
-	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
-			new_raw_count) != prev_raw_count);
-
+	} while (!local64_try_cmpxchg(&hwc->prev_count,
+				      &prev_raw_count, new_raw_count));
 	n = (new_raw_count << shift);
 	p = (prev_raw_count << shift);
 