x86: Use 3/4*sizeof(per-thread-L3) as low bound for NT threshold.
On some machines we end up with incomplete cache information. This can
make the new calculation of `sizeof(total-L3)/custom-divisor` end up
lower than intended (and lower than the prior value). So reintroduce
the old bound as a lower bound to avoid potentially regressing code
where we don't have complete information to make the decision.
Reviewed-by: DJ Delorie <dj@redhat.com>
(cherry picked from commit 8b9a0af8ca)
commit 68a2030415
parent 402324a710
@@ -797,12 +797,21 @@ init_cacheinfo (void)
      modern HW detects streaming patterns and provides proper LRU hints so that
      the maximum thrashing capped at 1/associativity.  */
   unsigned long int non_temporal_threshold = shared / 4;
+
+  /* If the computed non_temporal_threshold <= 3/4 * per-thread L3, we most
+     likely have incorrect/incomplete cache info in which case, default to
+     3/4 * per-thread L3 to avoid regressions.  */
+  unsigned long int non_temporal_threshold_lowbound
+      = shared_per_thread * 3 / 4;
+  if (non_temporal_threshold < non_temporal_threshold_lowbound)
+    non_temporal_threshold = non_temporal_threshold_lowbound;
+
   /* If no ERMS, we use the per-thread L3 chunking. Normal cacheable stores run
      a higher risk of actually thrashing the cache as they don't have a HW LRU
      hint. As well, their performance in highly parallel situations is
      noticeably worse.  */
   if (!CPU_FEATURES_CPU_P (cpu_features, ERMS))
-    non_temporal_threshold = shared_per_thread * 3 / 4;
+    non_temporal_threshold = non_temporal_threshold_lowbound;
 
   __x86_shared_non_temporal_threshold
     = (cpu_features->non_temporal_threshold != 0
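For illustration only, below is a small standalone C sketch of the clamping logic this hunk adds. It is not glibc code: the helper nt_threshold, the MiB constant, and the example cache sizes are hypothetical, while the shared / 4 starting point and the shared_per_thread * 3 / 4 lower bound mirror the lines in the hunk above.

#include <stdio.h>

/* Minimal sketch (assumed values, not glibc code).  `shared' and
   `shared_per_thread' stand in for what init_cacheinfo derives from the
   CPU's cache info: total L3 size and the per-thread share of it.  */
static unsigned long int
nt_threshold (unsigned long int shared, unsigned long int shared_per_thread)
{
  /* Same arithmetic as the hunk above: start from 1/4 of the total L3...  */
  unsigned long int non_temporal_threshold = shared / 4;

  /* ...and never let it fall below 3/4 of the per-thread L3 share, the
     lower bound this commit reintroduces for incomplete cache info.  */
  unsigned long int non_temporal_threshold_lowbound
      = shared_per_thread * 3 / 4;
  if (non_temporal_threshold < non_temporal_threshold_lowbound)
    non_temporal_threshold = non_temporal_threshold_lowbound;

  return non_temporal_threshold;
}

int
main (void)
{
  const unsigned long int MiB = 1024 * 1024;

  /* Complete cache info (example): 32 MiB L3, 2 MiB per thread
     -> 8 MiB; the lower bound is not needed.  */
  printf ("%lu\n", nt_threshold (32 * MiB, 2 * MiB));

  /* Incomplete cache info (example): only 4 MiB of L3 reported
     -> 1 MiB, below the old value, so 3/4 * per-thread (1.5 MiB) wins.  */
  printf ("%lu\n", nt_threshold (4 * MiB, 2 * MiB));
  return 0;
}

With complete cache information the 1/4-of-total-L3 value dominates; when the reported L3 is implausibly small, the result is pinned at 3/4 of the per-thread share, which was the previous threshold, matching the intent stated in the commit message.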