x86, pat: Update the page flags for memtype atomically instead of using memtype_lock
While testing an application using the xpmem (out-of-kernel) driver, we noticed a significantly lower page fault rate on x86_64 than on ia64. For one test running with 32 cpus, one thread per cpu, it took 01:08 for each of the threads to vm_insert_pfn 2GB worth of pages. For the same test running on 256 cpus, one thread per cpu, it took 14:48 to vm_insert_pfn 2GB worth of pages.

The slowdown was tracked to lookup_memtype(), which acquires the spinlock memtype_lock. This heavily contended lock was slowing down vm_insert_pfn().

With the cmpxchg-on-page->flags method, both the 32 cpu and 256 cpu cases take approx 00:01.3 seconds to complete.

Signed-off-by: Robin Holt <holt@sgi.com>
LKML-Reference: <20100423153627.751194346@gulag1.americas.sgi.com>
Cc: Venkatesh Pallipadi <venkatesh.pallipadi@gmail.com>
Cc: Rafael Wysocki <rjw@novell.com>
Reviewed-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
parent 4daa2a8093
commit 1f9cc3cb6a
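The core of the change, visible in the cacheflush.h hunks below, is an open-coded compare-and-swap retry loop on page->flags. A minimal user-space sketch of that pattern follows, assuming GCC's __sync_val_compare_and_swap() as a stand-in for the kernel's cmpxchg(); the bit positions and the set_memtype_bits()/hammer() helpers are illustrative, not kernel code:

/*
 * User-space sketch of the lock-free update loop (build: gcc -O2 -pthread).
 * The bit positions and helper names are illustrative stand-ins.
 */
#include <pthread.h>
#include <stdio.h>

#define PG_ARCH_1	9	/* illustrative bit positions */
#define PG_UNCACHED	10

#define _PGMT_WC	 (1UL << PG_ARCH_1)
#define _PGMT_UC_MINUS	 (1UL << PG_UNCACHED)
#define _PGMT_MASK	 (1UL << PG_UNCACHED | 1UL << PG_ARCH_1)
#define _PGMT_CLEAR_MASK (~_PGMT_MASK)

static unsigned long flags = 1UL;	/* stands in for page->flags; bit 0 is unrelated */

static void set_memtype_bits(unsigned long memtype_flags)
{
	unsigned long old_flags, new_flags;

	do {
		old_flags = flags;
		new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
		/*
		 * __sync_val_compare_and_swap returns the value it found at
		 * the address; if that is not old_flags, another thread won
		 * the race and we retry with the fresh value.
		 */
	} while (__sync_val_compare_and_swap(&flags, old_flags, new_flags)
		 != old_flags);
}

static void *hammer(void *arg)
{
	unsigned long mt = (unsigned long)arg;

	for (int i = 0; i < 100000; i++)
		set_memtype_bits(mt);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, hammer, (void *)_PGMT_WC);
	pthread_create(&b, NULL, hammer, (void *)_PGMT_UC_MINUS);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* The unrelated bit 0 survives every concurrent memtype update. */
	printf("flags = %#lx (bit 0 still set: %lu)\n", flags, flags & 1UL);
	return 0;
}

Because each update rewrites only the two memtype bits and retries whenever anything else in the word changed in between, concurrent updates to other bits of the same flags word are never lost; that is what lets memtype_lock go away.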
arch/x86/include/asm/cacheflush.h

@@ -44,9 +44,6 @@ static inline void copy_from_user_page(struct vm_area_struct *vma,
 	memcpy(dst, src, len);
 }
 
-#define PG_WC				PG_arch_1
-PAGEFLAG(WC, WC)
-
 #ifdef CONFIG_X86_PAT
 /*
  * X86 PAT uses page flags WC and Uncached together to keep track of
@@ -55,16 +52,24 @@ PAGEFLAG(WC, WC)
  * _PAGE_CACHE_UC_MINUS and fourth state where page's memory type has not
  * been changed from its default (value of -1 used to denote this).
  * Note we do not support _PAGE_CACHE_UC here.
- *
- * Caller must hold memtype_lock for atomicity.
  */
+
+#define _PGMT_DEFAULT		0
+#define _PGMT_WC		(1UL << PG_arch_1)
+#define _PGMT_UC_MINUS		(1UL << PG_uncached)
+#define _PGMT_WB		(1UL << PG_uncached | 1UL << PG_arch_1)
+#define _PGMT_MASK		(1UL << PG_uncached | 1UL << PG_arch_1)
+#define _PGMT_CLEAR_MASK	(~_PGMT_MASK)
+
 static inline unsigned long get_page_memtype(struct page *pg)
 {
-	if (!PageUncached(pg) && !PageWC(pg))
+	unsigned long pg_flags = pg->flags & _PGMT_MASK;
+
+	if (pg_flags == _PGMT_DEFAULT)
 		return -1;
-	else if (!PageUncached(pg) && PageWC(pg))
+	else if (pg_flags == _PGMT_WC)
 		return _PAGE_CACHE_WC;
-	else if (PageUncached(pg) && !PageWC(pg))
+	else if (pg_flags == _PGMT_UC_MINUS)
 		return _PAGE_CACHE_UC_MINUS;
 	else
 		return _PAGE_CACHE_WB;
@@ -72,25 +77,26 @@ static inline unsigned long get_page_memtype(struct page *pg)
 
 static inline void set_page_memtype(struct page *pg, unsigned long memtype)
 {
+	unsigned long memtype_flags = _PGMT_DEFAULT;
+	unsigned long old_flags;
+	unsigned long new_flags;
+
 	switch (memtype) {
 	case _PAGE_CACHE_WC:
-		ClearPageUncached(pg);
-		SetPageWC(pg);
+		memtype_flags = _PGMT_WC;
 		break;
 	case _PAGE_CACHE_UC_MINUS:
-		SetPageUncached(pg);
-		ClearPageWC(pg);
+		memtype_flags = _PGMT_UC_MINUS;
 		break;
 	case _PAGE_CACHE_WB:
-		SetPageUncached(pg);
-		SetPageWC(pg);
-		break;
-	default:
-	case -1:
-		ClearPageUncached(pg);
-		ClearPageWC(pg);
+		memtype_flags = _PGMT_WB;
 		break;
 	}
+
+	do {
+		old_flags = pg->flags;
+		new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
+	} while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
 }
 #else
 static inline unsigned long get_page_memtype(struct page *pg) { return -1; }
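The pair of flag bits PG_uncached and PG_arch_1 encodes four memtype states in one word, so a reader can decode the memtype from a single load. A user-space model of the decode side (bit positions and the MT_* return codes are illustrative stand-ins for the kernel's _PAGE_CACHE_* values):

/* Two flag bits yield exactly four distinguishable memtype states,
 * including "not set yet" (-1). All values here are illustrative. */
#include <assert.h>
#include <stdio.h>

#define PG_ARCH_1	9
#define PG_UNCACHED	10

#define _PGMT_DEFAULT	0
#define _PGMT_WC	(1UL << PG_ARCH_1)
#define _PGMT_UC_MINUS	(1UL << PG_UNCACHED)
#define _PGMT_WB	(1UL << PG_UNCACHED | 1UL << PG_ARCH_1)
#define _PGMT_MASK	(1UL << PG_UNCACHED | 1UL << PG_ARCH_1)

enum { MT_DEFAULT = -1, MT_WB, MT_WC, MT_UC_MINUS };	/* illustrative */

static int decode_memtype(unsigned long flags)
{
	unsigned long pg_flags = flags & _PGMT_MASK;

	if (pg_flags == _PGMT_DEFAULT)
		return MT_DEFAULT;
	else if (pg_flags == _PGMT_WC)
		return MT_WC;
	else if (pg_flags == _PGMT_UC_MINUS)
		return MT_UC_MINUS;
	else
		return MT_WB;
}

int main(void)
{
	assert(decode_memtype(0) == MT_DEFAULT);
	assert(decode_memtype(_PGMT_WC) == MT_WC);
	assert(decode_memtype(_PGMT_UC_MINUS) == MT_UC_MINUS);
	assert(decode_memtype(_PGMT_WB) == MT_WB);
	puts("all four states decode correctly");
	return 0;
}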
arch/x86/mm/pat.c

@@ -190,8 +190,6 @@ static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
  * Here we do two pass:
  * - Find the memtype of all the pages in the range, look for any conflicts
  * - In case of no conflicts, set the new memtype for pages in the range
- *
- * Caller must hold memtype_lock for atomicity.
  */
 static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
 				  unsigned long *new_type)
@@ -297,9 +295,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 	is_range_ram = pat_pagerange_is_ram(start, end);
 	if (is_range_ram == 1) {
 
-		spin_lock(&memtype_lock);
 		err = reserve_ram_pages_type(start, end, req_type, new_type);
-		spin_unlock(&memtype_lock);
 
 		return err;
 	} else if (is_range_ram < 0) {
@@ -351,9 +347,7 @@ int free_memtype(u64 start, u64 end)
 	is_range_ram = pat_pagerange_is_ram(start, end);
 	if (is_range_ram == 1) {
 
-		spin_lock(&memtype_lock);
 		err = free_ram_pages_type(start, end);
-		spin_unlock(&memtype_lock);
 
 		return err;
 	} else if (is_range_ram < 0) {
@@ -394,10 +388,8 @@ static unsigned long lookup_memtype(u64 paddr)
 
 	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
 		struct page *page;
-		spin_lock(&memtype_lock);
 		page = pfn_to_page(paddr >> PAGE_SHIFT);
 		rettype = get_page_memtype(page);
-		spin_unlock(&memtype_lock);
 		/*
 		 * -1 from get_page_memtype() implies RAM page is in its
 		 * default state and not reserved, and hence of type WB
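These lock removals are safe because set_page_memtype() now publishes both memtype bits with a single cmpxchg() and get_page_memtype() decodes them from a single read of page->flags: a concurrent reader such as lookup_memtype() sees either the old or the new two-bit encoding, never a torn mix, so memtype_lock no longer has anything to protect on the RAM-page paths.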