x86 PAT: fix performance drop for glx, use UC minus for ioremap(), ioremap_nocache() and pci_mmap_page_range()

Use UC_MINUS for ioremap(), ioremap_nocache() instead of strong UC.
Once all the X drivers move to ioremap_wc(), we can go back to strong
UC semantics for ioremap() and ioremap_nocache().

To avoid attribute aliasing issues, pci_mmap_page_range() will also
use UC_MINUS for default non write-combining mapping request.

Next steps:
	a) change all the video drivers using ioremap() or ioremap_nocache()
	   and adding WC MTRR using mtrr_add() to ioremap_wc()

	b) for strict usage, we can go back to strong UC semantics
	   for ioremap() and ioremap_nocache() after some grace period for
	   completing step-a.

	c) user level X server needs to use the appropriate method for setting
	   up WC mapping (like using resourceX_wc sysfs file instead of
	   adding MTRR for WC and using /dev/mem or resourceX under /sys)

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
This commit is contained in:
Suresh Siddha 2008-04-25 17:07:22 -07:00 committed by Ingo Molnar
parent b9b39bfba5
commit de33c442ed
3 changed files with 33 additions and 9 deletions

View File

@ -176,11 +176,11 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
/* /*
* Do not fallback to certain memory types with certain * Do not fallback to certain memory types with certain
* requested type: * requested type:
* - request is uncached, return cannot be write-back * - request is uc-, return cannot be write-back
* - request is uncached, return cannot be write-combine * - request is uc-, return cannot be write-combine
* - request is write-combine, return cannot be write-back * - request is write-combine, return cannot be write-back
*/ */
if ((prot_val == _PAGE_CACHE_UC && if ((prot_val == _PAGE_CACHE_UC_MINUS &&
(new_prot_val == _PAGE_CACHE_WB || (new_prot_val == _PAGE_CACHE_WB ||
new_prot_val == _PAGE_CACHE_WC)) || new_prot_val == _PAGE_CACHE_WC)) ||
(prot_val == _PAGE_CACHE_WC && (prot_val == _PAGE_CACHE_WC &&
@ -201,6 +201,9 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
default: default:
prot = PAGE_KERNEL_NOCACHE; prot = PAGE_KERNEL_NOCACHE;
break; break;
case _PAGE_CACHE_UC_MINUS:
prot = PAGE_KERNEL_UC_MINUS;
break;
case _PAGE_CACHE_WC: case _PAGE_CACHE_WC:
prot = PAGE_KERNEL_WC; prot = PAGE_KERNEL_WC;
break; break;
@ -255,7 +258,16 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
*/ */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size) void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{ {
return __ioremap_caller(phys_addr, size, _PAGE_CACHE_UC, /*
* Ideally, this should be:
* pat_wc_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
*
* Till we fix all X drivers to use ioremap_wc(), we will use
* UC MINUS.
*/
unsigned long val = _PAGE_CACHE_UC_MINUS;
return __ioremap_caller(phys_addr, size, val,
__builtin_return_address(0)); __builtin_return_address(0));
} }
EXPORT_SYMBOL(ioremap_nocache); EXPORT_SYMBOL(ioremap_nocache);

View File

@ -777,14 +777,20 @@ static inline int change_page_attr_clear(unsigned long addr, int numpages,
int _set_memory_uc(unsigned long addr, int numpages) int _set_memory_uc(unsigned long addr, int numpages)
{ {
/*
* for now UC MINUS. see comments in ioremap_nocache()
*/
return change_page_attr_set(addr, numpages, return change_page_attr_set(addr, numpages,
__pgprot(_PAGE_CACHE_UC)); __pgprot(_PAGE_CACHE_UC_MINUS));
} }
int set_memory_uc(unsigned long addr, int numpages) int set_memory_uc(unsigned long addr, int numpages)
{ {
/*
* for now UC MINUS. see comments in ioremap_nocache()
*/
if (reserve_memtype(addr, addr + numpages * PAGE_SIZE, if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
_PAGE_CACHE_UC, NULL)) _PAGE_CACHE_UC_MINUS, NULL))
return -EINVAL; return -EINVAL;
return _set_memory_uc(addr, numpages); return _set_memory_uc(addr, numpages);

View File

@ -301,6 +301,13 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
prot = pgprot_val(vma->vm_page_prot); prot = pgprot_val(vma->vm_page_prot);
if (pat_wc_enabled && write_combine) if (pat_wc_enabled && write_combine)
prot |= _PAGE_CACHE_WC; prot |= _PAGE_CACHE_WC;
else if (pat_wc_enabled)
/*
* ioremap() and ioremap_nocache() defaults to UC MINUS for now.
* To avoid attribute conflicts, request UC MINUS here
* aswell.
*/
prot |= _PAGE_CACHE_UC_MINUS;
else if (boot_cpu_data.x86 > 3) else if (boot_cpu_data.x86 > 3)
prot |= _PAGE_CACHE_UC; prot |= _PAGE_CACHE_UC;
@ -319,9 +326,8 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
* - request is uncached, return cannot be write-combine * - request is uncached, return cannot be write-combine
* - request is write-combine, return cannot be write-back * - request is write-combine, return cannot be write-back
*/ */
if ((flags == _PAGE_CACHE_UC && if ((flags == _PAGE_CACHE_UC_MINUS &&
(new_flags == _PAGE_CACHE_WB || (new_flags == _PAGE_CACHE_WB)) ||
new_flags == _PAGE_CACHE_WC)) ||
(flags == _PAGE_CACHE_WC && (flags == _PAGE_CACHE_WC &&
new_flags == _PAGE_CACHE_WB)) { new_flags == _PAGE_CACHE_WB)) {
free_memtype(addr, addr+len); free_memtype(addr, addr+len);