x86/mm/cpa: Fix set_mce_nospec()

The recent commit fe0937b24f ("x86/mm/cpa: Fold cpa_flush_range() and
cpa_flush_array() into a single cpa_flush() function") accidentally made
the call to make_addr_canonical_again() go away, which breaks
set_mce_nospec().

Re-instate the call to convert the address back into canonical form right
before invoking either CLFLUSH or INVLPG. Rename the function while at it
to be shorter (and less MAGA).

Fixes: fe0937b24f ("x86/mm/cpa: Fold cpa_flush_range() and cpa_flush_array() into a single cpa_flush() function")
Reported-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Tony Luck <tony.luck@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Rik van Riel <riel@surriel.com>
Link: https://lkml.kernel.org/r/20190208120859.GH32511@hirez.programming.kicks-ass.net
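
As an aside, the canonicalization being reinstated is just a sign-extending shift: shifting the top bit out and back in turns a decoy address (a 1:1-mapping address with bit 63 flipped, as the comment in the first hunk below describes) back into canonical form, while leaving an already canonical address untouched. A minimal user-space sketch of the same arithmetic; the concrete address value is made up for illustration:

    #include <stdio.h>

    /* Same arithmetic as fix_addr(): shift the top bit out, then
     * sign-extend it back in. Canonical addresses pass through unchanged. */
    static unsigned long canonicalize(unsigned long addr)
    {
            return (long)(addr << 1) >> 1;
    }

    int main(void)
    {
            unsigned long addr  = 0xffff888012345000UL; /* made-up 1:1-map address */
            unsigned long decoy = addr ^ (1UL << 63);   /* top bit flipped */

            printf("decoy:     %#lx\n", decoy);               /* 0x7fff888012345000 */
            printf("restored:  %#lx\n", canonicalize(decoy)); /* 0xffff888012345000 */
            printf("unchanged: %#lx\n", canonicalize(addr));  /* 0xffff888012345000 */
            return 0;
    }

Because the shift is a no-op for canonical addresses, the flush paths can apply it unconditionally rather than branching on whether the caller passed a decoy address.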
@@ -230,6 +230,29 @@ static bool __cpa_pfn_in_highmap(unsigned long pfn)
 
 #endif
 
+/*
+ * See set_mce_nospec().
+ *
+ * Machine check recovery code needs to change cache mode of poisoned pages to
+ * UC to avoid speculative access logging another error. But passing the
+ * address of the 1:1 mapping to set_memory_uc() is a fine way to encourage a
+ * speculative access. So we cheat and flip the top bit of the address. This
+ * works fine for the code that updates the page tables. But at the end of the
+ * process we need to flush the TLB and cache and the non-canonical address
+ * causes a #GP fault when used by the INVLPG and CLFLUSH instructions.
+ *
+ * But in the common case we already have a canonical address. This code
+ * will fix the top bit if needed and is a no-op otherwise.
+ */
+static inline unsigned long fix_addr(unsigned long addr)
+{
+#ifdef CONFIG_X86_64
+	return (long)(addr << 1) >> 1;
+#else
+	return addr;
+#endif
+}
+
 static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx)
 {
 	if (cpa->flags & CPA_PAGES_ARRAY) {
@@ -313,7 +336,7 @@ void __cpa_flush_tlb(void *data)
 	unsigned int i;
 
 	for (i = 0; i < cpa->numpages; i++)
-		__flush_tlb_one_kernel(__cpa_addr(cpa, i));
+		__flush_tlb_one_kernel(fix_addr(__cpa_addr(cpa, i)));
 }
 
 static void cpa_flush(struct cpa_data *data, int cache)
@@ -347,7 +370,7 @@ static void cpa_flush(struct cpa_data *data, int cache)
 		 * Only flush present addresses:
 		 */
 		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
-			clflush_cache_range_opt((void *)addr, PAGE_SIZE);
+			clflush_cache_range_opt((void *)fix_addr(addr), PAGE_SIZE);
 	}
 	mb();
 }
@@ -1627,29 +1650,6 @@ out:
 	return ret;
 }
 
-/*
- * Machine check recovery code needs to change cache mode of poisoned
- * pages to UC to avoid speculative access logging another error. But
- * passing the address of the 1:1 mapping to set_memory_uc() is a fine
- * way to encourage a speculative access. So we cheat and flip the top
- * bit of the address. This works fine for the code that updates the
- * page tables. But at the end of the process we need to flush the cache
- * and the non-canonical address causes a #GP fault when used by the
- * CLFLUSH instruction.
- *
- * But in the common case we already have a canonical address. This code
- * will fix the top bit if needed and is a no-op otherwise.
- */
-static inline unsigned long make_addr_canonical_again(unsigned long addr)
-{
-#ifdef CONFIG_X86_64
-	return (long)(addr << 1) >> 1;
-#else
-	return addr;
-#endif
-}
-
 static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 				    pgprot_t mask_set, pgprot_t mask_clr,
 				    int force_split, int in_flag,