75ef82190d

As a result of commit "x86/xen: Avoid fast syscall path for Xen PV guests",
the usergs_sysret32 pv op is no longer called by Xen PV guests, and since
they were its only users we can safely remove it.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reviewed-by: Borislav Petkov <bp@suse.de>
Acked-by: Andy Lutomirski <luto@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: david.vrabel@citrix.com
Cc: konrad.wilk@oracle.com
Cc: virtualization@lists.linux-foundation.org
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/1447970147-1733-4-git-send-email-boris.ostrovsky@oracle.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
84 lines
2.4 KiB
C
#include <asm/paravirt.h>
#include <asm/asm-offsets.h>
#include <linux/stringify.h>

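/*
 * DEF_NATIVE(ops, name, code) emits the native instruction sequence 'code'
 * between start_##ops##_##name and end_##ops##_##name markers, giving
 * native_patch() below a template it can copy directly over the
 * corresponding paravirt patch site.
 */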
DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq");
DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
DEF_NATIVE(pv_cpu_ops, clts, "clts");
DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");

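/*
 * Return-to-user-mode path: swap back to the user GS base, then SYSRET.
 * (The 32-bit usergs_sysret32 template was removed by this commit; Xen PV
 * guests, its only users, no longer take the fast syscall path.)
 */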
DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");

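/*
 * Identity templates: used to patch pv ops that simply return their
 * argument unchanged on native hardware (e.g. pte_val).
 */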
DEF_NATIVE(, mov32, "mov %edi, %eax");
DEF_NATIVE(, mov64, "mov %rdi, %rax");

#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)");
#endif

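/*
 * Patch a call to a 32- or 64-bit identity pv op with a single
 * register-to-register mov, using the templates above.
 */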
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
{
	return paravirt_patch_insns(insnbuf, len,
				    start__mov32, end__mov32);
}

unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
{
	return paravirt_patch_insns(insnbuf, len,
				    start__mov64, end__mov64);
}

extern bool pv_is_native_spin_unlock(void);

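/*
 * Patch one paravirt call site: if a native template exists for 'type',
 * copy its instructions into ibuf; otherwise fall back to the generic
 * patcher.
 */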
unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
		      unsigned long addr, unsigned len)
{
	const unsigned char *start, *end;
	unsigned ret;

#define PATCH_SITE(ops, x)						\
		case PARAVIRT_PATCH(ops.x):				\
			start = start_##ops##_##x;			\
			end = end_##ops##_##x;				\
			goto patch_site
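	/*
	 * Each PATCH_SITE(ops, x) expands to a case label selecting the
	 * native template emitted by the matching DEF_NATIVE() above.
	 */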
	switch (type) {
		PATCH_SITE(pv_irq_ops, restore_fl);
		PATCH_SITE(pv_irq_ops, save_fl);
		PATCH_SITE(pv_irq_ops, irq_enable);
		PATCH_SITE(pv_irq_ops, irq_disable);
		PATCH_SITE(pv_cpu_ops, usergs_sysret64);
		PATCH_SITE(pv_cpu_ops, swapgs);
		PATCH_SITE(pv_mmu_ops, read_cr2);
		PATCH_SITE(pv_mmu_ops, read_cr3);
		PATCH_SITE(pv_mmu_ops, write_cr3);
		PATCH_SITE(pv_cpu_ops, clts);
		PATCH_SITE(pv_mmu_ops, flush_tlb_single);
		PATCH_SITE(pv_cpu_ops, wbinvd);
#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
		case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
			if (pv_is_native_spin_unlock()) {
				start = start_pv_lock_ops_queued_spin_unlock;
				end   = end_pv_lock_ops_queued_spin_unlock;
				goto patch_site;
			}
#endif

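	/*
	 * No native template for this op: paravirt_patch_default() will
	 * nop it out, patch an identity mov, or emit a direct jmp/call to
	 * the target pv op as appropriate.
	 */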
	default:
		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
		break;

patch_site:
		ret = paravirt_patch_insns(ibuf, len, start, end);
		break;
	}
#undef PATCH_SITE
	return ret;
}