Merge branch 'x86-apic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 apic updates from Ingo Molnar:
 "Misc changes:

   - optimize (reduce) IRQ handler tracing overhead (Wanpeng Li)

   - clean up MSR helpers (Borislav Petkov)

   - fix build warning on some configs (Sebastian Andrzej Siewior)"

* 'x86-apic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/msr: Cleanup/streamline MSR helpers
  x86/apic: Prevent tracing on apic_msr_write_eoi()
  x86/msr: Add wrmsr_notrace()
  x86/apic: Get rid of "warning: 'acpi_ioapic_lock' defined but not used"
commit 4ade5b2268
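For readers skimming the diff below, a minimal standalone sketch of the pattern this series introduces may help: the traced MSR write wraps a raw, untraced helper, and that raw helper is also exposed (as wrmsr_notrace() in the real patches) for callers such as the APIC EOI write that must not recurse into the tracing machinery. This is a userspace mock, not kernel code; every mock_*/_stub identifier below is invented for illustration.

/* Userspace mock of the wrmsr()/wrmsr_notrace() split; illustrative only. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static bool write_msr_tracepoint_enabled;	/* stands in for msr_tracepoint_active() */

static void trace_write_msr_stub(unsigned int msr, uint64_t val)
{
	printf("trace: wrmsr(0x%x) = 0x%llx\n", msr, (unsigned long long)val);
}

/* Raw write: never traces, so it is safe from the EOI/tracing fast path. */
static void mock_write_msr_notrace(unsigned int msr, uint32_t low, uint32_t high)
{
	(void)msr; (void)low; (void)high;	/* a real kernel would execute WRMSR here */
}

/* Regular write: reuses the raw helper, then fires the tracepoint if enabled. */
static void mock_write_msr(unsigned int msr, uint32_t low, uint32_t high)
{
	mock_write_msr_notrace(msr, low, high);
	if (write_msr_tracepoint_enabled)
		trace_write_msr_stub(msr, ((uint64_t)high << 32) | low);
}

int main(void)
{
	write_msr_tracepoint_enabled = true;
	mock_write_msr(0x1b, 0xfee00000, 0);	/* ordinary caller: traced */
	mock_write_msr_notrace(0x80b, 0, 0);	/* EOI-style caller: silent */
	return 0;
}

Built with a plain C compiler, the first call prints a trace line and the second stays silent, which is the property the apic.h hunk below relies on.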
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -196,7 +196,7 @@ static inline void native_apic_msr_write(u32 reg, u32 v)
 
 static inline void native_apic_msr_eoi_write(u32 reg, u32 v)
 {
-	wrmsr(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0);
+	wrmsr_notrace(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0);
 }
 
 static inline u32 native_apic_msr_read(u32 reg)
@@ -332,6 +332,7 @@ struct apic {
 	 * on write for EOI.
 	 */
 	void	(*eoi_write)(u32 reg, u32 v);
+	void	(*native_eoi_write)(u32 reg, u32 v);
 	u64	(*icr_read)(void);
 	void	(*icr_write)(u32 low, u32 high);
 	void	(*wait_icr_idle)(void);
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -70,14 +70,14 @@ extern struct tracepoint __tracepoint_read_msr;
 extern struct tracepoint __tracepoint_write_msr;
 extern struct tracepoint __tracepoint_rdpmc;
 #define msr_tracepoint_active(t) static_key_false(&(t).key)
-extern void do_trace_write_msr(unsigned msr, u64 val, int failed);
-extern void do_trace_read_msr(unsigned msr, u64 val, int failed);
-extern void do_trace_rdpmc(unsigned msr, u64 val, int failed);
+extern void do_trace_write_msr(unsigned int msr, u64 val, int failed);
+extern void do_trace_read_msr(unsigned int msr, u64 val, int failed);
+extern void do_trace_rdpmc(unsigned int msr, u64 val, int failed);
 #else
 #define msr_tracepoint_active(t) false
-static inline void do_trace_write_msr(unsigned msr, u64 val, int failed) {}
-static inline void do_trace_read_msr(unsigned msr, u64 val, int failed) {}
-static inline void do_trace_rdpmc(unsigned msr, u64 val, int failed) {}
+static inline void do_trace_write_msr(unsigned int msr, u64 val, int failed) {}
+static inline void do_trace_read_msr(unsigned int msr, u64 val, int failed) {}
+static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
 #endif
 
 static inline unsigned long long native_read_msr(unsigned int msr)
@@ -115,22 +115,36 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
 }
 
 /* Can be uninlined because referenced by paravirt */
-notrace static inline void native_write_msr(unsigned int msr,
-					    unsigned low, unsigned high)
+static inline void notrace
+__native_write_msr_notrace(unsigned int msr, u32 low, u32 high)
 {
 	asm volatile("1: wrmsr\n"
 		     "2:\n"
 		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe)
 		     : : "c" (msr), "a"(low), "d" (high) : "memory");
+}
+
+/* Can be uninlined because referenced by paravirt */
+static inline void notrace
+native_write_msr(unsigned int msr, u32 low, u32 high)
+{
+	__native_write_msr_notrace(msr, low, high);
 	if (msr_tracepoint_active(__tracepoint_write_msr))
 		do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
 }
 
+static inline void
+wrmsr_notrace(unsigned int msr, u32 low, u32 high)
+{
+	__native_write_msr_notrace(msr, low, high);
+}
+
 /* Can be uninlined because referenced by paravirt */
-notrace static inline int native_write_msr_safe(unsigned int msr,
-					unsigned low, unsigned high)
+static inline int notrace
+native_write_msr_safe(unsigned int msr, u32 low, u32 high)
 {
 	int err;
+
 	asm volatile("2: wrmsr ; xor %[err],%[err]\n"
 		     "1:\n\t"
 		     ".section .fixup,\"ax\"\n\t"
@@ -223,7 +237,7 @@ do {							\
 	(void)((high) = (u32)(__val >> 32));		\
 } while (0)
 
-static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
+static inline void wrmsr(unsigned int msr, u32 low, u32 high)
 {
 	native_write_msr(msr, low, high);
 }
@@ -231,13 +245,13 @@ static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
 #define rdmsrl(msr, val)			\
 	((val) = native_read_msr((msr)))
 
-static inline void wrmsrl(unsigned msr, u64 val)
+static inline void wrmsrl(unsigned int msr, u64 val)
 {
 	native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
 }
 
 /* wrmsr with exception handling */
-static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
+static inline int wrmsr_safe(unsigned int msr, u32 low, u32 high)
 {
 	return native_write_msr_safe(msr, low, high);
 }
@@ -252,7 +266,7 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
 	__err;							\
 })
 
-static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
+static inline int rdmsrl_safe(unsigned int msr, unsigned long long *p)
 {
 	int err;
 
@@ -325,12 +339,12 @@ static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
 static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
 				 struct msr *msrs)
 {
 	rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
 }
 static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
 				 struct msr *msrs)
 {
 	wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
 }
 static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
 				    u32 *l, u32 *h)
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -76,6 +76,7 @@ int acpi_fix_pin2_polarity __initdata;
 static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
 #endif
 
+#ifdef CONFIG_X86_IO_APIC
 /*
  * Locks related to IOAPIC hotplug
  * Hotplug side:
@@ -88,6 +89,7 @@ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
  *			->ioapic_lock
  */
 static DEFINE_MUTEX(acpi_ioapic_lock);
+#endif
 
 /* --------------------------------------------------------------------------
                               Boot-time Configuration
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -2263,6 +2263,7 @@ void __init apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v))
 	for (drv = __apicdrivers; drv < __apicdrivers_end; drv++) {
 		/* Should happen once for each apic */
 		WARN_ON((*drv)->eoi_write == eoi_write);
+		(*drv)->native_eoi_write = (*drv)->eoi_write;
 		(*drv)->eoi_write = eoi_write;
 	}
 }
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -308,7 +308,7 @@ static void kvm_register_steal_time(void)
 
 static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;
 
-static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
+static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
 {
 	/**
 	 * This relies on __test_and_clear_bit to modify the memory
@@ -319,7 +319,7 @@ static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
 	 */
 	if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
 		return;
-	apic_write(APIC_EOI, APIC_EOI_ACK);
+	apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
 }
 
 static void kvm_guest_cpu_init(void)
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -261,10 +261,8 @@ static inline void __smp_reschedule_interrupt(void)
 
 __visible void smp_reschedule_interrupt(struct pt_regs *regs)
 {
-	irq_enter();
 	ack_APIC_irq();
 	__smp_reschedule_interrupt();
-	irq_exit();
 	/*
 	 * KVM uses this interrupt to force a cpu out of guest mode
 	 */
--- a/arch/x86/lib/msr.c
+++ b/arch/x86/lib/msr.c
@@ -113,14 +113,14 @@ int msr_clear_bit(u32 msr, u8 bit)
 }
 
 #ifdef CONFIG_TRACEPOINTS
-void do_trace_write_msr(unsigned msr, u64 val, int failed)
+void do_trace_write_msr(unsigned int msr, u64 val, int failed)
 {
 	trace_write_msr(msr, val, failed);
 }
 EXPORT_SYMBOL(do_trace_write_msr);
 EXPORT_TRACEPOINT_SYMBOL(write_msr);
 
-void do_trace_read_msr(unsigned msr, u64 val, int failed)
+void do_trace_read_msr(unsigned int msr, u64 val, int failed)
 {
 	trace_read_msr(msr, val, failed);
 }
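A closing note on the apic.c and kvm.c hunks above: apic_set_eoi_write() now saves the driver's original callback in native_eoi_write before overriding eoi_write, so the paravirtual EOI handler can still fall back to a genuine APIC write when its fast path does not apply. The sketch below is a self-contained userspace mock of that save-then-override pattern; all identifiers are illustrative stand-ins, not kernel APIs.

/* Userspace mock of the eoi_write save-then-override pattern; illustrative only. */
#include <stdio.h>
#include <stdint.h>

struct mock_apic {
	void (*eoi_write)(uint32_t reg, uint32_t v);
	void (*native_eoi_write)(uint32_t reg, uint32_t v);
};

static void native_eoi(uint32_t reg, uint32_t v)
{
	printf("native EOI write: reg=0x%x val=0x%x\n", (unsigned)reg, (unsigned)v);
}

static struct mock_apic apic_drv = { .eoi_write = native_eoi };

/* Paravirt handler: absorb the EOI when possible, else use the saved native write. */
static void pv_eoi(uint32_t reg, uint32_t v)
{
	static int calls;

	if (calls++ == 0) {
		printf("PV EOI handled without touching the APIC\n");
		return;
	}
	apic_drv.native_eoi_write(reg, v);
}

static void set_eoi_write(void (*eoi_write)(uint32_t, uint32_t))
{
	/* Save the original before overriding, as apic_set_eoi_write() now does. */
	apic_drv.native_eoi_write = apic_drv.eoi_write;
	apic_drv.eoi_write = eoi_write;
}

int main(void)
{
	set_eoi_write(pv_eoi);
	apic_drv.eoi_write(0xb0, 0);	/* first EOI: paravirt fast path */
	apic_drv.eoi_write(0xb0, 0);	/* second EOI: falls back to the saved native write */
	return 0;
}

Running the mock, the first EOI is absorbed by the paravirt fast path and the second falls through to the saved native callback.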