
Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 updates from Thomas Gleixner:
 "A pile of regression fixes and updates:

   - address the fallout of the patches which made the cpuid - nodeid
     relation permanent: Handling of invalid APIC ids and preventing
     pointless warning messages.

   - force eager FPU when protection keys are enabled. Protection keys
     do not generate FPU exceptions, so they cannot work with the lazy
     FPU mechanism.

   - prevent force migration of interrupts which are not part of the CPU
     vector domain.

   - handle the fact that APIC ids are not updated in the ACPI/MADT
     tables on physical CPU hotplug

   - remove bash-isms from syscall table generator script

   - use the hypervisor supplied APIC frequency when running on VMware"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/pkeys: Make protection keys an "eager" feature
  x86/apic: Prevent pointless warning messages
  x86/acpi: Prevent LAPIC id 0xff from being accounted
  arch/x86: Handle non enumerated CPU after physical hotplug
  x86/unwind: Fix oprofile module link error
  x86/vmware: Skip lapic calibration on VMware
  x86/syscalls: Remove bash-isms in syscall table generator
  x86/irq: Prevent force migration of irqs which are not in the vector domain
Linus Torvalds 2016-10-10 10:59:07 -07:00
commit 5fa0eb0b4d
9 changed files with 79 additions and 32 deletions

View File

@@ -10,8 +10,11 @@ syscall_macro() {
 
 	# Entry can be either just a function name or "function/qualifier"
 	real_entry="${entry%%/*}"
-	qualifier="${entry:${#real_entry}}"	# Strip the function name
-	qualifier="${qualifier:1}"		# Strip the slash, if any
+	if [ "$entry" = "$real_entry" ]; then
+		qualifier=
+	else
+		qualifier=${entry#*/}
+	fi
 
 	echo "__SYSCALL_${abi}($nr, $real_entry, $qualifier)"
 }
@@ -22,7 +25,7 @@ emit() {
 	entry="$3"
 	compat="$4"
 
-	if [ "$abi" == "64" -a -n "$compat" ]; then
+	if [ "$abi" = "64" -a -n "$compat" ]; then
 		echo "a compat entry for a 64-bit syscall makes no sense" >&2
 		exit 1
 	fi
@@ -45,17 +48,17 @@ emit()
 grep '^[0-9]' "$in" | sort -n | (
 	while read nr abi name entry compat; do
 		abi=`echo "$abi" | tr '[a-z]' '[A-Z]'`
-		if [ "$abi" == "COMMON" -o "$abi" == "64" ]; then
+		if [ "$abi" = "COMMON" -o "$abi" = "64" ]; then
 			# COMMON is the same as 64, except that we don't expect X32
 			# programs to use it. Our expectation has nothing to do with
 			# any generated code, so treat them the same.
 			emit 64 "$nr" "$entry" "$compat"
-		elif [ "$abi" == "X32" ]; then
+		elif [ "$abi" = "X32" ]; then
 			# X32 is equivalent to 64 on an X32-compatible kernel.
 			echo "#ifdef CONFIG_X86_X32_ABI"
 			emit 64 "$nr" "$entry" "$compat"
 			echo "#endif"
-		elif [ "$abi" == "I386" ]; then
+		elif [ "$abi" = "I386" ]; then
 			emit "$abi" "$nr" "$entry" "$compat"
 		else
 			echo "Unknown abi $abi" >&2

View File

@@ -27,11 +27,12 @@
 				 XFEATURE_MASK_YMM |		\
 				 XFEATURE_MASK_OPMASK |		\
 				 XFEATURE_MASK_ZMM_Hi256 |	\
-				 XFEATURE_MASK_Hi16_ZMM |	\
-				 XFEATURE_MASK_PKRU)
+				 XFEATURE_MASK_Hi16_ZMM)
 
 /* Supported features which require eager state saving */
-#define XFEATURE_MASK_EAGER	(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR)
+#define XFEATURE_MASK_EAGER	(XFEATURE_MASK_BNDREGS |	\
+				 XFEATURE_MASK_BNDCSR |		\
+				 XFEATURE_MASK_PKRU)
 
 /* All currently supported features */
 #define XCNTXT_MASK	(XFEATURE_MASK_LAZY | XFEATURE_MASK_EAGER)
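
Editor's note, not part of the commit: background on why PKRU has to move into the eager set. Protection-key rights live in the per-thread PKRU register as two bits per key and are consulted by the hardware on ordinary loads and stores; nothing about them uses FPU instructions, so the lazy-restore fault never fires for them, and the register must always be loaded. A minimal user-space sketch of the PKRU bit layout; the macro names below are illustrative, not the kernel's.

#include <stdint.h>
#include <stdio.h>

#define PKRU_AD_BIT(pkey)	(1u << ((pkey) * 2))		/* access-disable */
#define PKRU_WD_BIT(pkey)	(1u << ((pkey) * 2 + 1))	/* write-disable  */

int main(void)
{
	uint32_t pkru = 0;
	int pkey = 1;			/* hypothetical protection key */

	pkru |= PKRU_WD_BIT(pkey);	/* deny writes to pages tagged with pkey 1 */

	printf("PKRU=0x%08x: pkey %d reads %s, writes %s\n", pkru, pkey,
	       (pkru & PKRU_AD_BIT(pkey)) ? "denied" : "allowed",
	       (pkru & PKRU_WD_BIT(pkey)) ? "denied" : "allowed");
	return 0;
}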

View File

@@ -23,6 +23,8 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
 
 bool unwind_next_frame(struct unwind_state *state);
 
+unsigned long unwind_get_return_address(struct unwind_state *state);
+
 static inline bool unwind_done(struct unwind_state *state)
 {
 	return state->stack_info.type == STACK_TYPE_UNKNOWN;
@@ -48,8 +50,6 @@ unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
 	return state->bp + 1;
 }
 
-unsigned long unwind_get_return_address(struct unwind_state *state);
-
 #else /* !CONFIG_FRAME_POINTER */
 
 static inline
@@ -58,16 +58,6 @@ unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
 	return NULL;
 }
 
-static inline
-unsigned long unwind_get_return_address(struct unwind_state *state)
-{
-	if (unwind_done(state))
-		return 0;
-
-	return ftrace_graph_ret_addr(state->task, &state->graph_idx,
-				     *state->sp, state->sp);
-}
-
 #endif /* CONFIG_FRAME_POINTER */
 
 #endif /* _ASM_X86_UNWIND_H */

View File

@@ -233,6 +233,10 @@ acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end)
 
 	acpi_table_print_madt_entry(header);
 
+	/* Ignore invalid ID */
+	if (processor->id == 0xff)
+		return 0;
+
 	/*
 	 * We need to register disabled CPU as well to permit
 	 * counting disabled CPUs. This allows us to size
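
Editor's note, not part of the commit: a standalone sketch of why an APIC ID of 0xff in a MADT Local APIC entry means "no CPU here". 0xff is the xAPIC broadcast ID, so firmware uses it in entries for empty or hot-pluggable sockets, and counting such entries would inflate the possible-CPU accounting. The struct follows the ACPI MADT Local APIC entry layout; the function and field names are illustrative, not the kernel's.

#include <stdint.h>
#include <stdio.h>

struct madt_lapic {
	uint8_t  type;		/* 0 = processor local APIC */
	uint8_t  length;	/* always 8 for this entry type */
	uint8_t  acpi_uid;
	uint8_t  apic_id;
	uint32_t flags;		/* bit 0: enabled */
};

static void register_lapic(const struct madt_lapic *e)
{
	if (e->apic_id == 0xff) {	/* placeholder entry, no real CPU behind it */
		printf("skipping LAPIC entry with id 0xff\n");
		return;
	}
	printf("CPU uid %u, apic id %u, %s\n",
	       (unsigned)e->acpi_uid, (unsigned)e->apic_id,
	       (e->flags & 1) ? "enabled" : "disabled");
}

int main(void)
{
	struct madt_lapic entries[] = {
		{ 0, 8, 0, 0,    1 },	/* boot CPU */
		{ 0, 8, 1, 0xff, 0 },	/* hot-pluggable socket, no CPU fitted */
	};
	for (unsigned i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
		register_lapic(&entries[i]);
	return 0;
}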

View File

@@ -2128,9 +2128,11 @@ int __generic_processor_info(int apicid, int version, bool enabled)
 	if (num_processors >= nr_cpu_ids) {
 		int thiscpu = max + disabled_cpus;
 
-		pr_warning(
-			"APIC: NR_CPUS/possible_cpus limit of %i reached."
-			" Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
+		if (enabled) {
+			pr_warning("APIC: NR_CPUS/possible_cpus limit of %i "
+				   "reached. Processor %d/0x%x ignored.\n",
+				   max, thiscpu, apicid);
+		}
 
 		disabled_cpus++;
 		return -EINVAL;

View File

@@ -661,11 +661,28 @@ void irq_complete_move(struct irq_cfg *cfg)
  */
 void irq_force_complete_move(struct irq_desc *desc)
 {
-	struct irq_data *irqdata = irq_desc_get_irq_data(desc);
-	struct apic_chip_data *data = apic_chip_data(irqdata);
-	struct irq_cfg *cfg = data ? &data->cfg : NULL;
+	struct irq_data *irqdata;
+	struct apic_chip_data *data;
+	struct irq_cfg *cfg;
 	unsigned int cpu;
 
+	/*
+	 * The function is called for all descriptors regardless of which
+	 * irqdomain they belong to. For example if an IRQ is provided by
+	 * an irq_chip as part of a GPIO driver, the chip data for that
+	 * descriptor is specific to the irq_chip in question.
+	 *
+	 * Check first that the chip_data is what we expect
+	 * (apic_chip_data) before touching it any further.
+	 */
+	irqdata = irq_domain_get_irq_data(x86_vector_domain,
+					  irq_desc_get_irq(desc));
+	if (!irqdata)
+		return;
+
+	data = apic_chip_data(irqdata);
+	cfg = data ? &data->cfg : NULL;
+
 	if (!cfg)
 		return;

View File

@@ -27,6 +27,7 @@
 #include <asm/div64.h>
 #include <asm/x86_init.h>
 #include <asm/hypervisor.h>
+#include <asm/apic.h>
 
 #define CPUID_VMWARE_INFO_LEAF	0x40000000
 #define VMWARE_HYPERVISOR_MAGIC	0x564D5868
@@ -82,10 +83,17 @@ static void __init vmware_platform_setup(void)
 
 	VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
 
-	if (ebx != UINT_MAX)
+	if (ebx != UINT_MAX) {
 		x86_platform.calibrate_tsc = vmware_get_tsc_khz;
-	else
+#ifdef CONFIG_X86_LOCAL_APIC
+		/* Skip lapic calibration since we know the bus frequency. */
+		lapic_timer_frequency = ecx / HZ;
+		pr_info("Host bus clock speed read from hypervisor : %u Hz\n",
+			ecx);
+#endif
+	} else {
+		pr_warn("Failed to get TSC freq from the hypervisor\n");
+	}
 }
 
 /*
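
Editor's note, not part of the commit: as the hunk above shows, the GETHZ call reports the APIC bus clock in ecx in Hz, and lapic_timer_frequency wants timer ticks per jiffy, hence the division by HZ. A tiny sketch of the arithmetic; the HZ and bus-clock values below are made up for illustration.

#include <stdio.h>

#define HZ 250u			/* example kernel tick rate (jiffies per second) */

int main(void)
{
	unsigned int ecx = 66000000u;	/* hypothetical 66 MHz bus clock from GETHZ */
	unsigned int lapic_timer_frequency = ecx / HZ;

	printf("%u Hz bus clock / %u ticks per second = %u bus clocks per jiffy\n",
	       ecx, HZ, lapic_timer_frequency);
	return 0;
}

With the value supplied by the hypervisor, the usual LAPIC timer calibration loop can be skipped entirely.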

View File

@@ -1407,9 +1407,21 @@ __init void prefill_possible_map(void)
 {
 	int i, possible;
 
-	/* no processor from mptable or madt */
-	if (!num_processors)
-		num_processors = 1;
+	/* No boot processor was found in mptable or ACPI MADT */
+	if (!num_processors) {
+		int apicid = boot_cpu_physical_apicid;
+		int cpu = hard_smp_processor_id();
+
+		pr_warn("Boot CPU (id %d) not listed by BIOS\n", cpu);
+
+		/* Make sure boot cpu is enumerated */
+		if (apic->cpu_present_to_apicid(0) == BAD_APICID &&
+		    apic->apic_id_valid(apicid))
+			generic_processor_info(apicid, boot_cpu_apic_version);
+
+		if (!num_processors)
+			num_processors = 1;
+	}
 
 	i = setup_max_cpus ?: 1;
 	if (setup_possible_cpus == -1) {

View File

@@ -5,6 +5,16 @@
 #include <asm/stacktrace.h>
 #include <asm/unwind.h>
 
+unsigned long unwind_get_return_address(struct unwind_state *state)
+{
+	if (unwind_done(state))
+		return 0;
+
+	return ftrace_graph_ret_addr(state->task, &state->graph_idx,
+				     *state->sp, state->sp);
+}
+EXPORT_SYMBOL_GPL(unwind_get_return_address);
+
 bool unwind_next_frame(struct unwind_state *state)
 {
 	struct stack_info *info = &state->stack_info;