Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-11-23 20:24:12 +08:00)
Merge branches 'perf-urgent-for-linus', 'sched-urgent-for-linus', 'timers-urgent-for-linus' and 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  tools/perf: Fix static build of perf tool
  tracing: Fix regression in printk_formats file

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  generic-ipi: Fix kexec boot crash by initializing call_single_queue before enabling interrupts

* 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  clocksource: Make watchdog robust vs. interruption
  timerfd: Fix wakeup of processes when timer is cancelled on clock change

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, MAINTAINERS: Add x86 MCE people
  x86, efi: Do not reserve boot services regions within reserved areas
This commit is contained in: commit 8816ead9d8
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7007,6 +7007,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86.
 S: Maintained
 F: drivers/platform/x86
 
+X86 MCE INFRASTRUCTURE
+M: Tony Luck <tony.luck@intel.com>
+M: Borislav Petkov <bp@amd64.org>
+L: linux-edac@vger.kernel.org
+S: Maintained
+F: arch/x86/kernel/cpu/mcheck/*
+
 XEN HYPERVISOR INTERFACE
 M: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
 M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
--- a/arch/x86/include/asm/memblock.h
+++ b/arch/x86/include/asm/memblock.h
@@ -4,7 +4,6 @@
 #define ARCH_DISCARD_MEMBLOCK
 
 u64 memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align);
-void memblock_x86_to_bootmem(u64 start, u64 end);
 
 void memblock_x86_reserve_range(u64 start, u64 end, char *name);
 void memblock_x86_free_range(u64 start, u64 end);
@@ -19,5 +18,6 @@ u64 memblock_x86_hole_size(u64 start, u64 end);
 u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align);
 u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit);
 u64 memblock_x86_memory_in_range(u64 addr, u64 limit);
+bool memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align);
 
 #endif
--- a/arch/x86/mm/memblock.c
+++ b/arch/x86/mm/memblock.c
@@ -8,7 +8,7 @@
 #include <linux/range.h>
 
 /* Check for already reserved areas */
-static bool __init check_with_memblock_reserved_size(u64 *addrp, u64 *sizep, u64 align)
+bool __init memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align)
 {
         struct memblock_region *r;
         u64 addr = *addrp, last;
@@ -59,7 +59,7 @@ u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align)
                 if (addr >= ei_last)
                         continue;
                 *sizep = ei_last - addr;
-                while (check_with_memblock_reserved_size(&addr, sizep, align))
+                while (memblock_x86_check_reserved_size(&addr, sizep, align))
                         ;
 
                 if (*sizep)
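The while (memblock_x86_check_reserved_size(&addr, sizep, align)) loop above keeps trimming a candidate range until it no longer collides with anything already reserved. Below is a minimal user-space sketch of that idea; the reserved-range table and the simplified trimming rule are invented for illustration and are not the kernel's memblock implementation.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for memblock's reserved regions. */
struct reserved_range { uint64_t start, end; };

static const struct reserved_range reserved[] = {
        { 0x1000, 0x3000 },     /* e.g. firmware tables */
        { 0x5000, 0x6000 },     /* e.g. an earlier reservation */
};

/*
 * Simplified model of the check: if [*addrp, *addrp + *sizep) overlaps a
 * reserved range, move the start past that range (aligned up) and shrink
 * the size.  Returns true when it adjusted anything, so the caller loops
 * until the candidate is clean or empty.
 */
static bool check_reserved_size(uint64_t *addrp, uint64_t *sizep, uint64_t align)
{
        uint64_t addr = *addrp, end = addr + *sizep;

        for (size_t i = 0; i < sizeof(reserved) / sizeof(reserved[0]); i++) {
                if (addr < reserved[i].end && end > reserved[i].start) {
                        uint64_t next = (reserved[i].end + align - 1) & ~(align - 1);

                        *sizep = next < end ? end - next : 0;
                        *addrp = next;
                        return true;
                }
        }
        return false;
}

int main(void)
{
        uint64_t addr = 0x0, size = 0x8000;

        while (check_reserved_size(&addr, &size, 0x1000))
                ;       /* keep trimming, as in memblock_x86_find_in_range_size() */

        printf("usable candidate: [%#llx, %#llx)\n",
               (unsigned long long)addr, (unsigned long long)(addr + size));
        return 0;
}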
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -310,14 +310,31 @@ void __init efi_reserve_boot_services(void)
 
         for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
                 efi_memory_desc_t *md = p;
-                unsigned long long start = md->phys_addr;
-                unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
+                u64 start = md->phys_addr;
+                u64 size = md->num_pages << EFI_PAGE_SHIFT;
 
                 if (md->type != EFI_BOOT_SERVICES_CODE &&
                     md->type != EFI_BOOT_SERVICES_DATA)
                         continue;
 
-                memblock_x86_reserve_range(start, start + size, "EFI Boot");
+                /* Only reserve where possible:
+                 * - Not within any already allocated areas
+                 * - Not over any memory area (really needed, if above?)
+                 * - Not within any part of the kernel
+                 * - Not the bios reserved area
+                 */
+                if ((start+size >= virt_to_phys(_text)
+                                && start <= virt_to_phys(_end)) ||
+                        !e820_all_mapped(start, start+size, E820_RAM) ||
+                        memblock_x86_check_reserved_size(&start, &size,
+                                                        1<<EFI_PAGE_SHIFT)) {
+                        /* Could not reserve, skip it */
+                        md->num_pages = 0;
+                        memblock_dbg(PFX "Could not reserve boot range "
+                                        "[0x%010llx-0x%010llx]\n",
+                                                start, start+size-1);
+                } else
+                        memblock_x86_reserve_range(start, start+size,
+                                                        "EFI Boot");
         }
 }
@@ -334,6 +351,10 @@ static void __init efi_free_boot_services(void)
                     md->type != EFI_BOOT_SERVICES_DATA)
                         continue;
 
+                /* Could not reserve boot area */
+                if (!size)
+                        continue;
+
                 free_bootmem_late(start, size);
         }
 }
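The patched efi_reserve_boot_services() only reserves a boot-services region when it does not touch the kernel image, is fully mapped as E820 RAM, and is not already partly reserved; otherwise it zeroes md->num_pages so the later free pass skips the region. A rough user-space model of that decision follows; the helper functions, the kernel-layout constants, and the sample regions are stand-ins, not the kernel's e820/memblock API.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative region descriptor; the kernel iterates efi_memory_desc_t. */
struct region {
        uint64_t start, size;
        uint64_t num_pages;     /* zeroed when the reservation is skipped */
};

/* Made-up kernel layout standing in for virt_to_phys(_text) / _end. */
static const uint64_t kernel_start = 0x01000000, kernel_end = 0x02000000;

static bool overlaps_kernel(uint64_t start, uint64_t size)
{
        return start + size >= kernel_start && start <= kernel_end;
}

/* Stand-in for e820_all_mapped(..., E820_RAM): pretend RAM starts at 1 MiB. */
static bool all_ram(uint64_t start, uint64_t size)
{
        (void)size;
        return start >= 0x00100000;
}

/* Stand-in for memblock_x86_check_reserved_size(): nothing else reserved. */
static bool shrunk_by_reservations(uint64_t *start, uint64_t *size)
{
        (void)start; (void)size;
        return false;
}

/* Mirrors the patched loop: reserve only where safely possible, otherwise
 * zero num_pages so a later free pass ignores the region. */
static void reserve_boot_services(struct region *r, size_t n)
{
        for (size_t i = 0; i < n; i++) {
                uint64_t start = r[i].start, size = r[i].size;
                bool skip = overlaps_kernel(start, size) ||
                            !all_ram(start, size) ||
                            shrunk_by_reservations(&start, &size);

                if (skip)
                        r[i].num_pages = 0;
                printf("%s [%#llx-%#llx]\n", skip ? "skip   " : "reserve",
                       (unsigned long long)start,
                       (unsigned long long)(start + size - 1));
        }
}

int main(void)
{
        struct region regions[] = {
                { 0x00090000, 0x10000,  16 },   /* below 1 MiB: not treated as RAM */
                { 0x01f00000, 0x200000, 512 },  /* overlaps the kernel image */
                { 0x10000000, 0x100000, 256 },  /* clean: gets reserved */
        };

        reserve_boot_services(regions, sizeof(regions) / sizeof(regions[0]));
        return 0;
}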
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -61,7 +61,9 @@ static enum hrtimer_restart timerfd_tmrproc(struct hrtimer *htmr)
 
 /*
  * Called when the clock was set to cancel the timers in the cancel
- * list.
+ * list. This will wake up processes waiting on these timers. The
+ * wake-up requires ctx->ticks to be non zero, therefore we increment
+ * it before calling wake_up_locked().
  */
 void timerfd_clock_was_set(void)
 {
@@ -76,6 +78,7 @@ void timerfd_clock_was_set(void)
                 spin_lock_irqsave(&ctx->wqh.lock, flags);
                 if (ctx->moffs.tv64 != moffs.tv64) {
                         ctx->moffs.tv64 = KTIME_MAX;
+                        ctx->ticks++;
                         wake_up_locked(&ctx->wqh);
                 }
                 spin_unlock_irqrestore(&ctx->wqh.lock, flags);
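The comment added above states the invariant behind the one-line fix: a timerfd reader only returns when ctx->ticks is non zero, so the cancel-on-clock-change path must bump ticks before waking, or the woken reader simply goes back to sleep. A condensed user-space model of that handshake, using plain pthreads rather than the kernel's wait queues (build with -pthread):

#include <pthread.h>
#include <stdio.h>

/* Minimal model of the timerfd wakeup invariant: the reader returns only
 * when ticks != 0, so the cancel path must increment ticks before
 * signalling, exactly as the patch does with ctx->ticks++. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wqh  = PTHREAD_COND_INITIALIZER;
static unsigned long ticks;

static void *reader(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        while (ticks == 0)                 /* like the wait loop in the read path */
                pthread_cond_wait(&wqh, &lock);
        printf("reader woke, ticks=%lu\n", ticks);
        ticks = 0;
        pthread_mutex_unlock(&lock);
        return NULL;
}

static void clock_was_set(void)
{
        pthread_mutex_lock(&lock);
        ticks++;                           /* without this, the wakeup is lost */
        pthread_cond_signal(&wqh);         /* stands in for wake_up_locked() */
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, reader, NULL);
        clock_was_set();
        pthread_join(t, NULL);
        return 0;
}

The condition variable plays the role of ctx->wqh; dropping the ticks++ line reproduces the lost-wakeup behaviour the patch fixes.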
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -188,6 +188,7 @@ struct clocksource {
 #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
         /* Watchdog related data, used by the framework */
         struct list_head wd_list;
+        cycle_t cs_last;
         cycle_t wd_last;
 #endif
 } ____cacheline_aligned;
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -85,12 +85,15 @@ int smp_call_function_any(const struct cpumask *mask,
  * Generic and arch helpers
  */
 #ifdef CONFIG_USE_GENERIC_SMP_HELPERS
+void __init call_function_init(void);
 void generic_smp_call_function_single_interrupt(void);
 void generic_smp_call_function_interrupt(void);
 void ipi_call_lock(void);
 void ipi_call_unlock(void);
 void ipi_call_lock_irq(void);
 void ipi_call_unlock_irq(void);
+#else
+static inline void call_function_init(void) { }
 #endif
 
 /*
@@ -134,7 +137,7 @@ static inline void smp_send_reschedule(int cpu) { }
 #define smp_prepare_boot_cpu()                  do {} while (0)
 #define smp_call_function_many(mask, func, info, wait) \
                         (up_smp_call_function(func, info))
-static inline void init_call_single_data(void) { }
+static inline void call_function_init(void) { }
 
 static inline int
 smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
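The header change follows a common kernel pattern: the real declaration is provided when CONFIG_USE_GENERIC_SMP_HELPERS is set, and a static-inline no-op otherwise, so start_kernel() can call call_function_init() unconditionally. A tiny sketch of that stub pattern, with a made-up feature macro and helper name:

#include <stdio.h>

/* Sketch of the pattern used above: a hypothetical feature macro selects
 * either the real initializer or an empty inline stub, so the caller
 * never needs an #ifdef of its own. */
#ifdef USE_GENERIC_HELPERS
void helper_init(void);
#else
static inline void helper_init(void) { }
#endif

#ifdef USE_GENERIC_HELPERS
void helper_init(void)
{
        printf("real initializer ran\n");
}
#endif

int main(void)
{
        helper_init();          /* compiles and links either way */
        printf("boot continues\n");
        return 0;
}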
--- a/init/main.c
+++ b/init/main.c
@@ -542,6 +542,7 @@ asmlinkage void __init start_kernel(void)
         timekeeping_init();
         time_init();
         profile_init();
+        call_function_init();
         if (!irqs_disabled())
                 printk(KERN_CRIT "start_kernel(): bug: interrupts were "
                        "enabled early\n");
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -74,7 +74,7 @@ static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
         .notifier_call          = hotplug_cfd,
 };
 
-static int __cpuinit init_call_single_data(void)
+void __init call_function_init(void)
 {
         void *cpu = (void *)(long)smp_processor_id();
         int i;
@@ -88,10 +88,7 @@ static int __cpuinit init_call_single_data(void)
 
         hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
         register_cpu_notifier(&hotplug_cfd_notifier);
-
-        return 0;
 }
-early_initcall(init_call_single_data);
 
 /*
  * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
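Turning the early_initcall into an explicit call_function_init() from start_kernel() guarantees the per-cpu call_single_queue lists exist before interrupts are enabled, which matters when a kexec'd kernel receives a stale IPI very early in boot. A toy user-space model of why that ordering matters; the structures, the "IPI", and the flag names are purely illustrative:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the ordering bug the patch fixes: an IPI handler walks a
 * per-cpu queue, so the queue must be initialized before interrupts can
 * possibly deliver that IPI. */
struct node { struct node *next; };
struct queue { struct node *head; bool initialized; };

static struct queue call_single_queue;  /* stand-in for the per-cpu queue */
static bool irqs_enabled;

static void queue_init(void)
{
        call_single_queue.head = NULL;
        call_single_queue.initialized = true;
}

static void ipi_handler(void)
{
        /* Walking an uninitialized list head is the kexec boot crash. */
        if (!call_single_queue.initialized) {
                printf("BUG: IPI before call_function_init()\n");
                return;
        }
        printf("IPI handled, queue empty=%d\n", call_single_queue.head == NULL);
}

int main(void)
{
        /* Patched ordering in start_kernel(): init first, then interrupts. */
        queue_init();           /* call_function_init() */
        irqs_enabled = true;    /* interrupts come on later in boot */

        if (irqs_enabled)
                ipi_handler();  /* a stale kexec IPI would land here */
        return 0;
}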
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -185,7 +185,6 @@ static struct clocksource *watchdog;
 static struct timer_list watchdog_timer;
 static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
 static DEFINE_SPINLOCK(watchdog_lock);
-static cycle_t watchdog_last;
 static int watchdog_running;
 
 static int clocksource_watchdog_kthread(void *data);
@@ -254,11 +253,6 @@ static void clocksource_watchdog(unsigned long data)
         if (!watchdog_running)
                 goto out;
 
-        wdnow = watchdog->read(watchdog);
-        wd_nsec = clocksource_cyc2ns((wdnow - watchdog_last) & watchdog->mask,
-                                     watchdog->mult, watchdog->shift);
-        watchdog_last = wdnow;
-
         list_for_each_entry(cs, &watchdog_list, wd_list) {
 
                 /* Clocksource already marked unstable? */
@@ -268,19 +262,28 @@ static void clocksource_watchdog(unsigned long data)
                         continue;
                 }
 
+                local_irq_disable();
                 csnow = cs->read(cs);
+                wdnow = watchdog->read(watchdog);
+                local_irq_enable();
 
                 /* Clocksource initialized ? */
                 if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
                         cs->flags |= CLOCK_SOURCE_WATCHDOG;
-                        cs->wd_last = csnow;
+                        cs->wd_last = wdnow;
+                        cs->cs_last = csnow;
                         continue;
                 }
 
-                /* Check the deviation from the watchdog clocksource. */
-                cs_nsec = clocksource_cyc2ns((csnow - cs->wd_last) &
+                wd_nsec = clocksource_cyc2ns((wdnow - cs->wd_last) & watchdog->mask,
+                                             watchdog->mult, watchdog->shift);
+
+                cs_nsec = clocksource_cyc2ns((csnow - cs->cs_last) &
                                              cs->mask, cs->mult, cs->shift);
-                cs->wd_last = csnow;
+                cs->cs_last = csnow;
+                cs->wd_last = wdnow;
+
+                /* Check the deviation from the watchdog clocksource. */
                 if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
                         clocksource_unstable(cs, cs_nsec - wd_nsec);
                         continue;
@@ -318,7 +321,6 @@ static inline void clocksource_start_watchdog(void)
                 return;
         init_timer(&watchdog_timer);
         watchdog_timer.function = clocksource_watchdog;
-        watchdog_last = watchdog->read(watchdog);
         watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
         add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
         watchdog_running = 1;
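With per-clocksource cs_last and wd_last values, both deltas in the watchdog now cover exactly the same interval, and the two counters are sampled back-to-back with interrupts disabled, so a delayed or interrupted watchdog run no longer produces a bogus deviation. A user-space sketch of the masked-delta arithmetic, with toy numbers in place of real clocksource parameters:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the per-clocksource bookkeeping introduced above: both deltas
 * are taken over the same interval and wrapped with the counter mask,
 * mirroring clocksource_cyc2ns()'s (delta * mult) >> shift. */
struct cs {
        uint64_t mask, mult, cs_last, wd_last;
        uint32_t shift;
};

static uint64_t cyc2ns(uint64_t cycles, uint64_t mult, uint32_t shift)
{
        return (cycles * mult) >> shift;
}

/* One watchdog pass for one clocksource; csnow/wdnow would be read
 * back-to-back with interrupts disabled in the real code. */
static int64_t watchdog_check(struct cs *c, uint64_t csnow, uint64_t wdnow,
                              uint64_t wd_mask, uint64_t wd_mult, uint32_t wd_shift)
{
        uint64_t wd_nsec = cyc2ns((wdnow - c->wd_last) & wd_mask, wd_mult, wd_shift);
        uint64_t cs_nsec = cyc2ns((csnow - c->cs_last) & c->mask, c->mult, c->shift);

        c->cs_last = csnow;
        c->wd_last = wdnow;
        return (int64_t)(cs_nsec - wd_nsec);    /* compared to WATCHDOG_THRESHOLD */
}

int main(void)
{
        /* Toy numbers: both "counters" tick once per nanosecond. */
        struct cs tsc = { .mask = ~0ULL, .mult = 1, .shift = 0,
                          .cs_last = 1000, .wd_last = 500 };
        int64_t dev = watchdog_check(&tsc, 1000 + 250000, 500 + 250000,
                                     ~0ULL, 1, 0);

        printf("deviation: %lld ns\n", (long long)dev);
        return 0;
}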
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -240,13 +240,10 @@ static const char **find_next(void *v, loff_t *pos)
         const char **fmt = v;
         int start_index;
 
-        if (!fmt)
-                fmt = __start___trace_bprintk_fmt + *pos;
-
         start_index = __stop___trace_bprintk_fmt - __start___trace_bprintk_fmt;
 
         if (*pos < start_index)
-                return fmt;
+                return __start___trace_bprintk_fmt + *pos;
 
         return find_next_mod_format(start_index, v, fmt, pos);
 }
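The fix above makes find_next() index the built-in format section directly for positions below start_index, instead of returning the stale fmt pointer, and only falls through to module formats afterwards. A small user-space sketch of that two-range iteration over plain arrays; the tables and the helper are invented for illustration:

#include <stdio.h>

/* Sketch of the fixed iteration: positions below start_index index the
 * built-in format table directly; everything past it is delegated to a
 * second table (module formats in the real code). */
static const char *builtin_fmts[] = { "built-in: %d\n", "built-in: %s\n" };
static const char *module_fmts[]  = { "module: %p\n" };

static const char **find_next(long *pos)
{
        long start_index = sizeof(builtin_fmts) / sizeof(builtin_fmts[0]);

        if (*pos < start_index)
                return &builtin_fmts[*pos];     /* the one-line fix above */

        /* stands in for find_next_mod_format() */
        long mod_pos = *pos - start_index;
        if (mod_pos < (long)(sizeof(module_fmts) / sizeof(module_fmts[0])))
                return &module_fmts[mod_pos];
        return NULL;
}

int main(void)
{
        const char **fmt;

        for (long pos = 0; (fmt = find_next(&pos)) != NULL; pos++)
                printf("%ld: %s", pos, *fmt);
        return 0;
}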
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -633,7 +633,7 @@ prefix_SQ = $(subst ','\'',$(prefix))
 
 SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH))
 
-LIBS = -Wl,--whole-archive $(PERFLIBS) -Wl,--no-whole-archive $(EXTLIBS)
+LIBS = -Wl,--whole-archive $(PERFLIBS) -Wl,--no-whole-archive -Wl,--start-group $(EXTLIBS) -Wl,--end-group
 
 ALL_CFLAGS += $(BASIC_CFLAGS)
 ALL_CFLAGS += $(ARCH_CFLAGS)