/*
 * Based on arch/arm/include/asm/processor.h
 *
 * Copyright (C) 1995-1999 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PROCESSOR_H
#define __ASM_PROCESSOR_H

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ __label__ _l; _l: &&_l;})
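
/*
 * Note: this relies on two GNU C extensions: __label__ declares a label
 * local to the statement expression, and &&_l ("labels as values")
 * yields the address of the instruction at that label, e.g.:
 *
 *	void *pc = current_text_addr();
 */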

#ifdef __KERNEL__

#include <linux/string.h>

#include <asm/fpsimd.h>
#include <asm/hw_breakpoint.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/types.h>
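
/*
 * User stack placement: 64-bit tasks may use the full TASK_SIZE_64
 * range, while 32-bit compat tasks must keep their stacks below the
 * AArch32 vectors page mapped at 0xffff0000.
 */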
#ifdef __KERNEL__
#define STACK_TOP_MAX		TASK_SIZE_64
#ifdef CONFIG_COMPAT
#define AARCH32_VECTORS_BASE	0xffff0000
#define STACK_TOP		(test_thread_flag(TIF_32BIT) ? \
				AARCH32_VECTORS_BASE : STACK_TOP_MAX)
#else
#define STACK_TOP		STACK_TOP_MAX
#endif /* CONFIG_COMPAT */
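
/*
 * arm64_dma_phys_limit is initialised from the ZONE_DMA limit at boot;
 * ARCH_LOW_ADDRESS_LIMIT caps early "low" memory allocations to it.
 */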
extern phys_addr_t arm64_dma_phys_limit;
#define ARCH_LOW_ADDRESS_LIMIT	(arm64_dma_phys_limit - 1)
#endif /* __KERNEL__ */

struct debug_info {
	/* Have we suspended stepping by a debugger? */
	int			suspended_step;
	/* Allow breakpoints and watchpoints to be disabled for this thread. */
	int			bps_disabled;
	int			wps_disabled;
	/* Hardware breakpoints pinned to this task. */
	struct perf_event	*hbp_break[ARM_MAX_BRP];
	struct perf_event	*hbp_watch[ARM_MAX_WRP];
};
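
/*
 * cpu_context holds only the callee-saved registers (x19-x28), fp, sp
 * and the return pc that cpu_switch_to() saves and restores across a
 * context switch; everything else lives in pt_regs on the kernel stack.
 */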
struct cpu_context {
	unsigned long x19;
	unsigned long x20;
	unsigned long x21;
	unsigned long x22;
	unsigned long x23;
	unsigned long x24;
	unsigned long x25;
	unsigned long x26;
	unsigned long x27;
	unsigned long x28;
	unsigned long fp;
	unsigned long sp;
	unsigned long pc;
};

struct thread_struct {
	struct cpu_context	cpu_context;	/* cpu context */
	unsigned long		tp_value;	/* TLS register */
#ifdef CONFIG_COMPAT
	unsigned long		tp2_value;
#endif
	struct fpsimd_state	fpsimd_state;
	unsigned long		fault_address;	/* fault info */
	unsigned long		fault_code;	/* ESR_EL1 value */
	struct debug_info	debug;		/* debugging */
};
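
/*
 * task_user_tls() yields the save slot for the user-writable tpidr_el0
 * register: tp2_value for compat tasks (whose real TLS pointer lives in
 * tpidrro_el0), tp_value otherwise.
 */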
#ifdef CONFIG_COMPAT
#define task_user_tls(t)						\
({									\
	unsigned long *__tls;						\
	if (is_compat_thread(task_thread_info(t)))			\
		__tls = &(t)->thread.tp2_value;				\
	else								\
		__tls = &(t)->thread.tp_value;				\
	__tls;								\
})
#else
#define task_user_tls(t)	(&(t)->thread.tp_value)
#endif

#define INIT_THREAD  { }

static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
{
	memset(regs, 0, sizeof(*regs));
	regs->syscallno = ~0UL;
	regs->pc = pc;
}
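
/*
 * A syscallno of ~0UL marks the new register set as "not in a syscall";
 * start_thread() below builds on this to create a fresh 64-bit EL0
 * context (PSR_MODE_EL0t: EL0 using the EL0 stack pointer).
 */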
static inline void start_thread(struct pt_regs *regs, unsigned long pc,
				unsigned long sp)
{
	start_thread_common(regs, pc);
	regs->pstate = PSR_MODE_EL0t;
	regs->sp = sp;
}
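
/*
 * The AArch32 analogue of start_thread(): bit 0 of the entry pc selects
 * the Thumb instruction set, and big-endian kernels start compat tasks
 * with the CPSR E bit set.
 */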
#ifdef CONFIG_COMPAT
static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
				       unsigned long sp)
{
	start_thread_common(regs, pc);
	regs->pstate = COMPAT_PSR_MODE_USR;
	if (pc & 1)
		regs->pstate |= COMPAT_PSR_T_BIT;

#ifdef __AARCH64EB__
	regs->pstate |= COMPAT_PSR_E_BIT;
#endif

	regs->compat_sp = sp;
}
#endif

/* Forward declaration, a strange C thing */
struct task_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long get_wchan(struct task_struct *p);
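
/*
 * "yield" is an architectural hint that the CPU is busy-waiting (e.g.
 * spinning on a lock) and may yield execution to another hardware
 * thread; the "memory" clobber stops the compiler caching values across
 * the relaxation point.
 */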
static inline void cpu_relax(void)
{
	asm volatile("yield" ::: "memory");
}

#define cpu_relax_lowlatency()                cpu_relax()

/* Thread switching */
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
					 struct task_struct *next);

#define task_pt_regs(p) \
	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
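
/*
 * The user-mode pt_regs of a task sit at the top of its kernel stack
 * (THREAD_START_SP bytes in, minus one pt_regs); KSTK_EIP()/KSTK_ESP()
 * read the saved user pc and sp from there.
 */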
#define KSTK_EIP(tsk)	((unsigned long)task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk)	user_stack_pointer(task_pt_regs(tsk))

/*
 * Prefetching support
 */
#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *ptr)
{
	asm volatile("prfm pldl1keep, %a0\n" : : "p" (ptr));
}
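
/*
 * PRFM PLDL1KEEP/PSTL1KEEP hint that the address is about to be
 * loaded/stored and should be pulled into the L1 cache; the "p"
 * constraint with the %a0 template emits ptr as a bare address operand.
 */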
#define ARCH_HAS_PREFETCHW
static inline void prefetchw(const void *ptr)
{
	asm volatile("prfm pstl1keep, %a0\n" : : "p" (ptr));
}

#define ARCH_HAS_SPINLOCK_PREFETCH
static inline void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}

#define HAVE_ARCH_PICK_MMAP_LAYOUT

#endif
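
/*
 * Invoked on each CPU to enable the ARMv8.1 Privileged Access Never
 * feature once it has been established as a system-wide capability.
 */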
void cpu_enable_pan(void *__unused);

#endif /* __ASM_PROCESSOR_H */