Merging v3.10-rc2, as I need to apply a fix for commit 3cc8e40e8f
("xen/arm: rename xen_secondary_init and run it on every online cpu").
That commit is in v3.10-rc2; the current branch is based on v3.10-rc1.

This commit is contained in:
commit 088eef2219
@@ -191,9 +191,11 @@ Linux it will look something like this:
	};

The bootargs property contains the kernel arguments, and the initrd-*
properties define the address and size of an initrd blob.  The
chosen node may also optionally contain an arbitrary number of
additional properties for platform-specific configuration data.
properties define the address and size of an initrd blob.  Note that
initrd-end is the first address after the initrd image, so this doesn't
match the usual semantic of struct resource.  The chosen node may also
optionally contain an arbitrary number of additional properties for
platform-specific configuration data.

During early boot, the architecture setup code calls of_scan_flat_dt()
several times with different helper callbacks to parse device tree

Documentation/kernel-per-CPU-kthreads.txt (new file, 202 lines)
@@ -0,0 +1,202 @@
REDUCING OS JITTER DUE TO PER-CPU KTHREADS

This document lists per-CPU kthreads in the Linux kernel and presents
options to control their OS jitter.  Note that non-per-CPU kthreads are
not listed here.  To reduce OS jitter from non-per-CPU kthreads, bind
them to a "housekeeping" CPU dedicated to such work.


REFERENCES

o	Documentation/IRQ-affinity.txt:  Binding interrupts to sets of CPUs.

o	Documentation/cgroups:  Using cgroups to bind tasks to sets of CPUs.

o	man taskset:  Using the taskset command to bind tasks to sets
	of CPUs.

o	man sched_setaffinity:  Using the sched_setaffinity() system
	call to bind tasks to sets of CPUs.

o	/sys/devices/system/cpu/cpuN/online:  Control CPU N's hotplug state,
	writing "0" to offline and "1" to online.

o	In order to locate kernel-generated OS jitter on CPU N:

		cd /sys/kernel/debug/tracing
		echo 1 > max_graph_depth	# Increase the "1" for more detail
		echo function_graph > current_tracer
		# run workload
		cat per_cpu/cpuN/trace

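For example, a minimal sketch of the taskset approach listed above (the
PID 1234 and the CPU numbers are hypothetical choices):

		taskset -pc 0,1 1234	# bind PID 1234 to housekeeping CPUs 0 and 1
		taskset -p 1234		# read back the affinity mask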

KTHREADS

Name: ehca_comp/%u
Purpose: Periodically process Infiniband-related work.
To reduce its OS jitter, do any of the following:
1.	Don't use eHCA Infiniband hardware, instead choosing hardware
	that does not require per-CPU kthreads.  This will prevent these
	kthreads from being created in the first place.  (This will
	work for most people, as this hardware, though important, is
	relatively old and is produced in relatively low unit volumes.)
2.	Do all eHCA-Infiniband-related work on other CPUs, including
	interrupts.
3.	Rework the eHCA driver so that its per-CPU kthreads are
	provisioned only on selected CPUs.


Name: irq/%d-%s
Purpose: Handle threaded interrupts.
To reduce its OS jitter, do the following:
1.	Use irq affinity to force the irq threads to execute on
	some other CPU.

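For example, a minimal sketch of steering an interrupt away from the
CPU to be de-jittered (IRQ number 44 is hypothetical; the value written
is a hex bitmask of allowed CPUs):

		echo 2 > /proc/irq/44/smp_affinity	# allow IRQ 44 only on CPU 1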

Name: kcmtpd_ctr_%d
Purpose: Handle Bluetooth work.
To reduce its OS jitter, do one of the following:
1.	Don't use Bluetooth, in which case these kthreads won't be
	created in the first place.
2.	Use irq affinity to force Bluetooth-related interrupts to
	occur on some other CPU and furthermore initiate all
	Bluetooth activity on some other CPU.

Name: ksoftirqd/%u
Purpose: Execute softirq handlers when threaded or when under heavy load.
To reduce its OS jitter, each softirq vector must be handled
separately as follows:
TIMER_SOFTIRQ:  Do all of the following:
1.	To the extent possible, keep the CPU out of the kernel when it
	is non-idle, for example, by avoiding system calls and by forcing
	both kernel threads and interrupts to execute elsewhere.
2.	Build with CONFIG_HOTPLUG_CPU=y.  After boot completes, force
	the CPU offline, then bring it back online.  This forces
	recurring timers to migrate elsewhere.  If you are concerned
	with multiple CPUs, force them all offline before bringing the
	first one back online.  Once you have onlined the CPUs in question,
	do not offline any other CPUs, because doing so could force the
	timer back onto one of the CPUs in question.
NET_TX_SOFTIRQ and NET_RX_SOFTIRQ:  Do all of the following:
1.	Force networking interrupts onto other CPUs.
2.	Initiate any network I/O on other CPUs.
3.	Once your application has started, prevent CPU-hotplug operations
	from being initiated from tasks that might run on the CPU to
	be de-jittered.  (It is OK to force this CPU offline and then
	bring it back online before you start your application.)
BLOCK_SOFTIRQ:  Do all of the following:
1.	Force block-device interrupts onto some other CPU.
2.	Initiate any block I/O on other CPUs.
3.	Once your application has started, prevent CPU-hotplug operations
	from being initiated from tasks that might run on the CPU to
	be de-jittered.  (It is OK to force this CPU offline and then
	bring it back online before you start your application.)
BLOCK_IOPOLL_SOFTIRQ:  Do all of the following:
1.	Force block-device interrupts onto some other CPU.
2.	Initiate any block I/O and block-I/O polling on other CPUs.
3.	Once your application has started, prevent CPU-hotplug operations
	from being initiated from tasks that might run on the CPU to
	be de-jittered.  (It is OK to force this CPU offline and then
	bring it back online before you start your application.)
TASKLET_SOFTIRQ:  Do one or more of the following:
1.	Avoid use of drivers that use tasklets.  (Such drivers will contain
	calls to things like tasklet_schedule().)
2.	Convert all drivers that you must use from tasklets to workqueues.
3.	Force interrupts for drivers using tasklets onto other CPUs,
	and also do I/O involving these drivers on other CPUs.
SCHED_SOFTIRQ:  Do all of the following:
1.	Avoid sending scheduler IPIs to the CPU to be de-jittered,
	for example, ensure that at most one runnable kthread is present
	on that CPU.  If a thread that expects to run on the de-jittered
	CPU awakens, the scheduler will send an IPI that can result in
	a subsequent SCHED_SOFTIRQ.
2.	Build with CONFIG_RCU_NOCB_CPU=y, CONFIG_RCU_NOCB_CPU_ALL=y,
	CONFIG_NO_HZ_FULL=y, and, in addition, ensure that the CPU
	to be de-jittered is marked as an adaptive-ticks CPU using the
	"nohz_full=" boot parameter.  This reduces the number of
	scheduler-clock interrupts that the de-jittered CPU receives,
	minimizing its chances of being selected to do the load balancing
	work that runs in SCHED_SOFTIRQ context.
3.	To the extent possible, keep the CPU out of the kernel when it
	is non-idle, for example, by avoiding system calls and by
	forcing both kernel threads and interrupts to execute elsewhere.
	This further reduces the number of scheduler-clock interrupts
	received by the de-jittered CPU.
HRTIMER_SOFTIRQ:  Do all of the following:
1.	To the extent possible, keep the CPU out of the kernel when it
	is non-idle.  For example, avoid system calls and force both
	kernel threads and interrupts to execute elsewhere.
2.	Build with CONFIG_HOTPLUG_CPU=y.  Once boot completes, force the
	CPU offline, then bring it back online.  This forces recurring
	timers to migrate elsewhere.  If you are concerned with multiple
	CPUs, force them all offline before bringing the first one
	back online.  Once you have onlined the CPUs in question, do not
	offline any other CPUs, because doing so could force the timer
	back onto one of the CPUs in question.
RCU_SOFTIRQ:  Do at least one of the following:
1.	Offload callbacks and keep the CPU in either dyntick-idle or
	adaptive-ticks state by doing all of the following:
	a.	Build with CONFIG_RCU_NOCB_CPU=y, CONFIG_RCU_NOCB_CPU_ALL=y,
		CONFIG_NO_HZ_FULL=y, and, in addition, ensure that the CPU
		to be de-jittered is marked as an adaptive-ticks CPU using
		the "nohz_full=" boot parameter.  Bind the rcuo kthreads
		to housekeeping CPUs, which can tolerate OS jitter.
	b.	To the extent possible, keep the CPU out of the kernel
		when it is non-idle, for example, by avoiding system
		calls and by forcing both kernel threads and interrupts
		to execute elsewhere.
2.	Enable RCU to do its processing remotely via dyntick-idle by
	doing all of the following:
	a.	Build with CONFIG_NO_HZ=y and CONFIG_RCU_FAST_NO_HZ=y.
	b.	Ensure that the CPU goes idle frequently, allowing other
		CPUs to detect that it has passed through an RCU quiescent
		state.  If the kernel is built with CONFIG_NO_HZ_FULL=y,
		userspace execution also allows other CPUs to detect that
		the CPU in question has passed through a quiescent state.
	c.	To the extent possible, keep the CPU out of the kernel
		when it is non-idle, for example, by avoiding system
		calls and by forcing both kernel threads and interrupts
		to execute elsewhere.

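In practice, the offline/online sequence used by the TIMER_SOFTIRQ and
HRTIMER_SOFTIRQ items above looks like this (a sketch assuming
CONFIG_HOTPLUG_CPU=y; CPU 3 is a hypothetical choice):

		echo 0 > /sys/devices/system/cpu/cpu3/online	# force CPU 3 offline
		echo 1 > /sys/devices/system/cpu/cpu3/online	# bring it back online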

Name: rcuc/%u
Purpose: Execute RCU callbacks in CONFIG_RCU_BOOST=y kernels.
To reduce its OS jitter, do at least one of the following:
1.	Build the kernel with CONFIG_PREEMPT=n.  This prevents these
	kthreads from being created in the first place, and also obviates
	the need for RCU priority boosting.  This approach is feasible
	for workloads that do not require high degrees of responsiveness.
2.	Build the kernel with CONFIG_RCU_BOOST=n.  This prevents these
	kthreads from being created in the first place.  This approach
	is feasible only if your workload never requires RCU priority
	boosting, for example, if you ensure frequent idle time on all
	CPUs that might execute within the kernel.
3.	Build with CONFIG_RCU_NOCB_CPU=y and CONFIG_RCU_NOCB_CPU_ALL=y,
	which offloads all RCU callbacks to kthreads that can be moved
	off of CPUs susceptible to OS jitter.  This approach prevents the
	rcuc/%u kthreads from having any work to do, so that they are
	never awakened.
4.	Ensure that the CPU never enters the kernel, and, in particular,
	avoid initiating any CPU hotplug operations on this CPU.  This is
	another way of preventing any callbacks from being queued on the
	CPU, again preventing the rcuc/%u kthreads from having any work
	to do.

Name: rcuob/%d, rcuop/%d, and rcuos/%d
Purpose: Offload RCU callbacks from the corresponding CPU.
To reduce its OS jitter, do at least one of the following:
1.	Use affinity, cgroups, or other mechanism to force these kthreads
	to execute on some other CPU.
2.	Build with CONFIG_RCU_NOCB_CPUS=n, which will prevent these
	kthreads from being created in the first place.  However, please
	note that this will not eliminate OS jitter, but will instead
	shift it to RCU_SOFTIRQ.

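Putting the rcuo pieces together, one possible recipe (a sketch; the
CPU ranges are hypothetical) is to boot with "nohz_full=2-7 rcu_nocbs=2-7"
and then bind the offload kthreads to the housekeeping CPUs:

		for p in $(pgrep '^rcuo'); do taskset -pc 0,1 $p; done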

Name: watchdog/%u
Purpose: Detect software lockups on each CPU.
To reduce its OS jitter, do at least one of the following:
1.	Build with CONFIG_LOCKUP_DETECTOR=n, which will prevent these
	kthreads from being created in the first place.
2.	Echo a zero to /proc/sys/kernel/watchdog to disable the
	watchdog timer.
3.	Echo a large number into /proc/sys/kernel/watchdog_thresh in
	order to reduce the frequency of OS jitter due to the watchdog
	timer down to a level that is acceptable for your workload.

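For example (a sketch; the 60-second threshold is a hypothetical value
to be tuned for your workload):

		echo 0 > /proc/sys/kernel/watchdog		# option 2: disable entirely
		echo 60 > /proc/sys/kernel/watchdog_thresh	# option 3: slow it down
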
@@ -268,7 +268,7 @@ situations.

System Power Management Phases
------------------------------
Suspending or resuming the system is done in several phases.  Different phases
are used for standby or memory sleep states ("suspend-to-RAM") and the
are used for freeze, standby, and memory sleep states ("suspend-to-RAM") and the
hibernation state ("suspend-to-disk").  Each phase involves executing callbacks
for every device before the next phase begins.  Not all busses or classes
support all these callbacks and not all drivers use all the callbacks.  The
@@ -309,7 +309,8 @@ execute the corresponding method from dev->driver->pm instead if there is one.

Entering System Suspend
-----------------------
When the system goes into the standby or memory sleep state, the phases are:
When the system goes into the freeze, standby or memory sleep state,
the phases are:

	prepare, suspend, suspend_late, suspend_noirq.

@@ -368,7 +369,7 @@ the devices that were suspended.

Leaving System Suspend
----------------------
When resuming from standby or memory sleep, the phases are:
When resuming from freeze, standby or memory sleep, the phases are:

	resume_noirq, resume_early, resume, complete.

@@ -433,8 +434,8 @@ the system log.

Entering Hibernation
--------------------
Hibernating the system is more complicated than putting it into the standby or
memory sleep state, because it involves creating and saving a system image.
Hibernating the system is more complicated than putting it into the other
sleep states, because it involves creating and saving a system image.
Therefore there are more phases for hibernation, with a different set of
callbacks.  These phases always run after tasks have been frozen and memory has
been freed.
@@ -485,8 +486,8 @@ image forms an atomic snapshot of the system state.

At this point the system image is saved, and the devices then need to be
prepared for the upcoming system shutdown.  This is much like suspending them
before putting the system into the standby or memory sleep state, and the phases
are similar.
before putting the system into the freeze, standby or memory sleep state,
and the phases are similar.

9.	The prepare phase is discussed above.

@@ -7,8 +7,8 @@ running. The interface exists in /sys/power/ directory (assuming sysfs
is mounted at /sys).

/sys/power/state controls system power state.  Reading from this file
returns what states are supported, which is hard-coded to 'standby'
(Power-On Suspend), 'mem' (Suspend-to-RAM), and 'disk'
returns what states are supported, which is hard-coded to 'freeze',
'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and 'disk'
(Suspend-to-Disk).

Writing to this file one of those strings causes the system to
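With the freeze state added, the interface is exercised the same way as
before (a sketch; the exact list read back depends on platform support):

	cat /sys/power/state		# e.g. "freeze standby mem disk"
	echo freeze > /sys/power/state	# enter the new freeze state
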
@@ -15,8 +15,10 @@ A suspend/hibernation notifier may be used for this purpose.
The subsystems or drivers having such needs can register suspend notifiers that
will be called upon the following events by the PM core:

PM_HIBERNATION_PREPARE	The system is going to hibernate or suspend, tasks will
			be frozen immediately.
PM_HIBERNATION_PREPARE	The system is going to hibernate, tasks will be frozen
			immediately.  This is different from PM_SUSPEND_PREPARE
			below, because here we do additional work between notifiers
			and drivers freezing.

PM_POST_HIBERNATION	The system memory state has been restored from a
			hibernation image or an error occurred during
@@ -2,12 +2,26 @@
System Power Management States


The kernel supports three power management states generically, though
each is dependent on platform support code to implement the low-level
details for each state.  This file describes each state, what they are
The kernel supports four power management states generically, though
one is generic and the other three are dependent on platform support
code to implement the low-level details for each state.
This file describes each state, what they are
commonly called, what ACPI state they map to, and what string to write
to /sys/power/state to enter that state.

State:		Freeze / Low-Power Idle
ACPI State:	S0
String:		"freeze"

This state is a generic, pure software, light-weight, low-power state.
It allows more energy to be saved relative to idle by freezing user
space and putting all I/O devices into low-power states (possibly
lower-power than available at run time), such that the processors can
spend more time in their idle states.
This state can be used for platforms without Standby/Suspend-to-RAM
support, or it can be used in addition to Suspend-to-RAM (memory sleep)
to provide reduced resume latency.


State:		Standby / Power-On Suspend
ACPI State:	S1
@@ -22,9 +36,6 @@ We try to put devices in a low-power state equivalent to D1, which
also offers low power savings, but low resume latency.  Not all devices
support D1, and those that don't are left on.

A transition from Standby to the On state should take about 1-2
seconds.


State:		Suspend-to-RAM
ACPI State:	S3
@@ -42,9 +53,6 @@ transition back to the On state.
For at least ACPI, STR requires some minimal boot-strapping code to
resume the system from STR.  This may be true on other platforms.

A transition from Suspend-to-RAM to the On state should take about
3-5 seconds.


State:		Suspend-to-disk
ACPI State:	S4
@@ -74,7 +82,3 @@ low-power state (like ACPI S4), or it may simply power down. Powering
down offers greater savings, and allows this mechanism to work on any
system.  However, entering a real low-power state allows the user to
trigger wake up events (e.g. pressing a key or opening a laptop lid).

A transition from Suspend-to-Disk to the On state should take about 30
seconds, though it's typically a bit more with the current
implementation.

MAINTAINERS (16 lines changed)
@@ -4976,6 +4976,13 @@ S:	Maintained
F:	Documentation/hwmon/lm90
F:	drivers/hwmon/lm90.c

LM95234 HARDWARE MONITOR DRIVER
M:	Guenter Roeck <linux@roeck-us.net>
L:	lm-sensors@lm-sensors.org
S:	Maintained
F:	Documentation/hwmon/lm95234
F:	drivers/hwmon/lm95234.c

LME2510 MEDIA DRIVER
M:	Malcolm Priestley <tvboxspy@gmail.com>
L:	linux-media@vger.kernel.org
@@ -7854,7 +7861,7 @@ L:	linux-scsi@vger.kernel.org
L:	target-devel@vger.kernel.org
L:	http://groups.google.com/group/linux-iscsi-target-dev
W:	http://www.linux-iscsi.org
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/nab/lio-core.git master
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
S:	Supported
F:	drivers/target/
F:	include/target/
@@ -8182,6 +8189,13 @@ F:	drivers/mmc/host/sh_mobile_sdhi.c
F:	include/linux/mmc/tmio.h
F:	include/linux/mmc/sh_mobile_sdhi.h

TMP401 HARDWARE MONITOR DRIVER
M:	Guenter Roeck <linux@roeck-us.net>
L:	lm-sensors@lm-sensors.org
S:	Maintained
F:	Documentation/hwmon/tmp401
F:	drivers/hwmon/tmp401.c

TMPFS (SHMEM FILESYSTEM)
M:	Hugh Dickins <hughd@google.com>
L:	linux-mm@kvack.org

Makefile (2 lines changed)
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 10
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc2
NAME = Unicycling Gorilla

# *DOCUMENTATION*
@@ -213,6 +213,9 @@ config USE_GENERIC_SMP_HELPERS
config GENERIC_SMP_IDLE_THREAD
	bool

config GENERIC_IDLE_POLL_SETUP
	bool

# Select if arch init_task initializer is different to init/init_task.c
config ARCH_INIT_TASK
	bool

@@ -38,6 +38,7 @@ config ARM
	select HAVE_GENERIC_HARDIRQS
	select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7))
	select HAVE_IDE if PCI || ISA || PCMCIA
	select HAVE_IRQ_TIME_ACCOUNTING
	select HAVE_KERNEL_GZIP
	select HAVE_KERNEL_LZMA
	select HAVE_KERNEL_LZO
@@ -488,7 +489,7 @@ config ARCH_IXP4XX
config ARCH_DOVE
	bool "Marvell Dove"
	select ARCH_REQUIRE_GPIOLIB
	select CPU_V7
	select CPU_PJ4
	select GENERIC_CLOCKEVENTS
	select MIGHT_HAVE_PCI
	select PINCTRL
@@ -309,7 +309,7 @@ define archhelp
  echo '  Image         - Uncompressed kernel image (arch/$(ARCH)/boot/Image)'
  echo '* xipImage      - XIP kernel image, if configured (arch/$(ARCH)/boot/xipImage)'
  echo '  uImage        - U-Boot wrapped zImage'
  echo '  bootpImage    - Combined zImage and initial RAM disk'
  echo '  bootpImage    - Combined zImage and initial RAM disk'
  echo '                  (supply initrd image via make variable INITRD=<path>)'
  echo '* dtbs          - Build device tree blobs for enabled boards'
  echo '  install       - Install uncompressed kernel'
@@ -15,8 +15,6 @@
#include <linux/smp.h>
#include <linux/spinlock.h>

#include <linux/irqchip/arm-gic.h>

#include <asm/mcpm.h>
#include <asm/smp.h>
#include <asm/smp_plat.h>
@@ -49,7 +47,6 @@ static int __cpuinit mcpm_boot_secondary(unsigned int cpu, struct task_struct *i
static void __cpuinit mcpm_secondary_init(unsigned int cpu)
{
	mcpm_cpu_powered_up();
	gic_secondary_init(0);
}

#ifdef CONFIG_HOTPLUG_CPU
@@ -233,15 +233,15 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
	((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr),	\
						atomic64_t,		\
						counter),		\
					      (unsigned long)(o),	\
					      (unsigned long)(n)))
					      (unsigned long long)(o),	\
					      (unsigned long long)(n)))

#define cmpxchg64_local(ptr, o, n)					\
	((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr),	\
						local64_t,		\
						a),			\
					     (unsigned long)(o),	\
					     (unsigned long)(n)))
					     (unsigned long long)(o),	\
					     (unsigned long long)(n)))

#endif	/* __LINUX_ARM_ARCH__ >= 6 */

@@ -307,11 +307,6 @@ static int tegra_emc_probe(struct platform_device *pdev)
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "missing register base\n");
		return -ENOMEM;
	}

	emc_regbase = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(emc_regbase))
		return PTR_ERR(emc_regbase);
@@ -381,11 +381,6 @@ static int s3c_adc_probe(struct platform_device *pdev)
	}

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs) {
		dev_err(dev, "failed to find registers\n");
		return -ENXIO;
	}

	adc->regs = devm_ioremap_resource(dev, regs);
	if (IS_ERR(adc->regs))
		return PTR_ERR(adc->regs);
@@ -152,11 +152,12 @@ int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);

static int __init xen_secondary_init(unsigned int cpu)
static void __init xen_percpu_init(void *unused)
{
	struct vcpu_register_vcpu_info info;
	struct vcpu_info *vcpup;
	int err;
	int cpu = get_cpu();

	pr_info("Xen: initializing cpu%d\n", cpu);
	vcpup = per_cpu_ptr(xen_vcpu_info, cpu);
@@ -165,14 +166,10 @@ static int __init xen_secondary_init(unsigned int cpu)
	info.offset = offset_in_page(vcpup);

	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
	if (err) {
		pr_debug("register_vcpu_info failed: err=%d\n", err);
	} else {
		/* This cpu is using the registered vcpu info, even if
		   later ones fail to. */
		per_cpu(xen_vcpu, cpu) = vcpup;
	}
	return 0;
	BUG_ON(err);
	per_cpu(xen_vcpu, cpu) = vcpup;

	enable_percpu_irq(xen_events_irq, 0);
}

static void xen_restart(char str, const char *cmd)
@@ -208,7 +205,6 @@ static int __init xen_guest_init(void)
	const char *version = NULL;
	const char *xen_prefix = "xen,xen-";
	struct resource res;
	int i;

	node = of_find_compatible_node(NULL, NULL, "xen,xen");
	if (!node) {
@@ -265,19 +261,23 @@ static int __init xen_guest_init(void)
					       sizeof(struct vcpu_info));
	if (xen_vcpu_info == NULL)
		return -ENOMEM;
	for_each_online_cpu(i)
		xen_secondary_init(i);

	gnttab_init();
	if (!xen_initial_domain())
		xenbus_probe(NULL);

	return 0;
}
core_initcall(xen_guest_init);

static int __init xen_pm_init(void)
{
	pm_power_off = xen_power_off;
	arm_pm_restart = xen_restart;

	return 0;
}
core_initcall(xen_guest_init);
subsys_initcall(xen_pm_init);

static irqreturn_t xen_arm_callback(int irq, void *arg)
{
@@ -285,11 +285,6 @@ static irqreturn_t xen_arm_callback(int irq, void *arg)
	return IRQ_HANDLED;
}

static __init void xen_percpu_enable_events(void *unused)
{
	enable_percpu_irq(xen_events_irq, 0);
}

static int __init xen_init_events(void)
{
	if (!xen_domain() || xen_events_irq < 0)
@@ -303,7 +298,7 @@ static int __init xen_init_events(void)
		return -EINVAL;
	}

	on_each_cpu(xen_percpu_enable_events, NULL, 0);
	on_each_cpu(xen_percpu_init, NULL, 0);

	return 0;
}
@@ -122,8 +122,6 @@ endmenu

menu "Kernel Features"

source "kernel/time/Kconfig"

config ARM64_64K_PAGES
	bool "Enable 64KB pages support"
	help
@@ -82,7 +82,7 @@

	.macro	enable_dbg_if_not_stepping, tmp
	mrs	\tmp, mdscr_el1
	tbnz	\tmp, #1, 9990f
	tbnz	\tmp, #0, 9990f
	enable_dbg
9990:
	.endm
@@ -136,8 +136,6 @@ void disable_debug_monitors(enum debug_el el)
 */
static void clear_os_lock(void *unused)
{
	asm volatile("msr mdscr_el1, %0" : : "r" (0));
	isb();
	asm volatile("msr oslar_el1, %0" : : "r" (0));
	isb();
}
@@ -95,7 +95,7 @@ static void early_write(struct console *con, const char *s, unsigned n)
	}
}

static struct console early_console = {
static struct console early_console_dev = {
	.name =		"earlycon",
	.write =	early_write,
	.flags =	CON_PRINTBUFFER | CON_BOOT,
@@ -145,7 +145,8 @@ static int __init setup_early_printk(char *buf)
		early_base = early_io_map(paddr, EARLYCON_IOBASE);

	printch = match->printch;
	register_console(&early_console);
	early_console = &early_console_dev;
	register_console(&early_console_dev);

	return 0;
}
@@ -282,12 +282,13 @@ void __init setup_arch(char **cmdline_p)
#endif
}

static int __init arm64_of_clk_init(void)
static int __init arm64_device_init(void)
{
	of_clk_init(NULL);
	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
	return 0;
}
arch_initcall(arm64_of_clk_init);
arch_initcall(arm64_device_init);

static DEFINE_PER_CPU(struct cpu, cpu_data);

@@ -305,13 +306,6 @@ static int __init topology_init(void)
}
subsys_initcall(topology_init);

static int __init arm64_device_probe(void)
{
	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
	return 0;
}
device_initcall(arm64_device_probe);

static const char *hwcap_str[] = {
	"fp",
	"asimd",
@@ -52,7 +52,7 @@ loop1:
	add	x2, x2, #4		// add 4 (line length offset)
	mov	x4, #0x3ff
	and	x4, x4, x1, lsr #3	// find maximum number on the way size
	clz	x5, x4			// find bit position of way size increment
	clz	w5, w4			// find bit position of way size increment
	mov	x7, #0x7fff
	and	x7, x7, x1, lsr #13	// extract max number of the index size
loop2:
@@ -119,8 +119,7 @@ ENTRY(__cpu_setup)

	mov	x0, #3 << 20
	msr	cpacr_el1, x0		// Enable FP/ASIMD
	mov	x0, #1
	msr	oslar_el1, x0		// Set the debug OS lock
	msr	mdscr_el1, xzr		// Reset mdscr_el1
	tlbi	vmalle1is		// invalidate I + D TLBs
	/*
	 * Memory region attributes for LPAE:
@@ -118,7 +118,7 @@ static inline void set_io_port_base(unsigned long base)
 */
static inline unsigned long virt_to_phys(volatile const void *address)
{
	return (unsigned long)address - PAGE_OFFSET + PHYS_OFFSET;
	return __pa(address);
}

/*
@@ -336,7 +336,7 @@ enum emulation_result {
#define VPN2_MASK           0xffffe000
#define TLB_IS_GLOBAL(x)    (((x).tlb_lo0 & MIPS3_PG_G) && ((x).tlb_lo1 & MIPS3_PG_G))
#define TLB_VPN2(x)         ((x).tlb_hi & VPN2_MASK)
#define TLB_ASID(x)         (ASID_MASK((x).tlb_hi))
#define TLB_ASID(x)         ((x).tlb_hi & ASID_MASK)
#define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) ? ((x).tlb_lo1 & MIPS3_PG_V) : ((x).tlb_lo0 & MIPS3_PG_V))

struct kvm_mips_tlb {
@@ -67,68 +67,45 @@ extern unsigned long pgd_current[];
	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
#endif
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

#define ASID_INC(asid)						\
({								\
	unsigned long __asid = asid;				\
	__asm__("1:\taddiu\t%0,1\t\t\t\t# patched\n\t"		\
	".section\t__asid_inc,\"a\"\n\t"			\
	".word\t1b\n\t"						\
	".previous"						\
	:"=r" (__asid)						\
	:"0" (__asid));						\
	__asid;							\
})
#define ASID_MASK(asid)						\
({								\
	unsigned long __asid = asid;				\
	__asm__("1:\tandi\t%0,%1,0xfc0\t\t\t# patched\n\t"	\
	".section\t__asid_mask,\"a\"\n\t"			\
	".word\t1b\n\t"						\
	".previous"						\
	:"=r" (__asid)						\
	:"r" (__asid));						\
	__asid;							\
})
#define ASID_VERSION_MASK					\
({								\
	unsigned long __asid;					\
	__asm__("1:\taddiu\t%0,$0,0xff00\t\t\t\t# patched\n\t"	\
	".section\t__asid_version_mask,\"a\"\n\t"		\
	".word\t1b\n\t"						\
	".previous"						\
	:"=r" (__asid));					\
	__asid;							\
})
#define ASID_FIRST_VERSION					\
({								\
	unsigned long __asid = asid;				\
	__asm__("1:\tli\t%0,0x100\t\t\t\t# patched\n\t"		\
	".section\t__asid_first_version,\"a\"\n\t"		\
	".word\t1b\n\t"						\
	".previous"						\
	:"=r" (__asid));					\
	__asid;							\
})
#define ASID_INC	0x40
#define ASID_MASK	0xfc0

#define ASID_FIRST_VERSION_R3000	0x1000
#define ASID_FIRST_VERSION_R4000	0x100
#define ASID_FIRST_VERSION_R8000	0x1000
#define ASID_FIRST_VERSION_RM9000	0x1000
#elif defined(CONFIG_CPU_R8000)

#define ASID_INC	0x10
#define ASID_MASK	0xff0

#elif defined(CONFIG_MIPS_MT_SMTC)

#define ASID_INC	0x1
extern unsigned long smtc_asid_mask;
#define ASID_MASK	(smtc_asid_mask)
#define HW_ASID_MASK	0xff
/* End SMTC/34K debug hack */
#else /* FIXME: not correct for R6000 */

#define ASID_INC	0x1
#define ASID_MASK	0xff

#ifdef CONFIG_MIPS_MT_SMTC
#define SMTC_HW_ASID_MASK	0xff
extern unsigned int smtc_asid_mask;
#endif

#define cpu_context(cpu, mm)	((mm)->context.asid[cpu])
#define cpu_asid(cpu, mm)	ASID_MASK(cpu_context((cpu), (mm)))
#define cpu_asid(cpu, mm)	(cpu_context((cpu), (mm)) & ASID_MASK)
#define asid_cache(cpu)		(cpu_data[cpu].asid_cache)

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 *  All unused by hardware upper bits will be considered
 *  as a software asid extension.
 */
#define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)

#ifndef CONFIG_MIPS_MT_SMTC
/* Normal, classic MIPS get_new_mmu_context */
static inline void
@@ -137,7 +114,7 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
	extern void kvm_local_flush_tlb_all(void);
	unsigned long asid = asid_cache(cpu);

	if (!ASID_MASK((asid = ASID_INC(asid)))) {
	if (! ((asid += ASID_INC) & ASID_MASK) ) {
		if (cpu_has_vtag_icache)
			flush_icache_all();
#ifdef CONFIG_VIRTUALIZATION
@@ -200,7 +177,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	 * free up the ASID value for use and flush any old
	 * instances of it from the TLB.
	 */
	oldasid = ASID_MASK(read_c0_entryhi());
	oldasid = (read_c0_entryhi() & ASID_MASK);
	if(smtc_live_asid[mytlb][oldasid]) {
		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
		if(smtc_live_asid[mytlb][oldasid] == 0)
@@ -211,7 +188,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	 * having ASID_MASK smaller than the hardware maximum,
	 * make sure no "soft" bits become "hard"...
	 */
	write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) |
	write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
			 cpu_asid(cpu, next));
	ehb(); /* Make sure it propagates to TCStatus */
	evpe(mtflags);
@@ -264,15 +241,15 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
#ifdef CONFIG_MIPS_MT_SMTC
	/* See comments for similar code above */
	mtflags = dvpe();
	oldasid = ASID_MASK(read_c0_entryhi());
	oldasid = read_c0_entryhi() & ASID_MASK;
	if(smtc_live_asid[mytlb][oldasid]) {
		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
		if(smtc_live_asid[mytlb][oldasid] == 0)
			smtc_flush_tlb_asid(oldasid);
	}
	/* See comments for similar code above */
	write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) |
			 cpu_asid(cpu, next));
	write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
			 cpu_asid(cpu, next));
	ehb(); /* Make sure it propagates to TCStatus */
	evpe(mtflags);
#else
@@ -309,14 +286,14 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
#ifdef CONFIG_MIPS_MT_SMTC
	/* See comments for similar code above */
	prevvpe = dvpe();
	oldasid = ASID_MASK(read_c0_entryhi());
	oldasid = (read_c0_entryhi() & ASID_MASK);
	if (smtc_live_asid[mytlb][oldasid]) {
		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
		if(smtc_live_asid[mytlb][oldasid] == 0)
			smtc_flush_tlb_asid(oldasid);
	}
	/* See comments for similar code above */
	write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK)
	write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK)
			 | cpu_asid(cpu, mm));
	ehb(); /* Make sure it propagates to TCStatus */
	evpe(prevvpe);
|
||||
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
|
||||
|
||||
#include <linux/pfn.h>
|
||||
#include <asm/io.h>
|
||||
|
||||
extern void build_clear_page(void);
|
||||
extern void build_copy_page(void);
|
||||
@ -151,6 +150,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
|
||||
((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
|
||||
#endif
|
||||
#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
|
||||
#include <asm/io.h>
|
||||
|
||||
/*
|
||||
* RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad
|
||||
|
@ -2,6 +2,7 @@
|
||||
#include <linux/bootmem.h>
|
||||
#include <linux/crash_dump.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
static int __init parse_savemaxmem(char *p)
|
||||
{
|
||||
|
@@ -493,7 +493,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
	.set	noreorder
	/* check if TLB contains a entry for EPC */
	MFC0	k1, CP0_ENTRYHI
	andi	k1, 0xff	/* ASID_MASK patched at run-time!! */
	andi	k1, 0xff	/* ASID_MASK */
	MFC0	k0, CP0_EPC
	PTR_SRL	k0, _PAGE_SHIFT + 1
	PTR_SLL	k0, _PAGE_SHIFT + 1
@@ -224,6 +224,9 @@ struct mips_frame_info {
	int		pc_offset;
};

#define J_TARGET(pc,target)	\
		(((unsigned long)(pc) & 0xf0000000) | ((target) << 2))

static inline int is_ra_save_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
@@ -264,7 +267,7 @@ static inline int is_ra_save_ins(union mips_instruction *ip)
#endif
}

static inline int is_jal_jalr_jr_ins(union mips_instruction *ip)
static inline int is_jump_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
@@ -288,6 +291,8 @@ static inline int is_jal_jalr_jr_ins(union mips_instruction *ip)
		return 0;
	return (((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op);
#else
	if (ip->j_format.opcode == j_op)
		return 1;
	if (ip->j_format.opcode == jal_op)
		return 1;
	if (ip->r_format.opcode != spec_op)
@@ -350,7 +355,7 @@ static int get_frame_info(struct mips_frame_info *info)

	for (i = 0; i < max_insns; i++, ip++) {

		if (is_jal_jalr_jr_ins(ip))
		if (is_jump_ins(ip))
			break;
		if (!info->frame_size) {
			if (is_sp_move_ins(ip))
@@ -393,15 +398,42 @@ err:

static struct mips_frame_info schedule_mfi __read_mostly;

#ifdef CONFIG_KALLSYMS
static unsigned long get___schedule_addr(void)
{
	return kallsyms_lookup_name("__schedule");
}
#else
static unsigned long get___schedule_addr(void)
{
	union mips_instruction *ip = (void *)schedule;
	int max_insns = 8;
	int i;

	for (i = 0; i < max_insns; i++, ip++) {
		if (ip->j_format.opcode == j_op)
			return J_TARGET(ip, ip->j_format.target);
	}
	return 0;
}
#endif

static int __init frame_info_init(void)
{
	unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long ofs;

	kallsyms_lookup_size_offset((unsigned long)schedule, &size, &ofs);
#endif
	schedule_mfi.func = schedule;
	unsigned long addr;

	addr = get___schedule_addr();
	if (!addr)
		addr = (unsigned long)schedule;

#ifdef CONFIG_KALLSYMS
	kallsyms_lookup_size_offset(addr, &size, &ofs);
#endif
	schedule_mfi.func = (void *)addr;
	schedule_mfi.func_size = size;

	get_frame_info(&schedule_mfi);
@@ -111,7 +111,7 @@ static int vpe0limit;
static int ipibuffers;
static int nostlb;
static int asidmask;
unsigned int smtc_asid_mask = 0xff;
unsigned long smtc_asid_mask = 0xff;

static int __init vpe0tcs(char *str)
{
@@ -1395,7 +1395,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
	asid = asid_cache(cpu);

	do {
		if (!ASID_MASK(ASID_INC(asid))) {
		if (!((asid += ASID_INC) & ASID_MASK) ) {
			if (cpu_has_vtag_icache)
				flush_icache_all();
			/* Traverse all online CPUs (hack requires contiguous range) */
@@ -1414,7 +1414,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
					mips_ihb();
				}
				tcstat = read_tc_c0_tcstatus();
				smtc_live_asid[tlb][ASID_MASK(tcstat)] |= (asiduse)(0x1 << i);
				smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
				if (!prevhalt)
					write_tc_c0_tchalt(0);
			}
@@ -1423,7 +1423,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
			asid = ASID_FIRST_VERSION;
			local_flush_tlb_all();	/* start new asid cycle */
		}
	} while (smtc_live_asid[tlb][ASID_MASK(asid)]);
	} while (smtc_live_asid[tlb][(asid & ASID_MASK)]);

	/*
	 * SMTC shares the TLB within VPEs and possibly across all VPEs.
@@ -1461,7 +1461,7 @@ void smtc_flush_tlb_asid(unsigned long asid)
		tlb_read();
		ehb();
		ehi = read_c0_entryhi();
		if (ASID_MASK(ehi) == asid) {
		if ((ehi & ASID_MASK) == asid) {
			/*
			 * Invalidate only entries with specified ASID,
			 * making sure all entries differ.
@@ -1656,7 +1656,6 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
	unsigned int cpu = smp_processor_id();
	unsigned int status_set = ST0_CU0;
	unsigned int hwrena = cpu_hwrena_impl_bits;
	unsigned long asid = 0;
#ifdef CONFIG_MIPS_MT_SMTC
	int secondaryTC = 0;
	int bootTC = (cpu == 0);
@@ -1740,9 +1739,8 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
	}
#endif /* CONFIG_MIPS_MT_SMTC */

	asid = ASID_FIRST_VERSION;
	cpu_data[cpu].asid_cache = asid;
	TLBMISS_HANDLER_SETUP();
	if (!cpu_data[cpu].asid_cache)
		cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
@@ -525,16 +525,18 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
				printk("MTCz, cop0->reg[EBASE]: %#lx\n",
				       kvm_read_c0_guest_ebase(cop0));
			} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
				uint32_t nasid = ASID_MASK(vcpu->arch.gprs[rt]);
				uint32_t nasid =
				    vcpu->arch.gprs[rt] & ASID_MASK;
				if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0)
				    &&
				    (ASID_MASK(kvm_read_c0_guest_entryhi(cop0))
				     != nasid)) {
				    ((kvm_read_c0_guest_entryhi(cop0) &
				      ASID_MASK) != nasid)) {

					kvm_debug
					    ("MTCz, change ASID from %#lx to %#lx\n",
					     ASID_MASK(kvm_read_c0_guest_entryhi(cop0)),
					     ASID_MASK(vcpu->arch.gprs[rt]));
					     kvm_read_c0_guest_entryhi(cop0) &
					     ASID_MASK,
					     vcpu->arch.gprs[rt] & ASID_MASK);

					/* Blow away the shadow host TLBs */
					kvm_mips_flush_host_tlb(1);
@@ -986,7 +988,8 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
		 * resulting handler will do the right thing
		 */
		index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
						  ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));
						  (kvm_read_c0_guest_entryhi
						   (cop0) & ASID_MASK));

		if (index < 0) {
			vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
@@ -1151,7 +1154,7 @@ kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long entryhi = (vcpu->arch. host_cp0_badvaddr & VPN2_MASK) |
				ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
@@ -1198,7 +1201,7 @@ kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
	enum emulation_result er = EMULATE_DONE;
	unsigned long entryhi =
		(vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
		ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
		(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
@@ -1243,7 +1246,7 @@ kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
@@ -1287,7 +1290,7 @@ kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
@@ -1356,7 +1359,7 @@ kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

@@ -1783,8 +1786,8 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
		 */
		index = kvm_mips_guest_tlb_lookup(vcpu,
					  (va & VPN2_MASK) |
					  ASID_MASK(kvm_read_c0_guest_entryhi
						    (vcpu->arch.cop0)));
					  (kvm_read_c0_guest_entryhi
					   (vcpu->arch.cop0) & ASID_MASK));
		if (index < 0) {
			if (exccode == T_TLB_LD_MISS) {
				er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
@@ -51,13 +51,13 @@ EXPORT_SYMBOL(kvm_mips_is_error_pfn);

uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	return ASID_MASK(vcpu->arch.guest_kernel_asid[smp_processor_id()]);
	return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
}


uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	return ASID_MASK(vcpu->arch.guest_user_asid[smp_processor_id()]);
	return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
}

inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu)
@@ -84,7 +84,7 @@ void kvm_mips_dump_host_tlbs(void)
	old_pagemask = read_c0_pagemask();

	printk("HOST TLBs:\n");
	printk("ASID: %#lx\n", ASID_MASK(read_c0_entryhi()));
	printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);

	for (i = 0; i < current_cpu_data.tlbsize; i++) {
		write_c0_index(i);
@@ -428,7 +428,7 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
		    (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == ASID_MASK(entryhi)))) {
		    (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) {
			index = i;
			break;
		}
@@ -626,7 +626,7 @@ kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
{
	unsigned long asid = asid_cache(cpu);

	if (!(ASID_MASK(ASID_INC(asid)))) {
	if (!((asid += ASID_INC) & ASID_MASK)) {
		if (cpu_has_vtag_icache) {
			flush_icache_all();
		}
@@ -804,7 +804,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
	if (!newasid) {
		/* If we preempted while the guest was executing, then reload the pre-empted ASID */
		if (current->flags & PF_VCPU) {
			write_c0_entryhi(ASID_MASK(vcpu->arch.preempt_entryhi));
			write_c0_entryhi(vcpu->arch.
					 preempt_entryhi & ASID_MASK);
			ehb();
		}
	} else {
@@ -816,11 +817,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
		 */
		if (current->flags & PF_VCPU) {
			if (KVM_GUEST_KERNEL_MODE(vcpu))
				write_c0_entryhi(ASID_MASK(vcpu->arch.
							   guest_kernel_asid[cpu]));
				write_c0_entryhi(vcpu->arch.
						 guest_kernel_asid[cpu] &
						 ASID_MASK);
			else
				write_c0_entryhi(ASID_MASK(vcpu->arch.
							   guest_user_asid[cpu]));
				write_c0_entryhi(vcpu->arch.
						 guest_user_asid[cpu] &
						 ASID_MASK);
			ehb();
		}
	}
@@ -879,7 +882,8 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
		    kvm_mips_guest_tlb_lookup(vcpu,
					      ((unsigned long) opc & VPN2_MASK)
					      |
					      ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));
					      (kvm_read_c0_guest_entryhi
					       (cop0) & ASID_MASK));
		if (index < 0) {
			kvm_err
			    ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
@@ -144,10 +144,6 @@ static int gptu_probe(struct platform_device *pdev)
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "Failed to get resource\n");
		return -ENOMEM;
	}

	/* remap gptu register range */
	gptu_membase = devm_ioremap_resource(&pdev->dev, res);
@@ -169,6 +165,8 @@ static int gptu_probe(struct platform_device *pdev)
	if (((gptu_r32(GPTU_ID) >> 8) & 0xff) != GPTU_MAGIC) {
		dev_err(&pdev->dev, "Failed to find magic\n");
		gptu_hwexit();
		clk_disable(clk);
		clk_put(clk);
		return -ENAVAIL;
	}

@@ -11,7 +11,6 @@
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlbdebug.h>
#include <asm/mmu_context.h>

static inline const char *msk2str(unsigned int mask)
{
@@ -56,7 +55,7 @@ static void dump_tlb(int first, int last)
	s_pagemask = read_c0_pagemask();
	s_entryhi = read_c0_entryhi();
	s_index = read_c0_index();
	asid = ASID_MASK(s_entryhi);
	asid = s_entryhi & 0xff;

	for (i = first; i <= last; i++) {
		write_c0_index(i);
@@ -86,7 +85,7 @@ static void dump_tlb(int first, int last)

		printk("va=%0*lx asid=%02lx\n",
		       width, (entryhi & ~0x1fffUL),
		       ASID_MASK(entryhi));
		       entryhi & 0xff);
		printk("\t[pa=%0*llx c=%d d=%d v=%d g=%d] ",
		       width,
		       (entrylo0 << 6) & PAGE_MASK, c0,
@@ -9,7 +9,6 @@
#include <linux/mm.h>

#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlbdebug.h>
@@ -22,7 +21,7 @@ static void dump_tlb(int first, int last)
	unsigned int asid;
	unsigned long entryhi, entrylo0;

	asid = ASID_MASK(read_c0_entryhi());
	asid = read_c0_entryhi() & 0xfc0;

	for (i = first; i <= last; i++) {
		write_c0_index(i<<8);
@@ -36,7 +35,7 @@ static void dump_tlb(int first, int last)

		/* Unused entries have a virtual address of KSEG0.  */
		if ((entryhi & 0xffffe000) != 0x80000000
		    && (ASID_MASK(entryhi) == asid)) {
		    && (entryhi & 0xfc0) == asid) {
			/*
			 * Only print entries in use
			 */
@@ -45,7 +44,7 @@ static void dump_tlb(int first, int last)
			printk("va=%08lx asid=%08lx"
			       "  [pa=%06lx n=%d d=%d v=%d g=%d]",
			       (entryhi & 0xffffe000),
			       ASID_MASK(entryhi),
			       entryhi & 0xfc0,
			       entrylo0 & PAGE_MASK,
			       (entrylo0 & (1 << 11)) ? 1 : 0,
			       (entrylo0 & (1 << 10)) ? 1 : 0,
@@ -51,7 +51,7 @@ void local_flush_tlb_all(void)
#endif

	local_irq_save(flags);
	old_ctx = ASID_MASK(read_c0_entryhi());
	old_ctx = read_c0_entryhi() & ASID_MASK;
	write_c0_entrylo0(0);
	entry = r3k_have_wired_reg ? read_c0_wired() : 8;
	for (; entry < current_cpu_data.tlbsize; entry++) {
@@ -87,13 +87,13 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,

#ifdef DEBUG_TLB
	printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
	       ASID_MASK(cpu_context(cpu, mm)), start, end);
	       cpu_context(cpu, mm) & ASID_MASK, start, end);
#endif
	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (size <= current_cpu_data.tlbsize) {
		int oldpid = ASID_MASK(read_c0_entryhi());
		int newpid = ASID_MASK(cpu_context(cpu, mm));
		int oldpid = read_c0_entryhi() & ASID_MASK;
		int newpid = cpu_context(cpu, mm) & ASID_MASK;

		start &= PAGE_MASK;
		end += PAGE_SIZE - 1;
@@ -166,10 +166,10 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
#ifdef DEBUG_TLB
	printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
#endif
	newpid = ASID_MASK(cpu_context(cpu, vma->vm_mm));
	newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK;
	page &= PAGE_MASK;
	local_irq_save(flags);
	oldpid = ASID_MASK(read_c0_entryhi());
	oldpid = read_c0_entryhi() & ASID_MASK;
	write_c0_entryhi(page | newpid);
	BARRIER;
	tlb_probe();
@@ -197,10 +197,10 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
	if (current->active_mm != vma->vm_mm)
		return;

	pid = ASID_MASK(read_c0_entryhi());
	pid = read_c0_entryhi() & ASID_MASK;

#ifdef DEBUG_TLB
	if ((pid != ASID_MASK(cpu_context(cpu, vma->vm_mm))) || (cpu_context(cpu, vma->vm_mm) == 0)) {
	if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) {
		printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n",
		       (cpu_context(cpu, vma->vm_mm)), pid);
	}
@@ -241,7 +241,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = ASID_MASK(read_c0_entryhi());
	old_ctx = read_c0_entryhi() & ASID_MASK;
	old_pagemask = read_c0_pagemask();
	w = read_c0_wired();
	write_c0_wired(w + 1);
@@ -264,7 +264,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
#endif

	local_irq_save(flags);
	old_ctx = ASID_MASK(read_c0_entryhi());
	old_ctx = read_c0_entryhi() & ASID_MASK;
	write_c0_entrylo0(entrylo0);
	write_c0_entryhi(entryhi);
	write_c0_index(wired);
@@ -287,7 +287,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)

	ENTER_CRITICAL(flags);

	pid = ASID_MASK(read_c0_entryhi());
	pid = read_c0_entryhi() & ASID_MASK;
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
	pgdp = pgd_offset(vma->vm_mm, address);
@@ -195,7 +195,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
	if (current->active_mm != vma->vm_mm)
		return;

	pid = ASID_MASK(read_c0_entryhi());
	pid = read_c0_entryhi() & ASID_MASK;

	local_irq_save(flags);
	address &= PAGE_MASK;
@@ -29,7 +29,6 @@
#include <linux/init.h>
#include <linux/cache.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/war.h>
@@ -306,78 +305,6 @@ static struct uasm_reloc relocs[128] __cpuinitdata;
static int check_for_high_segbits __cpuinitdata;
#endif

static void __cpuinit insn_fixup(unsigned int **start, unsigned int **stop,
					unsigned int i_const)
{
	unsigned int **p;

	for (p = start; p < stop; p++) {
#ifndef CONFIG_CPU_MICROMIPS
		unsigned int *ip;

		ip = *p;
		*ip = (*ip & 0xffff0000) | i_const;
#else
		unsigned short *ip;

		ip = ((unsigned short *)((unsigned int)*p - 1));
		if ((*ip & 0xf000) == 0x4000) {
			*ip &= 0xfff1;
			*ip |= (i_const << 1);
		} else if ((*ip & 0xf000) == 0x6000) {
			*ip &= 0xfff1;
			*ip |= ((i_const >> 2) << 1);
		} else {
			ip++;
			*ip = i_const;
		}
#endif
		local_flush_icache_range((unsigned long)ip,
					 (unsigned long)ip + sizeof(*ip));
	}
}

#define asid_insn_fixup(section, const)					\
do {									\
	extern unsigned int *__start_ ## section;			\
	extern unsigned int *__stop_ ## section;			\
	insn_fixup(&__start_ ## section, &__stop_ ## section, const);	\
} while(0)

/*
 * Caller is assumed to flush the caches before the first context switch.
 */
static void __cpuinit setup_asid(unsigned int inc, unsigned int mask,
				 unsigned int version_mask,
				 unsigned int first_version)
{
	extern asmlinkage void handle_ri_rdhwr_vivt(void);
	unsigned long *vivt_exc;

#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * Worst case optimised microMIPS addiu instructions support
	 * only a 3-bit immediate value.
	 */
	if(inc > 7)
		panic("Invalid ASID increment value!");
#endif
	asid_insn_fixup(__asid_inc, inc);
	asid_insn_fixup(__asid_mask, mask);
	asid_insn_fixup(__asid_version_mask, version_mask);
	asid_insn_fixup(__asid_first_version, first_version);

	/* Patch up the 'handle_ri_rdhwr_vivt' handler. */
	vivt_exc = (unsigned long *) &handle_ri_rdhwr_vivt;
#ifdef CONFIG_CPU_MICROMIPS
	vivt_exc = (unsigned long *)((unsigned long) vivt_exc - 1);
#endif
	vivt_exc++;
	*vivt_exc = (*vivt_exc & ~mask) | mask;

	current_cpu_data.asid_cache = first_version;
}

static int check_for_high_segbits __cpuinitdata;

static unsigned int kscratch_used_mask __cpuinitdata;
@ -2256,7 +2183,6 @@ void __cpuinit build_tlb_refill_handler(void)
|
||||
case CPU_TX3922:
|
||||
case CPU_TX3927:
|
||||
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
|
||||
setup_asid(0x40, 0xfc0, 0xf000, ASID_FIRST_VERSION_R3000);
|
||||
if (cpu_has_local_ebase)
|
||||
build_r3000_tlb_refill_handler();
|
||||
if (!run_once) {
|
||||
@ -2282,11 +2208,6 @@ void __cpuinit build_tlb_refill_handler(void)
|
||||
break;
|
||||
|
||||
default:
|
||||
#ifndef CONFIG_MIPS_MT_SMTC
|
||||
setup_asid(0x1, 0xff, 0xff00, ASID_FIRST_VERSION_R4000);
|
||||
#else
|
||||
setup_asid(0x1, smtc_asid_mask, 0xff00, ASID_FIRST_VERSION_R4000);
|
||||
#endif
|
||||
if (!run_once) {
|
||||
scratch_reg = allocate_kscratch();
|
||||
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
|
||||
|
@@ -83,7 +83,7 @@ static inline unsigned char str2hexnum(unsigned char c)
 return 0; /* foo */
 }

-static inline int str2eaddr(unsigned char *ea, unsigned char *str)
+int str2eaddr(unsigned char *ea, unsigned char *str)
 {
 int index = 0;
 unsigned char num = 0;
@@ -55,4 +55,14 @@
 reg-shift = <2>;
 };
 };
+
+usb@101c0000 {
+compatible = "ralink,rt3050-usb", "snps,dwc2";
+reg = <0x101c0000 40000>;
+
+interrupt-parent = <&intc>;
+interrupts = <18>;
+
+status = "disabled";
+};
 };

@@ -43,4 +43,8 @@
 reg = <0x50000 0x7b0000>;
 };
 };
+
+usb@101c0000 {
+status = "ok";
+};
 };
@@ -245,7 +245,7 @@ config SMP

 config IRQSTACKS
 bool "Use separate kernel stacks when processing interrupts"
-default n
+default y
 help
 If you say Y here the kernel will use separate kernel stacks
 for handling hard and soft interrupts. This can help avoid
@@ -23,24 +23,21 @@ NM = sh $(srctree)/arch/parisc/nm
 CHECKFLAGS += -D__hppa__=1
 LIBGCC = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)

-MACHINE := $(shell uname -m)
-NATIVE := $(if $(filter parisc%,$(MACHINE)),1,0)
-
 ifdef CONFIG_64BIT
 UTS_MACHINE := parisc64
 CHECKFLAGS += -D__LP64__=1 -m64
-WIDTH := 64
+CC_ARCHES = hppa64
 else # 32-bit
-WIDTH :=
+CC_ARCHES = hppa hppa2.0 hppa1.1
 endif

-# attempt to help out folks who are cross-compiling
-ifeq ($(NATIVE),1)
-CROSS_COMPILE := hppa$(WIDTH)-linux-
-else
-ifeq ($(CROSS_COMPILE),)
-CROSS_COMPILE := hppa$(WIDTH)-linux-gnu-
-endif
+ifneq ($(SUBARCH),$(UTS_MACHINE))
+ifeq ($(CROSS_COMPILE),)
+CC_SUFFIXES = linux linux-gnu unknown-linux-gnu
+CROSS_COMPILE := $(call cc-cross-prefix, \
+$(foreach a,$(CC_ARCHES), \
+$(foreach s,$(CC_SUFFIXES),$(a)-$(s)-)))
+endif
 endif

 OBJCOPY_FLAGS =-O binary -R .note -R .comment -S
@@ -11,10 +11,18 @@
 #include <linux/threads.h>
 #include <linux/irq.h>

+#ifdef CONFIG_IRQSTACKS
+#define __ARCH_HAS_DO_SOFTIRQ
+#endif
+
 typedef struct {
 unsigned int __softirq_pending;
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
 unsigned int kernel_stack_usage;
+#ifdef CONFIG_IRQSTACKS
+unsigned int irq_stack_usage;
+unsigned int irq_stack_counter;
+#endif
 #endif
 #ifdef CONFIG_SMP
 unsigned int irq_resched_count;
@@ -28,6 +36,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 #define __ARCH_IRQ_STAT
 #define __IRQ_STAT(cpu, member) (irq_stat[cpu].member)
 #define inc_irq_stat(member) this_cpu_inc(irq_stat.member)
+#define __inc_irq_stat(member) __this_cpu_inc(irq_stat.member)
 #define local_softirq_pending() this_cpu_read(irq_stat.__softirq_pending)

 #define __ARCH_SET_SOFTIRQ_PENDING
@@ -63,10 +63,13 @@
 */
 #ifdef __KERNEL__

+#include <linux/spinlock_types.h>
+
 #define IRQ_STACK_SIZE (4096 << 2) /* 16k irq stack size */

 union irq_stack_union {
 unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
+raw_spinlock_t lock;
 };

 DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
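The union above overlays a spinlock on the IRQ stack storage, so "lock held" and "stack in use" become the same state. A simplified sketch of the layout, with C11's atomic_flag standing in for the kernel's raw_spinlock_t:

    /* Sketch: types reduced; the kernel uses raw_spinlock_t and
     * DEFINE_PER_CPU rather than a plain global. */
    #include <stdatomic.h>

    #define IRQ_STACK_SIZE (4096 << 2)    /* 16k, as in the hunk above */

    union irq_stack_union_sketch {
        unsigned long stack[IRQ_STACK_SIZE / sizeof(unsigned long)];
        atomic_flag lock;    /* lives at the bottom of the stack area */
    };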
@@ -452,9 +452,41 @@
 L2_ptep \pgd,\pte,\index,\va,\fault
 .endm

+/* Acquire pa_dbit_lock lock. */
+.macro dbit_lock spc,tmp,tmp1
+#ifdef CONFIG_SMP
+cmpib,COND(=),n 0,\spc,2f
+load32 PA(pa_dbit_lock),\tmp
+1: LDCW 0(\tmp),\tmp1
+cmpib,COND(=) 0,\tmp1,1b
+nop
+2:
+#endif
+.endm
+
+/* Release pa_dbit_lock lock without reloading lock address. */
+.macro dbit_unlock0 spc,tmp
+#ifdef CONFIG_SMP
+or,COND(=) %r0,\spc,%r0
+stw \spc,0(\tmp)
+#endif
+.endm
+
+/* Release pa_dbit_lock lock. */
+.macro dbit_unlock1 spc,tmp
+#ifdef CONFIG_SMP
+load32 PA(pa_dbit_lock),\tmp
+dbit_unlock0 \spc,\tmp
+#endif
+.endm
+
 /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and
 * don't needlessly dirty the cache line if it was already set */
-.macro update_ptep ptep,pte,tmp,tmp1
+.macro update_ptep spc,ptep,pte,tmp,tmp1
+#ifdef CONFIG_SMP
+or,COND(=) %r0,\spc,%r0
+LDREG 0(\ptep),\pte
+#endif
 ldi _PAGE_ACCESSED,\tmp1
 or \tmp1,\pte,\tmp
 and,COND(<>) \tmp1,\pte,%r0
@@ -463,7 +495,11 @@

 /* Set the dirty bit (and accessed bit). No need to be
 * clever, this is only used from the dirty fault */
-.macro update_dirty ptep,pte,tmp
+.macro update_dirty spc,ptep,pte,tmp
+#ifdef CONFIG_SMP
+or,COND(=) %r0,\spc,%r0
+LDREG 0(\ptep),\pte
+#endif
 ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
 or \tmp,\pte,\pte
 STREG \pte,0(\ptep)
@@ -1111,11 +1147,13 @@ dtlb_miss_20w:

 L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w

-update_ptep ptp,pte,t0,t1
+dbit_lock spc,t0,t1
+update_ptep spc,ptp,pte,t0,t1

 make_insert_tlb spc,pte,prot

 idtlbt pte,prot
+dbit_unlock1 spc,t0

 rfir
 nop
@@ -1135,11 +1173,13 @@ nadtlb_miss_20w:

 L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w

-update_ptep ptp,pte,t0,t1
+dbit_lock spc,t0,t1
+update_ptep spc,ptp,pte,t0,t1

 make_insert_tlb spc,pte,prot

 idtlbt pte,prot
+dbit_unlock1 spc,t0

 rfir
 nop
@@ -1161,7 +1201,8 @@ dtlb_miss_11:

 L2_ptep ptp,pte,t0,va,dtlb_check_alias_11

-update_ptep ptp,pte,t0,t1
+dbit_lock spc,t0,t1
+update_ptep spc,ptp,pte,t0,t1

 make_insert_tlb_11 spc,pte,prot

@@ -1172,6 +1213,7 @@ dtlb_miss_11:
 idtlbp prot,(%sr1,va)

 mtsp t0, %sr1 /* Restore sr1 */
+dbit_unlock1 spc,t0

 rfir
 nop
@@ -1192,7 +1234,8 @@ nadtlb_miss_11:

 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11

-update_ptep ptp,pte,t0,t1
+dbit_lock spc,t0,t1
+update_ptep spc,ptp,pte,t0,t1

 make_insert_tlb_11 spc,pte,prot

@@ -1204,6 +1247,7 @@ nadtlb_miss_11:
 idtlbp prot,(%sr1,va)

 mtsp t0, %sr1 /* Restore sr1 */
+dbit_unlock1 spc,t0

 rfir
 nop
@@ -1224,13 +1268,15 @@ dtlb_miss_20:

 L2_ptep ptp,pte,t0,va,dtlb_check_alias_20

-update_ptep ptp,pte,t0,t1
+dbit_lock spc,t0,t1
+update_ptep spc,ptp,pte,t0,t1

 make_insert_tlb spc,pte,prot

 f_extend pte,t0

 idtlbt pte,prot
+dbit_unlock1 spc,t0

 rfir
 nop
@@ -1250,13 +1296,15 @@ nadtlb_miss_20:

 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20

-update_ptep ptp,pte,t0,t1
+dbit_lock spc,t0,t1
+update_ptep spc,ptp,pte,t0,t1

 make_insert_tlb spc,pte,prot

 f_extend pte,t0

 idtlbt pte,prot
+dbit_unlock1 spc,t0

 rfir
 nop
@@ -1357,11 +1405,13 @@ itlb_miss_20w:

 L3_ptep ptp,pte,t0,va,itlb_fault

-update_ptep ptp,pte,t0,t1
+dbit_lock spc,t0,t1
+update_ptep spc,ptp,pte,t0,t1

 make_insert_tlb spc,pte,prot

 iitlbt pte,prot
+dbit_unlock1 spc,t0

 rfir
 nop
@@ -1379,11 +1429,13 @@ naitlb_miss_20w:

 L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w

-update_ptep ptp,pte,t0,t1
+dbit_lock spc,t0,t1
+update_ptep spc,ptp,pte,t0,t1

 make_insert_tlb spc,pte,prot

 iitlbt pte,prot
+dbit_unlock1 spc,t0

 rfir
 nop
@@ -1405,7 +1457,8 @@ itlb_miss_11:

 L2_ptep ptp,pte,t0,va,itlb_fault

-update_ptep ptp,pte,t0,t1
+dbit_lock spc,t0,t1
+update_ptep spc,ptp,pte,t0,t1

 make_insert_tlb_11 spc,pte,prot

@@ -1416,6 +1469,7 @@ itlb_miss_11:
 iitlbp prot,(%sr1,va)

 mtsp t0, %sr1 /* Restore sr1 */
+dbit_unlock1 spc,t0

 rfir
 nop
@@ -1427,7 +1481,8 @@ naitlb_miss_11:

 L2_ptep ptp,pte,t0,va,naitlb_check_alias_11

-update_ptep ptp,pte,t0,t1
+dbit_lock spc,t0,t1
+update_ptep spc,ptp,pte,t0,t1

 make_insert_tlb_11 spc,pte,prot

@@ -1438,6 +1493,7 @@ naitlb_miss_11:
 iitlbp prot,(%sr1,va)

 mtsp t0, %sr1 /* Restore sr1 */
+dbit_unlock1 spc,t0

 rfir
 nop
@@ -1459,13 +1515,15 @@ itlb_miss_20:

 L2_ptep ptp,pte,t0,va,itlb_fault

-update_ptep ptp,pte,t0,t1
+dbit_lock spc,t0,t1
+update_ptep spc,ptp,pte,t0,t1

 make_insert_tlb spc,pte,prot

 f_extend pte,t0

 iitlbt pte,prot
+dbit_unlock1 spc,t0

 rfir
 nop
@@ -1477,13 +1535,15 @@ naitlb_miss_20:

 L2_ptep ptp,pte,t0,va,naitlb_check_alias_20

-update_ptep ptp,pte,t0,t1
+dbit_lock spc,t0,t1
+update_ptep spc,ptp,pte,t0,t1

 make_insert_tlb spc,pte,prot

 f_extend pte,t0

 iitlbt pte,prot
+dbit_unlock1 spc,t0

 rfir
 nop
@@ -1507,29 +1567,13 @@ dbit_trap_20w:

 L3_ptep ptp,pte,t0,va,dbit_fault

-#ifdef CONFIG_SMP
-cmpib,COND(=),n 0,spc,dbit_nolock_20w
-load32 PA(pa_dbit_lock),t0
-
-dbit_spin_20w:
-LDCW 0(t0),t1
-cmpib,COND(=) 0,t1,dbit_spin_20w
-nop
-
-dbit_nolock_20w:
-#endif
-update_dirty ptp,pte,t1
+dbit_lock spc,t0,t1
+update_dirty spc,ptp,pte,t1

 make_insert_tlb spc,pte,prot

 idtlbt pte,prot
-#ifdef CONFIG_SMP
-cmpib,COND(=),n 0,spc,dbit_nounlock_20w
-ldi 1,t1
-stw t1,0(t0)
-
-dbit_nounlock_20w:
-#endif
+dbit_unlock0 spc,t0

 rfir
 nop
@@ -1543,18 +1587,8 @@ dbit_trap_11:

 L2_ptep ptp,pte,t0,va,dbit_fault

-#ifdef CONFIG_SMP
-cmpib,COND(=),n 0,spc,dbit_nolock_11
-load32 PA(pa_dbit_lock),t0
-
-dbit_spin_11:
-LDCW 0(t0),t1
-cmpib,= 0,t1,dbit_spin_11
-nop
-
-dbit_nolock_11:
-#endif
-update_dirty ptp,pte,t1
+dbit_lock spc,t0,t1
+update_dirty spc,ptp,pte,t1

 make_insert_tlb_11 spc,pte,prot

@@ -1565,13 +1599,7 @@ dbit_nolock_11:
 idtlbp prot,(%sr1,va)

 mtsp t1, %sr1 /* Restore sr1 */
-#ifdef CONFIG_SMP
-cmpib,COND(=),n 0,spc,dbit_nounlock_11
-ldi 1,t1
-stw t1,0(t0)
-
-dbit_nounlock_11:
-#endif
+dbit_unlock0 spc,t0

 rfir
 nop
@@ -1583,32 +1611,15 @@ dbit_trap_20:

 L2_ptep ptp,pte,t0,va,dbit_fault

-#ifdef CONFIG_SMP
-cmpib,COND(=),n 0,spc,dbit_nolock_20
-load32 PA(pa_dbit_lock),t0
-
-dbit_spin_20:
-LDCW 0(t0),t1
-cmpib,= 0,t1,dbit_spin_20
-nop
-
-dbit_nolock_20:
-#endif
-update_dirty ptp,pte,t1
+dbit_lock spc,t0,t1
+update_dirty spc,ptp,pte,t1

 make_insert_tlb spc,pte,prot

 f_extend pte,t1

 idtlbt pte,prot

-#ifdef CONFIG_SMP
-cmpib,COND(=),n 0,spc,dbit_nounlock_20
-ldi 1,t1
-stw t1,0(t0)
-
-dbit_nounlock_20:
-#endif
+dbit_unlock0 spc,t0

 rfir
 nop
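The dbit_lock macro added above spins on PA-RISC's ldcw (load-and-clear word): a read that returns the old value and atomically zeroes the word, so zero means "locked" and any nonzero store releases. A rough C model of that protocol; the helper below only illustrates the semantics and is not itself atomic:

    /* Illustrative names; not the kernel's implementation. */
    static volatile unsigned int pa_dbit_lock_word = 1;    /* 1 == free */

    static unsigned int ldcw_model(volatile unsigned int *w)
    {
        unsigned int old = *w;    /* hardware does this atomically */
        *w = 0;
        return old;
    }

    static void dbit_lock_c(void)
    {
        while (ldcw_model(&pa_dbit_lock_word) == 0)
            ;    /* spin until we observed the "free" value */
    }

    static void dbit_unlock_c(void)
    {
        pa_dbit_lock_word = 1;    /* any nonzero store releases */
    }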
@@ -166,22 +166,32 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 seq_printf(p, "%*s: ", prec, "STK");
 for_each_online_cpu(j)
 seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage);
-seq_printf(p, " Kernel stack usage\n");
+seq_puts(p, " Kernel stack usage\n");
+# ifdef CONFIG_IRQSTACKS
+seq_printf(p, "%*s: ", prec, "IST");
+for_each_online_cpu(j)
+seq_printf(p, "%10u ", irq_stats(j)->irq_stack_usage);
+seq_puts(p, " Interrupt stack usage\n");
+seq_printf(p, "%*s: ", prec, "ISC");
+for_each_online_cpu(j)
+seq_printf(p, "%10u ", irq_stats(j)->irq_stack_counter);
+seq_puts(p, " Interrupt stack usage counter\n");
+# endif
 #endif
 #ifdef CONFIG_SMP
 seq_printf(p, "%*s: ", prec, "RES");
 for_each_online_cpu(j)
 seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
-seq_printf(p, " Rescheduling interrupts\n");
+seq_puts(p, " Rescheduling interrupts\n");
 seq_printf(p, "%*s: ", prec, "CAL");
 for_each_online_cpu(j)
 seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
-seq_printf(p, " Function call interrupts\n");
+seq_puts(p, " Function call interrupts\n");
 #endif
 seq_printf(p, "%*s: ", prec, "TLB");
 for_each_online_cpu(j)
 seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
-seq_printf(p, " TLB shootdowns\n");
+seq_puts(p, " TLB shootdowns\n");
 return 0;
 }

@@ -378,6 +388,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
 unsigned long sp = regs->gr[30];
 unsigned long stack_usage;
 unsigned int *last_usage;
+int cpu = smp_processor_id();

 /* if sr7 != 0, we interrupted a userspace process which we do not want
 * to check for stack overflow. We will only check the kernel stack. */
@@ -386,7 +397,31 @@ static inline void stack_overflow_check(struct pt_regs *regs)

 /* calculate kernel stack usage */
 stack_usage = sp - stack_start;
-last_usage = &per_cpu(irq_stat.kernel_stack_usage, smp_processor_id());
+#ifdef CONFIG_IRQSTACKS
+if (likely(stack_usage <= THREAD_SIZE))
+goto check_kernel_stack; /* found kernel stack */
+
+/* check irq stack usage */
+stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack;
+stack_usage = sp - stack_start;
+
+last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu);
+if (unlikely(stack_usage > *last_usage))
+*last_usage = stack_usage;
+
+if (likely(stack_usage < (IRQ_STACK_SIZE - STACK_MARGIN)))
+return;
+
+pr_emerg("stackcheck: %s will most likely overflow irq stack "
+"(sp:%lx, stk bottom-top:%lx-%lx)\n",
+current->comm, sp, stack_start, stack_start + IRQ_STACK_SIZE);
+goto panic_check;
+
+check_kernel_stack:
+#endif
+
+/* check kernel stack usage */
+last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu);

 if (unlikely(stack_usage > *last_usage))
 *last_usage = stack_usage;
@@ -398,31 +433,69 @@ static inline void stack_overflow_check(struct pt_regs *regs)
 "(sp:%lx, stk bottom-top:%lx-%lx)\n",
 current->comm, sp, stack_start, stack_start + THREAD_SIZE);

+#ifdef CONFIG_IRQSTACKS
+panic_check:
+#endif
 if (sysctl_panic_on_stackoverflow)
 panic("low stack detected by irq handler - check messages\n");
 #endif
 }

 #ifdef CONFIG_IRQSTACKS
-DEFINE_PER_CPU(union irq_stack_union, irq_stack_union);
+DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
+.lock = __RAW_SPIN_LOCK_UNLOCKED((irq_stack_union).lock)
+};

 static void execute_on_irq_stack(void *func, unsigned long param1)
 {
-unsigned long *irq_stack_start;
+union irq_stack_union *union_ptr;
 unsigned long irq_stack;
-int cpu = smp_processor_id();
+raw_spinlock_t *irq_stack_in_use;

-irq_stack_start = &per_cpu(irq_stack_union, cpu).stack[0];
-irq_stack = (unsigned long) irq_stack_start;
-irq_stack = ALIGN(irq_stack, 16); /* align for stack frame usage */
+union_ptr = &per_cpu(irq_stack_union, smp_processor_id());
+irq_stack = (unsigned long) &union_ptr->stack;
+irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.lock),
+64); /* align for stack frame usage */

-BUG_ON(*irq_stack_start); /* report bug if we were called recursive. */
-*irq_stack_start = 1;
+/* We may be called recursive. If we are already using the irq stack,
+* just continue to use it. Use spinlocks to serialize
+* the irq stack usage.
+*/
+irq_stack_in_use = &union_ptr->lock;
+if (!raw_spin_trylock(irq_stack_in_use)) {
+void (*direct_call)(unsigned long p1) = func;
+
+/* We are using the IRQ stack already.
+* Do direct call on current stack. */
+direct_call(param1);
+return;
+}

 /* This is where we switch to the IRQ stack. */
 call_on_stack(param1, func, irq_stack);

-*irq_stack_start = 0;
+__inc_irq_stat(irq_stack_counter);
+
+/* free up irq stack usage. */
+do_raw_spin_unlock(irq_stack_in_use);
 }

+asmlinkage void do_softirq(void)
+{
+__u32 pending;
+unsigned long flags;
+
+if (in_interrupt())
+return;
+
+local_irq_save(flags);
+
+pending = local_softirq_pending();
+
+if (pending)
+execute_on_irq_stack(__do_softirq, 0);
+
+local_irq_restore(flags);
+}
 #endif /* CONFIG_IRQSTACKS */
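execute_on_irq_stack() above replaces a BUG_ON recursion check with a trylock: if the per-CPU IRQ stack is already in use, the function is simply called on the current stack. A compact sketch of that fallback pattern, using a pthread mutex as a stand-in for the kernel's raw spinlock:

    /* Sketch under stated assumptions; not the kernel's code. */
    #include <pthread.h>

    static pthread_mutex_t irq_stack_lock = PTHREAD_MUTEX_INITIALIZER;

    static void run_on_irq_stack(void (*func)(unsigned long), unsigned long p1)
    {
        if (pthread_mutex_trylock(&irq_stack_lock) != 0) {
            func(p1);    /* stack already in use: call directly */
            return;
        }
        func(p1);        /* the kernel would switch stacks first */
        pthread_mutex_unlock(&irq_stack_lock);
    }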
@@ -1069,7 +1069,7 @@ void flush_tlb_all(void)
 {
 int do_recycle;

-inc_irq_stat(irq_tlb_count);
+__inc_irq_stat(irq_tlb_count);
 do_recycle = 0;
 spin_lock(&sid_lock);
 if (dirty_space_ids > RECYCLE_THRESHOLD) {
@@ -1090,7 +1090,7 @@ void flush_tlb_all(void)
 #else
 void flush_tlb_all(void)
 {
-inc_irq_stat(irq_tlb_count);
+__inc_irq_stat(irq_tlb_count);
 spin_lock(&sid_lock);
 flush_tlb_all_local(NULL);
 recycle_sids();
@@ -262,8 +262,31 @@ config PPC_EARLY_DEBUG_OPAL_HVSI
 Select this to enable early debugging for the PowerNV platform
 using an "hvsi" console

+config PPC_EARLY_DEBUG_MEMCONS
+bool "In memory console"
+help
+Select this to enable early debugging using an in memory console.
+This console provides input and output buffers stored within the
+kernel BSS and should be safe to select on any system. A debugger
+can then be used to read kernel output or send input to the console.
 endchoice

+config PPC_MEMCONS_OUTPUT_SIZE
+int "In memory console output buffer size"
+depends on PPC_EARLY_DEBUG_MEMCONS
+default 4096
+help
+Selects the size of the output buffer (in bytes) of the in memory
+console.
+
+config PPC_MEMCONS_INPUT_SIZE
+int "In memory console input buffer size"
+depends on PPC_EARLY_DEBUG_MEMCONS
+default 128
+help
+Selects the size of the input buffer (in bytes) of the in memory
+console.
+
 config PPC_EARLY_DEBUG_OPAL
 def_bool y
 depends on PPC_EARLY_DEBUG_OPAL_RAW || PPC_EARLY_DEBUG_OPAL_HVSI
10
arch/powerpc/include/asm/context_tracking.h
Normal file
@@ -0,0 +1,10 @@
+#ifndef _ASM_POWERPC_CONTEXT_TRACKING_H
+#define _ASM_POWERPC_CONTEXT_TRACKING_H
+
+#ifdef CONFIG_CONTEXT_TRACKING
+#define SCHEDULE_USER bl .schedule_user
+#else
+#define SCHEDULE_USER bl .schedule
+#endif
+
+#endif
@@ -52,6 +52,7 @@
 #define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000)
 #define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000)
 #define FW_FEATURE_PRRN ASM_CONST(0x0000000200000000)
+#define FW_FEATURE_OPALv3 ASM_CONST(0x0000000400000000)

 #ifndef __ASSEMBLY__

@@ -69,7 +70,8 @@ enum {
 FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY |
 FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN,
 FW_FEATURE_PSERIES_ALWAYS = 0,
-FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2,
+FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2 |
+FW_FEATURE_OPALv3,
 FW_FEATURE_POWERNV_ALWAYS = 0,
 FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
 FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
@@ -96,11 +96,12 @@ static inline bool arch_irqs_disabled(void)
 #endif

 #define hard_irq_disable() do { \
+u8 _was_enabled = get_paca()->soft_enabled; \
 __hard_irq_disable(); \
-if (local_paca->soft_enabled) \
-trace_hardirqs_off(); \
 get_paca()->soft_enabled = 0; \
 get_paca()->irq_happened |= PACA_IRQ_HARD_DIS; \
+if (_was_enabled) \
+trace_hardirqs_off(); \
 } while(0)

 static inline bool lazy_irq_pending(void)
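The hard_irq_disable() change above samples the soft-enable state before hard-disabling, and only calls trace_hardirqs_off() when interrupts were actually enabled, so the tracer sees the transition exactly once. A reduced sketch of the save-then-act ordering, with stand-in types and callbacks:

    /* Illustrative stand-ins for the powerpc PACA accessors. */
    struct paca_sketch { unsigned char soft_enabled; };

    static void hard_irq_disable_sketch(struct paca_sketch *paca,
                                        void (*hard_disable)(void),
                                        void (*trace_off)(void))
    {
        unsigned char was_enabled = paca->soft_enabled; /* sample first */

        hard_disable();
        paca->soft_enabled = 0;
        if (was_enabled)    /* trace only on a real transition */
            trace_off();
    }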
@@ -243,7 +243,8 @@ enum OpalMCE_TlbErrorType {

 enum OpalThreadStatus {
 OPAL_THREAD_INACTIVE = 0x0,
-OPAL_THREAD_STARTED = 0x1
+OPAL_THREAD_STARTED = 0x1,
+OPAL_THREAD_UNAVAILABLE = 0x2 /* opal-v3 */
 };

 enum OpalPciBusCompare {
@@ -563,6 +564,8 @@ extern void opal_nvram_init(void);

 extern int opal_machine_check(struct pt_regs *regs);

+extern void opal_shutdown(void);
+
 #endif /* __ASSEMBLY__ */

 #endif /* __OPAL_H */
@@ -186,7 +186,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,

 static inline pgtable_t pmd_pgtable(pmd_t pmd)
 {
-return (pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE);
+return (pgtable_t)(pmd_val(pmd) & ~PMD_MASKED_BITS);
 }

 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
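The pmd_pgtable() fix above swaps an arithmetic mask, AND with the negated table size, for an explicit AND with ~PMD_MASKED_BITS; the two only agree when the masked-off flag bits happen to equal size - 1. A small host-side sketch with hypothetical values:

    /* Sketch only: both constants below are assumed, not the kernel's. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long pmd_val = 0xabcd1234UL;     /* hypothetical PMD word */
        unsigned long table_size = 8 * 512;       /* sizeof(pte_t)*PTRS_PER_PTE, assumed */
        unsigned long pmd_masked_bits = 0xfffUL;  /* hypothetical low flag bits */

        /* old form: AND with minus the size; new form: explicit mask */
        printf("%#lx vs %#lx\n",
               pmd_val & -table_size,
               pmd_val & ~pmd_masked_bits);
        return 0;
    }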
@@ -47,7 +47,7 @@
 * generic accessors and iterators here
 */
 #define __real_pte(e,p) ((real_pte_t) { \
-(e), ((e) & _PAGE_COMBO) ? \
+(e), (pte_val(e) & _PAGE_COMBO) ? \
 (pte_val(*((p) + PTRS_PER_PTE))) : 0 })
 #define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \
 (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
@@ -264,6 +264,8 @@ extern void rtas_progress(char *s, unsigned short hex);
 extern void rtas_initialize(void);
 extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data);
 extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data);
+extern int rtas_online_cpus_mask(cpumask_var_t cpus);
+extern int rtas_offline_cpus_mask(cpumask_var_t cpus);
 extern int rtas_ibm_suspend_me(struct rtas_args *);

 struct rtc_time;
@@ -97,7 +97,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
 #define TIF_SINGLESTEP 8 /* singlestepping active */
-#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
+#define TIF_NOHZ 9 /* in adaptive nohz mode */
 #define TIF_SECCOMP 10 /* secure computing */
 #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
 #define TIF_NOERROR 12 /* Force successful syscall return */
@@ -106,6 +106,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
 #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
 for stack store? */
+#define TIF_MEMDIE 17 /* is terminating due to OOM killer */

 /* as above, but as bit values */
 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -124,8 +125,10 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_UPROBE (1<<TIF_UPROBE)
 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
+#define _TIF_NOHZ (1<<TIF_NOHZ)
 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
-_TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
+_TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
+_TIF_NOHZ)

 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
@@ -52,6 +52,7 @@ extern void __init udbg_init_40x_realmode(void);
 extern void __init udbg_init_cpm(void);
 extern void __init udbg_init_usbgecko(void);
 extern void __init udbg_init_wsp(void);
+extern void __init udbg_init_memcons(void);
 extern void __init udbg_init_ehv_bc(void);
 extern void __init udbg_init_ps3gelic(void);
 extern void __init udbg_init_debug_opal_raw(void);
@@ -439,8 +439,6 @@ ret_from_fork:

 ret_from_kernel_thread:
 REST_NVGPRS(r1)
 bl schedule_tail
-li r3,0
-stw r3,0(r1)
 mtlr r14
 mr r3,r15
 PPC440EP_ERR42
@@ -33,6 +33,7 @@
 #include <asm/irqflags.h>
 #include <asm/ftrace.h>
 #include <asm/hw_irq.h>
+#include <asm/context_tracking.h>

 /*
 * System calls.
@@ -376,8 +377,6 @@ _GLOBAL(ret_from_fork)
 _GLOBAL(ret_from_kernel_thread)
 bl .schedule_tail
 REST_NVGPRS(r1)
-li r3,0
-std r3,0(r1)
 ld r14, 0(r14)
 mtlr r14
 mr r3,r15
@@ -634,7 +633,7 @@ _GLOBAL(ret_from_except_lite)
 andi. r0,r4,_TIF_NEED_RESCHED
 beq 1f
 bl .restore_interrupts
-bl .schedule
+SCHEDULE_USER
 b .ret_from_except_lite

 1: bl .save_nvgprs
@@ -489,7 +489,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 */

 mfspr r14,SPRN_DBSR /* check single-step/branch taken */
-andis. r15,r14,DBSR_IC@h
+andis. r15,r14,(DBSR_IC|DBSR_BT)@h
 beq+ 1f

 LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
@@ -500,7 +500,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 bge+ cr1,1f

 /* here it looks like we got an inappropriate debug exception. */
-lis r14,DBSR_IC@h /* clear the IC event */
+lis r14,(DBSR_IC|DBSR_BT)@h /* clear the event */
 rlwinm r11,r11,0,~MSR_DE /* clear DE in the CSRR1 value */
 mtspr SPRN_DBSR,r14
 mtspr SPRN_CSRR1,r11
@@ -555,7 +555,7 @@ kernel_dbg_exc:
 */

 mfspr r14,SPRN_DBSR /* check single-step/branch taken */
-andis. r15,r14,DBSR_IC@h
+andis. r15,r14,(DBSR_IC|DBSR_BT)@h
 beq+ 1f

 LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
@@ -566,7 +566,7 @@ kernel_dbg_exc:
 bge+ cr1,1f

 /* here it looks like we got an inappropriate debug exception. */
-lis r14,DBSR_IC@h /* clear the IC event */
+lis r14,(DBSR_IC|DBSR_BT)@h /* clear the event */
 rlwinm r11,r11,0,~MSR_DE /* clear DE in the DSRR1 value */
 mtspr SPRN_DBSR,r14
 mtspr SPRN_DSRR1,r11
@@ -17,6 +17,7 @@
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/cpu.h>
+#include <linux/hardirq.h>

 #include <asm/page.h>
 #include <asm/current.h>
@@ -335,10 +336,13 @@ void default_machine_kexec(struct kimage *image)
 pr_debug("kexec: Starting switchover sequence.\n");

 /* switch to a staticly allocated stack. Based on irq stack code.
+ * We setup preempt_count to avoid using VMX in memcpy.
 * XXX: the task struct will likely be invalid once we do the copy!
 */
 kexec_stack.thread_info.task = current_thread_info()->task;
 kexec_stack.thread_info.flags = 0;
+kexec_stack.thread_info.preempt_count = HARDIRQ_OFFSET;
+kexec_stack.thread_info.cpu = current_thread_info()->cpu;

 /* We need a static PACA, too; copy this CPU's PACA over and switch to
 * it. Also poison per_cpu_offset to catch anyone using non-static
@@ -657,6 +657,17 @@ _GLOBAL(__ucmpdi2)
 li r3,2
 blr

+_GLOBAL(__bswapdi2)
+rotlwi r9,r4,8
+rotlwi r10,r3,8
+rlwimi r9,r4,24,0,7
+rlwimi r10,r3,24,0,7
+rlwimi r9,r4,24,16,23
+rlwimi r10,r3,24,16,23
+mr r3,r9
+mr r4,r10
+blr
+
 _GLOBAL(abs)
 srawi r4,r3,31
 xor r3,r3,r4
@@ -234,6 +234,17 @@ _GLOBAL(__flush_dcache_icache)
 isync
 blr

+_GLOBAL(__bswapdi2)
+srdi r8,r3,32
+rlwinm r7,r3,8,0xffffffff
+rlwimi r7,r3,24,0,7
+rlwinm r9,r8,8,0xffffffff
+rlwimi r7,r3,24,16,23
+rlwimi r9,r8,24,0,7
+rlwimi r9,r8,24,16,23
+sldi r7,r7,32
+or r3,r7,r9
+blr
+
 #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
 /*
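Both __bswapdi2 implementations added above must produce a full 64-bit byte reversal. A quick host-side cross-check of that contract against GCC's __builtin_bswap64 (the loop below is the definition of the operation, not the kernel's code):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t bswapdi2_ref(uint64_t x)
    {
        uint64_t r = 0;
        for (int i = 0; i < 8; i++)
            r |= ((x >> (8 * i)) & 0xff) << (8 * (7 - i));
        return r;
    }

    int main(void)
    {
        uint64_t v = 0x0102030405060708ULL;
        assert(bswapdi2_ref(v) == __builtin_bswap64(v));
        assert(bswapdi2_ref(v) == 0x0807060504030201ULL);
        return 0;
    }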
@@ -359,7 +359,6 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
 enum pci_mmap_state mmap_state,
 int write_combine)
 {
-unsigned long prot = pgprot_val(protection);

 /* Write combine is always 0 on non-memory space mappings. On
 * memory space, if the user didn't pass 1, we check for a
@@ -376,9 +375,9 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,

 /* XXX would be nice to have a way to ask for write-through */
 if (write_combine)
-return pgprot_noncached_wc(prot);
+return pgprot_noncached_wc(protection);
 else
-return pgprot_noncached(prot);
+return pgprot_noncached(protection);
 }

 /*
@@ -143,7 +143,8 @@ EXPORT_SYMBOL(__lshrdi3);
 int __ucmpdi2(unsigned long long, unsigned long long);
 EXPORT_SYMBOL(__ucmpdi2);
 #endif
-
+long long __bswapdi2(long long);
+EXPORT_SYMBOL(__bswapdi2);
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(memmove);
@@ -339,6 +339,13 @@ static void set_debug_reg_defaults(struct thread_struct *thread)

 static void prime_debug_regs(struct thread_struct *thread)
 {
+/*
+ * We could have inherited MSR_DE from userspace, since
+ * it doesn't get cleared on exception entry. Make sure
+ * MSR_DE is clear before we enable any debug events.
+ */
+mtmsr(mfmsr() & ~MSR_DE);
+
 mtspr(SPRN_IAC1, thread->iac1);
 mtspr(SPRN_IAC2, thread->iac2);
 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
@@ -971,6 +978,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 * do some house keeping and then return from the fork or clone
 * system call, using the stack frame created above.
 */
+((unsigned long *)sp)[0] = 0;
 sp -= sizeof(struct pt_regs);
 kregs = (struct pt_regs *) sp;
 sp -= STACK_FRAME_OVERHEAD;
@@ -32,6 +32,7 @@
 #include <trace/syscall.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/perf_event.h>
+#include <linux/context_tracking.h>

 #include <asm/uaccess.h>
 #include <asm/page.h>
@@ -1788,6 +1789,8 @@ long do_syscall_trace_enter(struct pt_regs *regs)
 {
 long ret = 0;

+user_exit();
+
 secure_computing_strict(regs->gpr[0]);

 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
@@ -1832,4 +1835,6 @@ void do_syscall_trace_leave(struct pt_regs *regs)
 step = test_thread_flag(TIF_SINGLESTEP);
 if (step || test_thread_flag(TIF_SYSCALL_TRACE))
 tracehook_report_syscall_exit(regs, step);
+
+user_enter();
 }
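The ptrace hunks are part of the context-tracking series: user_exit() on syscall entry tells RCU/nohz that the CPU left user mode, and user_enter() on the exit path reverses it. The shape of that bracketing, reduced to stubs:

    /* Stubs for illustration; the kernel's user_exit()/user_enter()
     * live in the context-tracking code included above. */
    static void user_exit_stub(void)  { /* leaving user context */ }
    static void user_enter_stub(void) { /* returning to user context */ }

    static long syscall_trace_path(long (*do_syscall)(void))
    {
        long ret;

        user_exit_stub();     /* entering kernel context */
        ret = do_syscall();
        user_enter_stub();    /* going back to user context */
        return ret;
    }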
@@ -19,6 +19,7 @@
 #include <linux/init.h>
 #include <linux/capability.h>
 #include <linux/delay.h>
+#include <linux/cpu.h>
 #include <linux/smp.h>
 #include <linux/completion.h>
 #include <linux/cpumask.h>
@@ -807,6 +808,95 @@ static void rtas_percpu_suspend_me(void *info)
 __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1);
 }

+enum rtas_cpu_state {
+DOWN,
+UP,
+};
+
+#ifndef CONFIG_SMP
+static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
+cpumask_var_t cpus)
+{
+if (!cpumask_empty(cpus)) {
+cpumask_clear(cpus);
+return -EINVAL;
+} else
+return 0;
+}
+#else
+/* On return cpumask will be altered to indicate CPUs changed.
+* CPUs with states changed will be set in the mask,
+* CPUs with status unchanged will be unset in the mask. */
+static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
+cpumask_var_t cpus)
+{
+int cpu;
+int cpuret = 0;
+int ret = 0;
+
+if (cpumask_empty(cpus))
+return 0;
+
+for_each_cpu(cpu, cpus) {
+switch (state) {
+case DOWN:
+cpuret = cpu_down(cpu);
+break;
+case UP:
+cpuret = cpu_up(cpu);
+break;
+}
+if (cpuret) {
+pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
+__func__,
+((state == UP) ? "up" : "down"),
+cpu, cpuret);
+if (!ret)
+ret = cpuret;
+if (state == UP) {
+/* clear bits for unchanged cpus, return */
+cpumask_shift_right(cpus, cpus, cpu);
+cpumask_shift_left(cpus, cpus, cpu);
+break;
+} else {
+/* clear bit for unchanged cpu, continue */
+cpumask_clear_cpu(cpu, cpus);
+}
+}
+}
+
+return ret;
+}
+#endif
+
+int rtas_online_cpus_mask(cpumask_var_t cpus)
+{
+int ret;
+
+ret = rtas_cpu_state_change_mask(UP, cpus);
+
+if (ret) {
+cpumask_var_t tmp_mask;
+
+if (!alloc_cpumask_var(&tmp_mask, GFP_TEMPORARY))
+return ret;
+
+/* Use tmp_mask to preserve cpus mask from first failure */
+cpumask_copy(tmp_mask, cpus);
+rtas_offline_cpus_mask(tmp_mask);
+free_cpumask_var(tmp_mask);
+}
+
+return ret;
+}
+EXPORT_SYMBOL(rtas_online_cpus_mask);
+
+int rtas_offline_cpus_mask(cpumask_var_t cpus)
+{
+return rtas_cpu_state_change_mask(DOWN, cpus);
+}
+EXPORT_SYMBOL(rtas_offline_cpus_mask);
+
 int rtas_ibm_suspend_me(struct rtas_args *args)
 {
 long state;
@@ -814,6 +904,8 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
 struct rtas_suspend_me_data data;
 DECLARE_COMPLETION_ONSTACK(done);
+cpumask_var_t offline_mask;
+int cpuret;

 if (!rtas_service_present("ibm,suspend-me"))
 return -ENOSYS;
@@ -837,11 +929,24 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
 return 0;
 }

+if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
+return -ENOMEM;
+
 atomic_set(&data.working, 0);
 atomic_set(&data.done, 0);
 atomic_set(&data.error, 0);
 data.token = rtas_token("ibm,suspend-me");
 data.complete = &done;

+/* All present CPUs must be online */
+cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
+cpuret = rtas_online_cpus_mask(offline_mask);
+if (cpuret) {
+pr_err("%s: Could not bring present CPUs online.\n", __func__);
+atomic_set(&data.error, cpuret);
+goto out;
+}
+
 stop_topology_update();

 /* Call function on all CPUs. One of us will make the
@@ -857,6 +962,14 @@ int rtas_ibm_suspend_me(struct rtas_args *args)

 start_topology_update();

+/* Take down CPUs not online prior to suspend */
+cpuret = rtas_offline_cpus_mask(offline_mask);
+if (cpuret)
+pr_warn("%s: Could not restore CPUs to offline state.\n",
+__func__);
+
+out:
+free_cpumask_var(offline_mask);
 return atomic_read(&data.error);
 }
 #else /* CONFIG_PPC_PSERIES */
|
||||
|
||||
/* Array sizes */
|
||||
#define VALIDATE_BUF_SIZE 4096
|
||||
#define VALIDATE_MSG_LEN 256
|
||||
#define RTAS_MSG_MAXLEN 64
|
||||
|
||||
/* Quirk - RTAS requires 4k list length and block size */
|
||||
@ -466,7 +467,7 @@ static void validate_flash(struct rtas_validate_flash_t *args_buf)
|
||||
}
|
||||
|
||||
static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf,
|
||||
char *msg)
|
||||
char *msg, int msglen)
|
||||
{
|
||||
int n;
|
||||
|
||||
@ -474,7 +475,8 @@ static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf,
|
||||
n = sprintf(msg, "%d\n", args_buf->update_results);
|
||||
if ((args_buf->update_results >= VALIDATE_CUR_UNKNOWN) ||
|
||||
(args_buf->update_results == VALIDATE_TMP_UPDATE))
|
||||
n += sprintf(msg + n, "%s\n", args_buf->buf);
|
||||
n += snprintf(msg + n, msglen - n, "%s\n",
|
||||
args_buf->buf);
|
||||
} else {
|
||||
n = sprintf(msg, "%d\n", args_buf->status);
|
||||
}
|
||||
@ -486,11 +488,11 @@ static ssize_t validate_flash_read(struct file *file, char __user *buf,
|
||||
{
|
||||
struct rtas_validate_flash_t *const args_buf =
|
||||
&rtas_validate_flash_data;
|
||||
char msg[RTAS_MSG_MAXLEN];
|
||||
char msg[VALIDATE_MSG_LEN];
|
||||
int msglen;
|
||||
|
||||
mutex_lock(&rtas_validate_flash_mutex);
|
||||
msglen = get_validate_flash_msg(args_buf, msg);
|
||||
msglen = get_validate_flash_msg(args_buf, msg, VALIDATE_MSG_LEN);
|
||||
mutex_unlock(&rtas_validate_flash_mutex);
|
||||
|
||||
return simple_read_from_buffer(buf, count, ppos, msg, msglen);
|
||||
|
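The overflow this hunk fixes, in miniature: sprintf into a fixed buffer trusts the payload's length, snprintf bounds the write (its return value is still the untruncated length). The buffer size is the one named in the diff:

    #include <stdio.h>
    #include <string.h>

    #define VALIDATE_MSG_LEN 256

    int main(void)
    {
        char msg[VALIDATE_MSG_LEN];
        char payload[512];
        int n;

        memset(payload, 'A', sizeof(payload) - 1);
        payload[sizeof(payload) - 1] = '\0';

        n = snprintf(msg, sizeof(msg), "%d\n", 0);
        /* bounded append: cannot write past msg even with a big payload */
        n += snprintf(msg + n, sizeof(msg) - n, "%s\n", payload);
        printf("requested %d, stored at most %zu\n", n, sizeof(msg) - 1);
        return 0;
    }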
@@ -13,6 +13,7 @@
 #include <linux/signal.h>
 #include <linux/uprobes.h>
 #include <linux/key.h>
+#include <linux/context_tracking.h>
 #include <asm/hw_breakpoint.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -24,7 +25,7 @@
 * through debug.exception-trace sysctl.
 */

-int show_unhandled_signals = 0;
+int show_unhandled_signals = 1;

 /*
 * Allocate space for the signal frame
@@ -159,6 +160,8 @@ static int do_signal(struct pt_regs *regs)

 void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
 {
+user_exit();
+
 if (thread_info_flags & _TIF_UPROBE)
 uprobe_notify_resume(regs);

@@ -169,4 +172,6 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
 clear_thread_flag(TIF_NOTIFY_RESUME);
 tracehook_notify_resume(regs);
 }
+
+user_enter();
 }
@@ -35,6 +35,7 @@
 #include <linux/kdebug.h>
 #include <linux/debugfs.h>
 #include <linux/ratelimit.h>
+#include <linux/context_tracking.h>

 #include <asm/emulated_ops.h>
 #include <asm/pgtable.h>
@@ -667,6 +668,7 @@ int machine_check_generic(struct pt_regs *regs)

 void machine_check_exception(struct pt_regs *regs)
 {
+enum ctx_state prev_state = exception_enter();
 int recover = 0;

 __get_cpu_var(irq_stat).mce_exceptions++;
@@ -683,7 +685,7 @@ void machine_check_exception(struct pt_regs *regs)
 recover = cur_cpu_spec->machine_check(regs);

 if (recover > 0)
-return;
+goto bail;

 #if defined(CONFIG_8xx) && defined(CONFIG_PCI)
 /* the qspan pci read routines can cause machine checks -- Cort
@@ -693,20 +695,23 @@ void machine_check_exception(struct pt_regs *regs)
 * -- BenH
 */
 bad_page_fault(regs, regs->dar, SIGBUS);
-return;
+goto bail;
 #endif

 if (debugger_fault_handler(regs))
-return;
+goto bail;

 if (check_io_access(regs))
-return;
+goto bail;

 die("Machine check", regs, SIGBUS);

 /* Must die if the interrupt is not recoverable */
 if (!(regs->msr & MSR_RI))
 panic("Unrecoverable Machine check");
+
+bail:
+exception_exit(prev_state);
 }

 void SMIException(struct pt_regs *regs)
@@ -716,20 +721,29 @@ void SMIException(struct pt_regs *regs)

 void unknown_exception(struct pt_regs *regs)
 {
+enum ctx_state prev_state = exception_enter();
+
 printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
 regs->nip, regs->msr, regs->trap);

 _exception(SIGTRAP, regs, 0, 0);
+
+exception_exit(prev_state);
 }

 void instruction_breakpoint_exception(struct pt_regs *regs)
 {
+enum ctx_state prev_state = exception_enter();
+
 if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
 5, SIGTRAP) == NOTIFY_STOP)
-return;
+goto bail;
 if (debugger_iabr_match(regs))
-return;
+goto bail;
 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
+
+bail:
+exception_exit(prev_state);
 }

 void RunModeException(struct pt_regs *regs)
@@ -739,15 +753,20 @@ void RunModeException(struct pt_regs *regs)

 void __kprobes single_step_exception(struct pt_regs *regs)
 {
+enum ctx_state prev_state = exception_enter();
+
 clear_single_step(regs);

 if (notify_die(DIE_SSTEP, "single_step", regs, 5,
 5, SIGTRAP) == NOTIFY_STOP)
-return;
+goto bail;
 if (debugger_sstep(regs))
-return;
+goto bail;

 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
+
+bail:
+exception_exit(prev_state);
 }

 /*
@@ -1005,6 +1024,7 @@ int is_valid_bugaddr(unsigned long addr)

 void __kprobes program_check_exception(struct pt_regs *regs)
 {
+enum ctx_state prev_state = exception_enter();
 unsigned int reason = get_reason(regs);
 extern int do_mathemu(struct pt_regs *regs);

@@ -1014,26 +1034,26 @@ void __kprobes program_check_exception(struct pt_regs *regs)
 if (reason & REASON_FP) {
 /* IEEE FP exception */
 parse_fpe(regs);
-return;
+goto bail;
 }
 if (reason & REASON_TRAP) {
 /* Debugger is first in line to stop recursive faults in
 * rcu_lock, notify_die, or atomic_notifier_call_chain */
 if (debugger_bpt(regs))
-return;
+goto bail;

 /* trap exception */
 if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
 == NOTIFY_STOP)
-return;
+goto bail;

 if (!(regs->msr & MSR_PR) && /* not user-mode */
 report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
 regs->nip += 4;
-return;
+goto bail;
 }
 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
-return;
+goto bail;
 }
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 if (reason & REASON_TM) {
@@ -1049,7 +1069,7 @@ void __kprobes program_check_exception(struct pt_regs *regs)
 if (!user_mode(regs) &&
 report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
 regs->nip += 4;
-return;
+goto bail;
 }
 /* If usermode caused this, it's done something illegal and
 * gets a SIGILL slap on the wrist. We call it an illegal
@@ -1059,7 +1079,7 @@ void __kprobes program_check_exception(struct pt_regs *regs)
 */
 if (user_mode(regs)) {
 _exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
-return;
+goto bail;
 } else {
 printk(KERN_EMERG "Unexpected TM Bad Thing exception "
 "at %lx (msr 0x%x)\n", regs->nip, reason);
@@ -1083,16 +1103,16 @@ void __kprobes program_check_exception(struct pt_regs *regs)
 switch (do_mathemu(regs)) {
 case 0:
 emulate_single_step(regs);
-return;
+goto bail;
 case 1: {
 int code = 0;
 code = __parse_fpscr(current->thread.fpscr.val);
 _exception(SIGFPE, regs, code, regs->nip);
-return;
+goto bail;
 }
 case -EFAULT:
 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
-return;
+goto bail;
 }
 /* fall through on any other errors */
 #endif /* CONFIG_MATH_EMULATION */
@@ -1103,10 +1123,10 @@ void __kprobes program_check_exception(struct pt_regs *regs)
 case 0:
 regs->nip += 4;
 emulate_single_step(regs);
-return;
+goto bail;
 case -EFAULT:
 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
-return;
+goto bail;
 }
 }

@@ -1114,10 +1134,14 @@ void __kprobes program_check_exception(struct pt_regs *regs)
 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
 else
 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
+
+bail:
+exception_exit(prev_state);
 }

 void alignment_exception(struct pt_regs *regs)
 {
+enum ctx_state prev_state = exception_enter();
 int sig, code, fixed = 0;

 /* We restore the interrupt state now */
@@ -1131,7 +1155,7 @@ void alignment_exception(struct pt_regs *regs)
 if (fixed == 1) {
 regs->nip += 4; /* skip over emulated instruction */
 emulate_single_step(regs);
-return;
+goto bail;
 }

 /* Operand address was bad */
@@ -1146,6 +1170,9 @@ void alignment_exception(struct pt_regs *regs)
 _exception(sig, regs, code, regs->dar);
 else
 bad_page_fault(regs, regs->dar, sig);
+
+bail:
+exception_exit(prev_state);
 }

 void StackOverflow(struct pt_regs *regs)
@@ -1174,23 +1201,32 @@ void trace_syscall(struct pt_regs *regs)

 void kernel_fp_unavailable_exception(struct pt_regs *regs)
 {
+enum ctx_state prev_state = exception_enter();
+
 printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
 "%lx at %lx\n", regs->trap, regs->nip);
 die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
+
+exception_exit(prev_state);
 }

 void altivec_unavailable_exception(struct pt_regs *regs)
 {
+enum ctx_state prev_state = exception_enter();
+
 if (user_mode(regs)) {
 /* A user program has executed an altivec instruction,
 but this kernel doesn't support altivec. */
 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
-return;
+goto bail;
 }

 printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
 "%lx at %lx\n", regs->trap, regs->nip);
 die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
+
+bail:
+exception_exit(prev_state);
 }

 void vsx_unavailable_exception(struct pt_regs *regs)
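The conversion applied throughout this file follows one skeleton: save the context-tracking state on entry, funnel every early return through a single "bail" label, and restore the state there. Reduced to stubs (the real enum ctx_state and hooks live in linux/context_tracking.h):

    typedef int ctx_state_sketch;
    static ctx_state_sketch enter_sketch(void) { return 0; }   /* stub */
    static void exit_sketch(ctx_state_sketch s) { (void)s; }   /* stub */

    static void some_exception_handler(int recoverable)
    {
        ctx_state_sketch prev_state = enter_sketch();

        if (!recoverable)
            goto bail;    /* was: return; */

        /* ... handle the exception ... */

    bail:
        exit_sketch(prev_state);
    }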
@@ -64,6 +64,9 @@ void __init udbg_early_init(void)
 udbg_init_usbgecko();
 #elif defined(CONFIG_PPC_EARLY_DEBUG_WSP)
 udbg_init_wsp();
+#elif defined(CONFIG_PPC_EARLY_DEBUG_MEMCONS)
+/* In memory console */
+udbg_init_memcons();
 #elif defined(CONFIG_PPC_EARLY_DEBUG_EHV_BC)
 udbg_init_ehv_bc();
 #elif defined(CONFIG_PPC_EARLY_DEBUG_PS3GELIC)
@@ -32,6 +32,7 @@
 #include <linux/perf_event.h>
 #include <linux/magic.h>
 #include <linux/ratelimit.h>
+#include <linux/context_tracking.h>

 #include <asm/firmware.h>
 #include <asm/page.h>
@@ -196,6 +197,7 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault)
 int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 unsigned long error_code)
 {
+enum ctx_state prev_state = exception_enter();
 struct vm_area_struct * vma;
 struct mm_struct *mm = current->mm;
 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
@@ -204,6 +206,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 int trap = TRAP(regs);
 int is_exec = trap == 0x400;
 int fault;
+int rc = 0;

 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
 /*
@@ -230,28 +233,30 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 * look at it
 */
 if (error_code & ICSWX_DSI_UCT) {
-int rc = acop_handle_fault(regs, address, error_code);
+rc = acop_handle_fault(regs, address, error_code);
 if (rc)
-return rc;
+goto bail;
 }
 #endif /* CONFIG_PPC_ICSWX */

 if (notify_page_fault(regs))
-return 0;
+goto bail;

 if (unlikely(debugger_fault_handler(regs)))
-return 0;
+goto bail;

 /* On a kernel SLB miss we can only check for a valid exception entry */
-if (!user_mode(regs) && (address >= TASK_SIZE))
-return SIGSEGV;
+if (!user_mode(regs) && (address >= TASK_SIZE)) {
+rc = SIGSEGV;
+goto bail;
+}

 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \
 defined(CONFIG_PPC_BOOK3S_64))
 if (error_code & DSISR_DABRMATCH) {
 /* breakpoint match */
 do_break(regs, address, error_code);
-return 0;
+goto bail;
 }
 #endif

@@ -260,8 +265,10 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 local_irq_enable();

 if (in_atomic() || mm == NULL) {
-if (!user_mode(regs))
-return SIGSEGV;
+if (!user_mode(regs)) {
+rc = SIGSEGV;
+goto bail;
+}
 /* in_atomic() in user mode is really bad,
 as is current->mm == NULL. */
 printk(KERN_EMERG "Page fault in user mode with "
@@ -417,9 +424,11 @@ good_area:
 */
 fault = handle_mm_fault(mm, vma, address, flags);
 if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
-int rc = mm_fault_error(regs, address, fault);
+rc = mm_fault_error(regs, address, fault);
 if (rc >= MM_FAULT_RETURN)
-return rc;
+goto bail;
+else
+rc = 0;
 }

 /*
@@ -454,7 +463,7 @@ good_area:
 }

 up_read(&mm->mmap_sem);
-return 0;
+goto bail;

 bad_area:
 up_read(&mm->mmap_sem);
@@ -463,7 +472,7 @@ bad_area_nosemaphore:
 /* User mode accesses cause a SIGSEGV */
 if (user_mode(regs)) {
 _exception(SIGSEGV, regs, code, address);
-return 0;
+goto bail;
 }

 if (is_exec && (error_code & DSISR_PROTFAULT))
@@ -471,7 +480,11 @@ bad_area_nosemaphore:
 " page (%lx) - exploit attempt? (uid: %d)\n",
 address, from_kuid(&init_user_ns, current_uid()));

-return SIGSEGV;
+rc = SIGSEGV;
+
+bail:
+exception_exit(prev_state);
+return rc;

 }
@@ -33,6 +33,7 @@
 #include <linux/init.h>
 #include <linux/signal.h>
 #include <linux/memblock.h>
+#include <linux/context_tracking.h>
 
 #include <asm/processor.h>
 #include <asm/pgtable.h>
@@ -954,6 +955,7 @@ void hash_failure_debug(unsigned long ea, unsigned long access,
  */
 int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 {
+	enum ctx_state prev_state = exception_enter();
 	pgd_t *pgdir;
 	unsigned long vsid;
 	struct mm_struct *mm;
@@ -973,7 +975,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 		mm = current->mm;
 		if (! mm) {
 			DBG_LOW(" user region with no mm !\n");
-			return 1;
+			rc = 1;
+			goto bail;
 		}
 		psize = get_slice_psize(mm, ea);
 		ssize = user_segment_size(ea);
@@ -992,19 +995,23 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 		/* Not a valid range
 		 * Send the problem up to do_page_fault
 		 */
-		return 1;
+		rc = 1;
+		goto bail;
 	}
 	DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);
 
 	/* Bad address. */
 	if (!vsid) {
 		DBG_LOW("Bad address!\n");
-		return 1;
+		rc = 1;
+		goto bail;
 	}
 	/* Get pgdir */
 	pgdir = mm->pgd;
-	if (pgdir == NULL)
-		return 1;
+	if (pgdir == NULL) {
+		rc = 1;
+		goto bail;
+	}
 
 	/* Check CPU locality */
 	tmp = cpumask_of(smp_processor_id());
@@ -1027,7 +1034,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugeshift);
 	if (ptep == NULL || !pte_present(*ptep)) {
 		DBG_LOW(" no PTE !\n");
-		return 1;
+		rc = 1;
+		goto bail;
 	}
 
 	/* Add _PAGE_PRESENT to the required access perm */
@@ -1038,13 +1046,16 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	 */
 	if (access & ~pte_val(*ptep)) {
 		DBG_LOW(" no access !\n");
-		return 1;
+		rc = 1;
+		goto bail;
 	}
 
 #ifdef CONFIG_HUGETLB_PAGE
-	if (hugeshift)
-		return __hash_page_huge(ea, access, vsid, ptep, trap, local,
-					ssize, hugeshift, psize);
+	if (hugeshift) {
+		rc = __hash_page_huge(ea, access, vsid, ptep, trap, local,
+					ssize, hugeshift, psize);
+		goto bail;
+	}
#endif /* CONFIG_HUGETLB_PAGE */
 
 #ifndef CONFIG_PPC_64K_PAGES
@@ -1124,6 +1135,9 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 		pte_val(*(ptep + PTRS_PER_PTE)));
 #endif
 	DBG_LOW(" -> rc=%d\n", rc);
+
+bail:
+	exception_exit(prev_state);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(hash_page);
@@ -1259,6 +1273,8 @@ void flush_hash_range(unsigned long number, int local)
  */
 void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
 {
+	enum ctx_state prev_state = exception_enter();
+
 	if (user_mode(regs)) {
 #ifdef CONFIG_PPC_SUBPAGE_PROT
 		if (rc == -2)
@@ -1268,6 +1284,8 @@ void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
 			_exception(SIGBUS, regs, BUS_ADRERR, address);
 	} else
 		bad_page_fault(regs, address, SIGBUS);
+
+	exception_exit(prev_state);
 }
 
 long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
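The hunks above replace every early return in do_page_fault() and hash_page() with a jump to a single bail label, so the context-tracking exit hook runs on every path out of a function that called exception_enter(). A minimal stand-alone sketch of the same single-exit pattern; the enter/exit helpers are hypothetical stand-ins for exception_enter()/exception_exit(), not kernel API:

	#include <stdio.h>

	/* Hypothetical stand-ins for exception_enter()/exception_exit() */
	static int fake_enter(void) { puts("enter"); return 42; }
	static void fake_exit(int state) { printf("exit (state %d)\n", state); }

	/* Every failure path funnels through "bail", so the paired exit
	 * hook runs exactly once, mirroring the reworked hash_page(). */
	static int handle(int input)
	{
		int state = fake_enter();
		int rc = 0;

		if (input < 0) {	/* was: return 1; */
			rc = 1;
			goto bail;
		}
		rc = input * 2;		/* the actual work */
	bail:
		fake_exit(state);
		return rc;
	}

	int main(void)
	{
		printf("%d %d\n", handle(-1), handle(3));
		return 0;
	}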
@@ -215,7 +215,8 @@ static void __meminit vmemmap_create_mapping(unsigned long start,
 						unsigned long phys)
 {
 	int mapped = htab_bolt_mapping(start, start + page_size, phys,
-				       PAGE_KERNEL, mmu_vmemmap_psize,
+				       pgprot_val(PAGE_KERNEL),
+				       mmu_vmemmap_psize,
 				       mmu_kernel_ssize);
 	BUG_ON(mapped < 0);
 }
@@ -13,11 +13,13 @@
 #include <linux/perf_event.h>
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
+#include <linux/uaccess.h>
 #include <asm/reg.h>
 #include <asm/pmc.h>
 #include <asm/machdep.h>
 #include <asm/firmware.h>
 #include <asm/ptrace.h>
+#include <asm/code-patching.h>
 
 #define BHRB_MAX_ENTRIES	32
 #define BHRB_TARGET		0x0000000000000002
@@ -100,6 +102,10 @@ static inline int siar_valid(struct pt_regs *regs)
 	return 1;
 }
 
+static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
+static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
+void power_pmu_flush_branch_stack(void) {}
+static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
 #endif /* CONFIG_PPC32 */
 
 static bool regs_use_siar(struct pt_regs *regs)
@@ -308,6 +314,159 @@ static inline int siar_valid(struct pt_regs *regs)
 	return 1;
 }
 
+
+/* Reset all possible BHRB entries */
+static void power_pmu_bhrb_reset(void)
+{
+	asm volatile(PPC_CLRBHRB);
+}
+
+static void power_pmu_bhrb_enable(struct perf_event *event)
+{
+	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+	if (!ppmu->bhrb_nr)
+		return;
+
+	/* Clear BHRB if we changed task context to avoid data leaks */
+	if (event->ctx->task && cpuhw->bhrb_context != event->ctx) {
+		power_pmu_bhrb_reset();
+		cpuhw->bhrb_context = event->ctx;
+	}
+	cpuhw->bhrb_users++;
+}
+
+static void power_pmu_bhrb_disable(struct perf_event *event)
+{
+	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+	if (!ppmu->bhrb_nr)
+		return;
+
+	cpuhw->bhrb_users--;
+	WARN_ON_ONCE(cpuhw->bhrb_users < 0);
+
+	if (!cpuhw->disabled && !cpuhw->bhrb_users) {
+		/* BHRB cannot be turned off when other
+		 * events are active on the PMU.
+		 */
+
+		/* avoid stale pointer */
+		cpuhw->bhrb_context = NULL;
+	}
+}
+
+/* Called from ctxsw to prevent one process's branch entries to
+ * mingle with the other process's entries during context switch.
+ */
+void power_pmu_flush_branch_stack(void)
+{
+	if (ppmu->bhrb_nr)
+		power_pmu_bhrb_reset();
+}
+/* Calculate the to address for a branch */
+static __u64 power_pmu_bhrb_to(u64 addr)
+{
+	unsigned int instr;
+	int ret;
+	__u64 target;
+
+	if (is_kernel_addr(addr))
+		return branch_target((unsigned int *)addr);
+
+	/* Userspace: need copy instruction here then translate it */
+	pagefault_disable();
+	ret = __get_user_inatomic(instr, (unsigned int __user *)addr);
+	if (ret) {
+		pagefault_enable();
+		return 0;
+	}
+	pagefault_enable();
+
+	target = branch_target(&instr);
+	if ((!target) || (instr & BRANCH_ABSOLUTE))
+		return target;
+
+	/* Translate relative branch target from kernel to user address */
+	return target - (unsigned long)&instr + addr;
+}
+
+/* Processing BHRB entries */
+void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
+{
+	u64 val;
+	u64 addr;
+	int r_index, u_index, pred;
+
+	r_index = 0;
+	u_index = 0;
+	while (r_index < ppmu->bhrb_nr) {
+		/* Assembly read function */
+		val = read_bhrb(r_index++);
+		if (!val)
+			/* Terminal marker: End of valid BHRB entries */
+			break;
+		else {
+			addr = val & BHRB_EA;
+			pred = val & BHRB_PREDICTION;
+
+			if (!addr)
+				/* invalid entry */
+				continue;
+
+			/* Branches are read most recent first (ie. mfbhrb 0 is
+			 * the most recent branch).
+			 * There are two types of valid entries:
+			 * 1) a target entry which is the to address of a
+			 *    computed goto like a blr,bctr,btar.  The next
+			 *    entry read from the bhrb will be branch
+			 *    corresponding to this target (ie. the actual
+			 *    blr/bctr/btar instruction).
+			 * 2) a from address which is an actual branch.  If a
+			 *    target entry proceeds this, then this is the
+			 *    matching branch for that target.  If this is not
+			 *    following a target entry, then this is a branch
+			 *    where the target is given as an immediate field
+			 *    in the instruction (ie. an i or b form branch).
+			 *    In this case we need to read the instruction from
+			 *    memory to determine the target/to address.
+			 */
+
+			if (val & BHRB_TARGET) {
+				/* Target branches use two entries
+				 * (ie. computed gotos/XL form)
+				 */
+				cpuhw->bhrb_entries[u_index].to = addr;
+				cpuhw->bhrb_entries[u_index].mispred = pred;
+				cpuhw->bhrb_entries[u_index].predicted = ~pred;
+
+				/* Get from address in next entry */
+				val = read_bhrb(r_index++);
+				addr = val & BHRB_EA;
+				if (val & BHRB_TARGET) {
+					/* Shouldn't have two targets in a
+					   row.. Reset index and try again */
+					r_index--;
+					addr = 0;
+				}
+				cpuhw->bhrb_entries[u_index].from = addr;
+			} else {
+				/* Branches to immediate field
+				   (ie I or B form) */
+				cpuhw->bhrb_entries[u_index].from = addr;
+				cpuhw->bhrb_entries[u_index].to =
+					power_pmu_bhrb_to(addr);
+				cpuhw->bhrb_entries[u_index].mispred = pred;
+				cpuhw->bhrb_entries[u_index].predicted = ~pred;
+			}
+			u_index++;
+
+		}
+	}
+	cpuhw->bhrb_stack.nr = u_index;
+	return;
+}
+
 #endif /* CONFIG_PPC64 */
 
 static void perf_event_interrupt(struct pt_regs *regs);
@@ -904,47 +1063,6 @@ static int collect_events(struct perf_event *group, int max_count,
 	return n;
 }
 
-/* Reset all possible BHRB entries */
-static void power_pmu_bhrb_reset(void)
-{
-	asm volatile(PPC_CLRBHRB);
-}
-
-void power_pmu_bhrb_enable(struct perf_event *event)
-{
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
-
-	if (!ppmu->bhrb_nr)
-		return;
-
-	/* Clear BHRB if we changed task context to avoid data leaks */
-	if (event->ctx->task && cpuhw->bhrb_context != event->ctx) {
-		power_pmu_bhrb_reset();
-		cpuhw->bhrb_context = event->ctx;
-	}
-	cpuhw->bhrb_users++;
-}
-
-void power_pmu_bhrb_disable(struct perf_event *event)
-{
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
-
-	if (!ppmu->bhrb_nr)
-		return;
-
-	cpuhw->bhrb_users--;
-	WARN_ON_ONCE(cpuhw->bhrb_users < 0);
-
-	if (!cpuhw->disabled && !cpuhw->bhrb_users) {
-		/* BHRB cannot be turned off when other
-		 * events are active on the PMU.
-		 */
-
-		/* avoid stale pointer */
-		cpuhw->bhrb_context = NULL;
-	}
-}
-
 /*
  * Add a event to the PMU.
  * If all events are not already frozen, then we disable and
@@ -1180,15 +1298,6 @@ int power_pmu_commit_txn(struct pmu *pmu)
 	return 0;
 }
 
-/* Called from ctxsw to prevent one process's branch entries to
- * mingle with the other process's entries during context switch.
- */
-void power_pmu_flush_branch_stack(void)
-{
-	if (ppmu->bhrb_nr)
-		power_pmu_bhrb_reset();
-}
-
 /*
  * Return 1 if we might be able to put event on a limited PMC,
 * or 0 if not.
@@ -1458,77 +1567,6 @@ struct pmu power_pmu = {
 	.flush_branch_stack	= power_pmu_flush_branch_stack,
 };
 
-/* Processing BHRB entries */
-void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
-{
-	u64 val;
-	u64 addr;
-	int r_index, u_index, target, pred;
-
-	r_index = 0;
-	u_index = 0;
-	while (r_index < ppmu->bhrb_nr) {
-		/* Assembly read function */
-		val = read_bhrb(r_index);
-
-		/* Terminal marker: End of valid BHRB entries */
-		if (val == 0) {
-			break;
-		} else {
-			/* BHRB field break up */
-			addr = val & BHRB_EA;
-			pred = val & BHRB_PREDICTION;
-			target = val & BHRB_TARGET;
-
-			/* Probable Missed entry: Not applicable for POWER8 */
-			if ((addr == 0) && (target == 0) && (pred == 1)) {
-				r_index++;
-				continue;
-			}
-
-			/* Real Missed entry: Power8 based missed entry */
-			if ((addr == 0) && (target == 1) && (pred == 1)) {
-				r_index++;
-				continue;
-			}
-
-			/* Reserved condition: Not a valid entry */
-			if ((addr == 0) && (target == 1) && (pred == 0)) {
-				r_index++;
-				continue;
-			}
-
-			/* Is a target address */
-			if (val & BHRB_TARGET) {
-				/* First address cannot be a target address */
-				if (r_index == 0) {
-					r_index++;
-					continue;
-				}
-
-				/* Update target address for the previous entry */
-				cpuhw->bhrb_entries[u_index - 1].to = addr;
-				cpuhw->bhrb_entries[u_index - 1].mispred = pred;
-				cpuhw->bhrb_entries[u_index - 1].predicted = ~pred;
-
-				/* Dont increment u_index */
-				r_index++;
-			} else {
-				/* Update address, flags for current entry */
-				cpuhw->bhrb_entries[u_index].from = addr;
-				cpuhw->bhrb_entries[u_index].mispred = pred;
-				cpuhw->bhrb_entries[u_index].predicted = ~pred;
-
-				/* Successfully popullated one entry */
-				u_index++;
-				r_index++;
-			}
-		}
-	}
-	cpuhw->bhrb_stack.nr = u_index;
-	return;
-}
-
 /*
  * A counter has overflowed; update its count and record
 * things if requested.  Note that interrupts are hard-disabled
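The new power_pmu_bhrb_read() above decodes the buffer in pairs: an entry with BHRB_TARGET set carries the "to" address of a computed goto and is followed by the entry holding the matching "from" address, while a plain entry is an immediate-form branch whose target has to be read back from the instruction itself. A user-space simulation of that pairing, assuming the mask values match the kernel's definitions and with read_bhrb() replaced by a fixed array:

	#include <stdio.h>
	#include <stdint.h>

	#define BHRB_TARGET	0x0000000000000002ULL
	#define BHRB_PREDICTION	0x0000000000000001ULL
	#define BHRB_EA		0xFFFFFFFFFFFFFFFCULL

	/* Toy stand-in for the mfbhrb-based read_bhrb(): entries come
	 * from a fixed array, most recent first, 0 terminates. */
	static uint64_t entries[] = {
		0x1008 | BHRB_TARGET,	  /* to-address of a computed goto */
		0x2000,			  /* the matching from-address */
		0x3000 | BHRB_PREDICTION, /* immediate-form branch, mispredicted */
		0,
	};

	static uint64_t read_bhrb(int i)
	{
		return entries[i];
	}

	int main(void)
	{
		int r_index = 0, nr = sizeof(entries) / sizeof(entries[0]);

		while (r_index < nr) {
			uint64_t val = read_bhrb(r_index++);
			if (!val)
				break;
			uint64_t addr = val & BHRB_EA;
			int pred = val & BHRB_PREDICTION;
			if (!addr)
				continue;
			if (val & BHRB_TARGET) {
				/* Two-entry form: this is "to", next is "from" */
				uint64_t to = addr;
				val = read_bhrb(r_index++);
				printf("from=%#llx to=%#llx mispred=%d\n",
				       (unsigned long long)(val & BHRB_EA),
				       (unsigned long long)to, pred);
			} else {
				/* One-entry form: "to" needs instruction decode */
				printf("from=%#llx to=<decode insn> mispred=%d\n",
				       (unsigned long long)addr, pred);
			}
		}
		return 0;
	}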
@@ -128,7 +128,7 @@ config PPC_RTAS_DAEMON
 
 config RTAS_PROC
 	bool "Proc interface to RTAS"
-	depends on PPC_RTAS
+	depends on PPC_RTAS && PROC_FS
 	default y
 
 config RTAS_FLASH
@@ -15,6 +15,7 @@
 #include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/interrupt.h>
+#include <linux/slab.h>
 #include <asm/opal.h>
 #include <asm/firmware.h>
 
@@ -28,6 +29,8 @@ struct opal {
 static struct device_node *opal_node;
 static DEFINE_SPINLOCK(opal_write_lock);
 extern u64 opal_mc_secondary_handler[];
+static unsigned int *opal_irqs;
+static unsigned int opal_irq_count;
 
 int __init early_init_dt_scan_opal(unsigned long node,
 				   const char *uname, int depth, void *data)
@@ -53,7 +56,11 @@ int __init early_init_dt_scan_opal(unsigned long node,
 		 opal.entry, entryp, entrysz);
 
 	powerpc_firmware_features |= FW_FEATURE_OPAL;
-	if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) {
+	if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) {
+		powerpc_firmware_features |= FW_FEATURE_OPALv2;
+		powerpc_firmware_features |= FW_FEATURE_OPALv3;
+		printk("OPAL V3 detected !\n");
+	} else if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) {
 		powerpc_firmware_features |= FW_FEATURE_OPALv2;
 		printk("OPAL V2 detected !\n");
 	} else {
@@ -144,6 +151,13 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
 		    rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) {
 		len = total_len;
 		rc = opal_console_write(vtermno, &len, data);
+
+		/* Closed or other error drop */
+		if (rc != OPAL_SUCCESS && rc != OPAL_BUSY &&
+		    rc != OPAL_BUSY_EVENT) {
+			written = total_len;
+			break;
+		}
 		if (rc == OPAL_SUCCESS) {
 			total_len -= len;
 			data += len;
@@ -316,6 +330,8 @@ static int __init opal_init(void)
 	irqs = of_get_property(opal_node, "opal-interrupts", &irqlen);
 	pr_debug("opal: Found %d interrupts reserved for OPAL\n",
 		 irqs ? (irqlen / 4) : 0);
+	opal_irq_count = irqlen / 4;
+	opal_irqs = kzalloc(opal_irq_count * sizeof(unsigned int), GFP_KERNEL);
 	for (i = 0; irqs && i < (irqlen / 4); i++, irqs++) {
 		unsigned int hwirq = be32_to_cpup(irqs);
 		unsigned int irq = irq_create_mapping(NULL, hwirq);
@@ -327,7 +343,19 @@ static int __init opal_init(void)
 		if (rc)
 			pr_warning("opal: Error %d requesting irq %d"
 				   " (0x%x)\n", rc, irq, hwirq);
+		opal_irqs[i] = irq;
 	}
 	return 0;
 }
 subsys_initcall(opal_init);
+
+void opal_shutdown(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < opal_irq_count; i++) {
+		if (opal_irqs[i])
+			free_irq(opal_irqs[i], 0);
+		opal_irqs[i] = 0;
+	}
+}
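The opal_put_chars() hunk makes the write loop treat anything other than success or busy as a dead console: the remaining bytes are counted as written and the loop exits instead of spinning forever. A stand-alone sketch of that retry-or-drop loop; fake_console_write() is a made-up stand-in for opal_console_write():

	#include <stdio.h>

	enum { OPAL_SUCCESS = 0, OPAL_BUSY = -1, OPAL_CLOSED = -2 };

	/* Made-up backend: writes 2 bytes per call, then reports closed. */
	static int fake_console_write(int *len)
	{
		static int calls;
		if (++calls > 2)
			return OPAL_CLOSED;
		if (*len > 2)
			*len = 2;
		return OPAL_SUCCESS;
	}

	/* Mirrors the retry-or-drop loop added to opal_put_chars(). */
	static int put_chars(int total_len)
	{
		int written = 0;

		while (total_len > 0) {
			int len = total_len;
			int rc = fake_console_write(&len);

			/* Closed or other error: count the remainder as
			 * consumed so the caller never spins on a dead
			 * console. */
			if (rc != OPAL_SUCCESS && rc != OPAL_BUSY) {
				written += total_len;
				break;
			}
			if (rc == OPAL_SUCCESS) {
				total_len -= len;
				written += len;
			}
		}
		return written;
	}

	int main(void)
	{
		printf("consumed %d of 5 bytes\n", put_chars(5));
		return 0;
	}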
@@ -1048,6 +1048,12 @@ static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus,
 	return phb->ioda.pe_rmap[(bus->number << 8) | devfn];
 }
 
+static void pnv_pci_ioda_shutdown(struct pnv_phb *phb)
+{
+	opal_pci_reset(phb->opal_id, OPAL_PCI_IODA_TABLE_RESET,
+		       OPAL_ASSERT_RESET);
+}
+
 void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type)
 {
 	struct pci_controller *hose;
@@ -1178,6 +1184,9 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type)
 	/* Setup TCEs */
 	phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
 
+	/* Setup shutdown function for kexec */
+	phb->shutdown = pnv_pci_ioda_shutdown;
+
 	/* Setup MSI support */
 	pnv_pci_init_ioda_msis(phb);
@@ -450,6 +450,18 @@ static void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
 		pnv_pci_dma_fallback_setup(hose, pdev);
 }
 
+void pnv_pci_shutdown(void)
+{
+	struct pci_controller *hose;
+
+	list_for_each_entry(hose, &hose_list, list_node) {
+		struct pnv_phb *phb = hose->private_data;
+
+		if (phb && phb->shutdown)
+			phb->shutdown(phb);
+	}
+}
+
 /* Fixup wrong class code in p7ioc and p8 root complex */
 static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
 {
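pnv_pci_shutdown() walks every host bridge and calls an optional per-PHB shutdown hook, so only controllers that registered one (here the IODA variant, via phb->shutdown) get their tables reset before kexec. A minimal sketch of that optional-callback walk; the types and names below are illustrative, not the kernel's:

	#include <stdio.h>
	#include <stddef.h>

	struct phb {
		const char *name;
		void (*shutdown)(struct phb *phb);	/* optional hook */
	};

	static void ioda_shutdown(struct phb *phb)
	{
		printf("resetting IODA tables on %s\n", phb->name);
	}

	int main(void)
	{
		struct phb phbs[] = {
			{ "phb0", ioda_shutdown },
			{ "phb1", NULL },		/* no hook registered */
		};
		size_t i;

		/* Mirrors pnv_pci_shutdown(): call the hook only when set */
		for (i = 0; i < sizeof(phbs) / sizeof(phbs[0]); i++)
			if (phbs[i].shutdown)
				phbs[i].shutdown(&phbs[i]);
		return 0;
	}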
@@ -86,6 +86,7 @@ struct pnv_phb {
 	void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev);
 	void (*fixup_phb)(struct pci_controller *hose);
 	u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn);
+	void (*shutdown)(struct pnv_phb *phb);
 
 	union {
 		struct {
@@ -158,4 +159,5 @@ extern void pnv_pci_init_ioda_hub(struct device_node *np);
 extern void pnv_pci_init_ioda2_phb(struct device_node *np);
 extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
 					u64 *startp, u64 *endp);
 
 #endif /* __POWERNV_PCI_H */
@@ -9,8 +9,10 @@ static inline void pnv_smp_init(void) { }
 
 #ifdef CONFIG_PCI
 extern void pnv_pci_init(void);
+extern void pnv_pci_shutdown(void);
 #else
 static inline void pnv_pci_init(void) { }
+static inline void pnv_pci_shutdown(void) { }
 #endif
 
 #endif /* _POWERNV_H */
@@ -78,7 +78,9 @@ static void pnv_show_cpuinfo(struct seq_file *m)
 	if (root)
 		model = of_get_property(root, "model", NULL);
 	seq_printf(m, "machine\t\t: PowerNV %s\n", model);
-	if (firmware_has_feature(FW_FEATURE_OPALv2))
+	if (firmware_has_feature(FW_FEATURE_OPALv3))
+		seq_printf(m, "firmware\t: OPAL v3\n");
+	else if (firmware_has_feature(FW_FEATURE_OPALv2))
 		seq_printf(m, "firmware\t: OPAL v2\n");
 	else if (firmware_has_feature(FW_FEATURE_OPAL))
 		seq_printf(m, "firmware\t: OPAL v1\n");
@@ -126,6 +128,17 @@ static void pnv_progress(char *s, unsigned short hex)
 {
 }
 
+static void pnv_shutdown(void)
+{
+	/* Let the PCI code clear up IODA tables */
+	pnv_pci_shutdown();
+
+	/* And unregister all OPAL interrupts so they don't fire
+	 * up while we kexec
+	 */
+	opal_shutdown();
+}
+
 #ifdef CONFIG_KEXEC
 static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
 {
@@ -187,6 +200,7 @@ define_machine(powernv) {
 	.init_IRQ		= pnv_init_IRQ,
 	.show_cpuinfo		= pnv_show_cpuinfo,
 	.progress		= pnv_progress,
+	.machine_shutdown	= pnv_shutdown,
 	.power_save		= power7_idle,
 	.calibrate_decr		= generic_calibrate_decr,
 #ifdef CONFIG_KEXEC
@@ -71,18 +71,68 @@ int pnv_smp_kick_cpu(int nr)
 
 	BUG_ON(nr < 0 || nr >= NR_CPUS);
 
-	/* On OPAL v2 the CPU are still spinning inside OPAL itself,
-	 * get them back now
+	/*
+	 * If we already started or OPALv2 is not supported, we just
+	 * kick the CPU via the PACA
 	 */
-	if (!paca[nr].cpu_start && firmware_has_feature(FW_FEATURE_OPALv2)) {
-		pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", nr, pcpu);
-		rc = opal_start_cpu(pcpu, start_here);
+	if (paca[nr].cpu_start || !firmware_has_feature(FW_FEATURE_OPALv2))
+		goto kick;
+
+	/*
+	 * At this point, the CPU can either be spinning on the way in
+	 * from kexec or be inside OPAL waiting to be started for the
+	 * first time. OPAL v3 allows us to query OPAL to know if it
+	 * has the CPUs, so we do that
+	 */
+	if (firmware_has_feature(FW_FEATURE_OPALv3)) {
+		uint8_t status;
+
+		rc = opal_query_cpu_status(pcpu, &status);
 		if (rc != OPAL_SUCCESS) {
-			pr_warn("OPAL Error %ld starting CPU %d\n",
+			pr_warn("OPAL Error %ld querying CPU %d state\n",
 				rc, nr);
 			return -ENODEV;
 		}
+
+		/*
+		 * Already started, just kick it, probably coming from
+		 * kexec and spinning
+		 */
+		if (status == OPAL_THREAD_STARTED)
+			goto kick;
+
+		/*
+		 * Available/inactive, let's kick it
+		 */
+		if (status == OPAL_THREAD_INACTIVE) {
+			pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n",
+				 nr, pcpu);
+			rc = opal_start_cpu(pcpu, start_here);
+			if (rc != OPAL_SUCCESS) {
+				pr_warn("OPAL Error %ld starting CPU %d\n",
+					rc, nr);
+				return -ENODEV;
+			}
+		} else {
+			/*
+			 * An unavailable CPU (or any other unknown status)
+			 * shouldn't be started. It should also
+			 * not be in the possible map but currently it can
+			 * happen
+			 */
+			pr_devel("OPAL: CPU %d (HW 0x%x) is unavailable"
+				 " (status %d)...\n", nr, pcpu, status);
+			return -ENODEV;
+		}
+	} else {
+		/*
+		 * On OPAL v2, we just kick it and hope for the best,
+		 * we must not test the error from opal_start_cpu() or
+		 * we would fail to get CPUs from kexec.
+		 */
+		opal_start_cpu(pcpu, start_here);
 	}
+kick:
 	return smp_generic_kick_cpu(nr);
 }
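On OPAL v3 firmware the kernel can ask whether a hardware thread is already started (the kexec case) or still inactive before deciding to call opal_start_cpu(); on v2 it has to kick blindly. A toy version of that decision ladder, with made-up status values standing in for the OPAL_THREAD_* constants:

	#include <stdio.h>

	enum { OPAL_THREAD_INACTIVE = 0, OPAL_THREAD_STARTED = 1,
	       OPAL_THREAD_UNAVAILABLE = 2 };

	/* Returns 0 when the CPU may be kicked via the PACA, -1 otherwise.
	 * Toy version of the pnv_smp_kick_cpu() ladder above. */
	static int decide_kick(int have_v3, int status)
	{
		if (!have_v3) {
			puts("v2: start blindly, then kick");
			return 0;
		}
		switch (status) {
		case OPAL_THREAD_STARTED:
			puts("v3: already started (kexec), just kick");
			return 0;
		case OPAL_THREAD_INACTIVE:
			puts("v3: inactive, ask OPAL to start it, then kick");
			return 0;
		default:
			puts("v3: unavailable, refuse to start");
			return -1;
		}
	}

	int main(void)
	{
		decide_kick(0, 0);
		decide_kick(1, OPAL_THREAD_STARTED);
		decide_kick(1, OPAL_THREAD_UNAVAILABLE);
		return 0;
	}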
@@ -18,6 +18,7 @@ config PPC_PSERIES
 	select PPC_PCI_CHOICE if EXPERT
 	select ZLIB_DEFLATE
 	select PPC_DOORBELL
+	select HAVE_CONTEXT_TRACKING
 	default y
 
 config PPC_SPLPAR
@@ -16,6 +16,7 @@
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
 
+#include <linux/cpu.h>
 #include <linux/delay.h>
 #include <linux/suspend.h>
 #include <linux/stat.h>
@@ -126,11 +127,15 @@ static ssize_t store_hibernate(struct device *dev,
 			       struct device_attribute *attr,
 			       const char *buf, size_t count)
 {
+	cpumask_var_t offline_mask;
 	int rc;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
+	if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
+		return -ENOMEM;
+
 	stream_id = simple_strtoul(buf, NULL, 16);
 
 	do {
@@ -140,15 +145,32 @@ static ssize_t store_hibernate(struct device *dev,
 	} while (rc == -EAGAIN);
 
 	if (!rc) {
+		/* All present CPUs must be online */
+		cpumask_andnot(offline_mask, cpu_present_mask,
+				cpu_online_mask);
+		rc = rtas_online_cpus_mask(offline_mask);
+		if (rc) {
+			pr_err("%s: Could not bring present CPUs online.\n",
+					__func__);
+			goto out;
+		}
+
 		stop_topology_update();
 		rc = pm_suspend(PM_SUSPEND_MEM);
 		start_topology_update();
+
+		/* Take down CPUs not online prior to suspend */
+		if (!rtas_offline_cpus_mask(offline_mask))
+			pr_warn("%s: Could not restore CPUs to offline "
+					"state.\n", __func__);
 	}
 
 	stream_id = 0;
 
 	if (!rc)
 		rc = count;
+out:
+	free_cpumask_var(offline_mask);
 	return rc;
 }
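store_hibernate() now computes the set of present-but-offline CPUs once, onlines them for the suspend, and uses the same mask afterwards to restore the previous offline state. The mask arithmetic is simply present AND NOT online; a sketch with plain bitmasks (cpumask_andnot() does the same per-bit operation over arbitrarily wide masks):

	#include <stdio.h>

	int main(void)
	{
		unsigned int present = 0x0F;	/* CPUs 0-3 exist */
		unsigned int online  = 0x05;	/* CPUs 0 and 2 are up */

		/* cpumask_andnot(offline_mask, present, online) */
		unsigned int offline_mask = present & ~online;

		printf("bring online before suspend: 0x%x\n", offline_mask);

		/* ... suspend/resume would happen here ... */

		/* afterwards the same mask says which CPUs to take down */
		printf("take offline after resume:   0x%x\n", offline_mask);
		return 0;
	}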
@@ -361,7 +361,7 @@ static int wsp_chip_set_affinity(struct irq_data *d,
 	xive = xive_set_server(xive, get_irq_server(ics, hw_irq));
 	wsp_ics_set_xive(ics, hw_irq, xive);
 
-	return 0;
+	return IRQ_SET_MASK_OK;
 }
 
 static struct irq_chip wsp_irq_chip = {
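This hunk and the matching ones below for ehv_pic_set_affinity(), mpic_set_affinity() and ics_opal_set_affinity() replace a literal 0 with IRQ_SET_MASK_OK. The two are numerically identical, but the named constant distinguishes "the core should copy the new mask into irq_data" from IRQ_SET_MASK_OK_NOCOPY, where the callback has already stored it. A small sketch of a caller branching on that convention; the enum values mirror the kernel's, the dispatcher is illustrative:

	#include <stdio.h>

	/* Mirrors the kernel's return convention for irq_set_affinity() */
	enum irq_set_mask_result {
		IRQ_SET_MASK_OK = 0,		/* core copies the new mask */
		IRQ_SET_MASK_OK_NOCOPY = 1,	/* chip already stored it */
	};

	static enum irq_set_mask_result chip_set_affinity(unsigned int *hw_mask,
							  unsigned int requested)
	{
		*hw_mask = requested;		/* program the "hardware" */
		return IRQ_SET_MASK_OK;		/* let the core keep its copy */
	}

	int main(void)
	{
		unsigned int hw = 0, core_copy = 0, want = 0x3;

		if (chip_set_affinity(&hw, want) == IRQ_SET_MASK_OK)
			core_copy = want;	/* core-side bookkeeping */
		printf("hw=%#x core=%#x\n", hw, core_copy);
		return 0;
	}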
@@ -64,6 +64,8 @@ endif
 
 obj-$(CONFIG_PPC_SCOM)		+= scom.o
 
+obj-$(CONFIG_PPC_EARLY_DEBUG_MEMCONS)	+= udbg_memcons.o
+
 subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
 
 obj-$(CONFIG_PPC_XICS)		+= xics/
@@ -81,7 +81,7 @@ int ehv_pic_set_affinity(struct irq_data *d, const struct cpumask *dest,
 	ev_int_set_config(src, config, prio, cpuid);
 	spin_unlock_irqrestore(&ehv_pic_lock, flags);
 
-	return 0;
+	return IRQ_SET_MASK_OK;
 }
 
 static unsigned int ehv_pic_type_to_vecpri(unsigned int type)
@@ -836,7 +836,7 @@ int mpic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
 			       mpic_physmask(mask));
 	}
 
-	return 0;
+	return IRQ_SET_MASK_OK;
 }
 
 static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type)
105
arch/powerpc/sysdev/udbg_memcons.c
Normal file
@@ -0,0 +1,105 @@
/*
 * A udbg backend which logs messages and reads input from in memory
 * buffers.
 *
 * The console output can be read from memcons_output which is a
 * circular buffer whose next write position is stored in memcons.output_pos.
 *
 * Input may be passed by writing into the memcons_input buffer when it is
 * empty. The input buffer is empty when both input_pos == input_start and
 * *input_start == '\0'.
 *
 * Copyright (C) 2003-2005 Anton Blanchard and Milton Miller, IBM Corp
 * Copyright (C) 2013 Alistair Popple, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/barrier.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/udbg.h>

struct memcons {
	char *output_start;
	char *output_pos;
	char *output_end;
	char *input_start;
	char *input_pos;
	char *input_end;
};

static char memcons_output[CONFIG_PPC_MEMCONS_OUTPUT_SIZE];
static char memcons_input[CONFIG_PPC_MEMCONS_INPUT_SIZE];

struct memcons memcons = {
	.output_start = memcons_output,
	.output_pos = memcons_output,
	.output_end = &memcons_output[CONFIG_PPC_MEMCONS_OUTPUT_SIZE],
	.input_start = memcons_input,
	.input_pos = memcons_input,
	.input_end = &memcons_input[CONFIG_PPC_MEMCONS_INPUT_SIZE],
};

void memcons_putc(char c)
{
	char *new_output_pos;

	*memcons.output_pos = c;
	wmb();
	new_output_pos = memcons.output_pos + 1;
	if (new_output_pos >= memcons.output_end)
		new_output_pos = memcons.output_start;

	memcons.output_pos = new_output_pos;
}

int memcons_getc_poll(void)
{
	char c;
	char *new_input_pos;

	if (*memcons.input_pos) {
		c = *memcons.input_pos;

		new_input_pos = memcons.input_pos + 1;
		if (new_input_pos >= memcons.input_end)
			new_input_pos = memcons.input_start;
		else if (*new_input_pos == '\0')
			new_input_pos = memcons.input_start;

		*memcons.input_pos = '\0';
		wmb();
		memcons.input_pos = new_input_pos;
		return c;
	}

	return -1;
}

int memcons_getc(void)
{
	int c;

	while (1) {
		c = memcons_getc_poll();
		if (c == -1)
			cpu_relax();
		else
			break;
	}

	return c;
}

void udbg_init_memcons(void)
{
	udbg_putc = memcons_putc;
	udbg_getc = memcons_getc;
	udbg_getc_poll = memcons_getc_poll;
}
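memcons keeps console output in a fixed ring: output_pos always points at the next write slot and wraps at output_end, so an external tool that knows the buffer address can replay the most recent output oldest-first starting from output_pos. A user-space sketch of the same wrap-around producer plus a replay loop; the buffer size here is made up (the kernel sizes it with CONFIG_PPC_MEMCONS_OUTPUT_SIZE):

	#include <stdio.h>

	#define OUT_SIZE 8	/* stand-in for CONFIG_PPC_MEMCONS_OUTPUT_SIZE */

	static char out[OUT_SIZE];
	static char *pos = out;

	/* Same wrap logic as memcons_putc() */
	static void putc_mem(char c)
	{
		*pos++ = c;
		if (pos >= out + OUT_SIZE)
			pos = out;
	}

	int main(void)
	{
		const char *msg = "hello, world";	/* longer than the ring */
		while (*msg)
			putc_mem(*msg++);

		/* Replay: the oldest surviving byte sits at pos */
		for (int i = 0; i < OUT_SIZE; i++)
			putchar(out[((pos - out) + i) % OUT_SIZE]);
		putchar('\n');
		return 0;
	}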
@@ -148,7 +148,7 @@ static int ics_opal_set_affinity(struct irq_data *d,
 			__func__, d->irq, hw_irq, server, rc);
 		return -1;
 	}
-	return 0;
+	return IRQ_SET_MASK_OK;
 }
 
 static struct irq_chip ics_opal_irq_chip = {
Some files were not shown because too many files have changed in this diff