
Merge 4.8-rc3 into usb-next

We want the USB fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Greg Kroah-Hartman 2016-08-21 21:33:17 -04:00
commit 1aaaa9acae
549 changed files with 6964 additions and 3053 deletions

View File

@ -14,6 +14,12 @@ add_random (RW)
This file allows one to turn off the disk entropy contribution. The default
value of this file is '1' (on).
dax (RO)
--------
This file indicates whether the device supports Direct Access (DAX),
used by CPU-addressable storage to bypass the pagecache. It shows '1'
if true, '0' if not.
discard_granularity (RO)
-----------------------
This shows the size of internal allocation of the device in bytes, if
@ -46,6 +52,12 @@ hw_sector_size (RO)
-------------------
This is the hardware sector size of the device, in bytes.
io_poll (RW)
------------
When read, this file shows the total number of block IO polls and how
many returned success. Writing '0' to this file will disable polling
for this device. Writing any non-zero value will enable this feature.
iostats (RW)
-------------
This file is used to control (on/off) the iostats accounting of the
@ -151,5 +163,11 @@ device state. This means that it might not be safe to toggle the
setting from "write back" to "write through", since that will also
eliminate cache flushes issued by the kernel.
write_same_max_bytes (RO)
-------------------------
This is the number of bytes the device can write in a single write-same
command. A value of '0' means write-same is not supported by this
device.
Jens Axboe <jens.axboe@oracle.com>, February 2009
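
The attributes above are plain sysfs files, so they can be exercised from
user space with ordinary file I/O. Below is a minimal, hedged C sketch that
reads 'dax' and disables 'io_poll' for one device; the device name (sda) is
an assumption, so substitute your own block device, and the program needs
sufficient privileges for the write.

#include <stdio.h>

int main(void)
{
	char buf[16];
	FILE *f = fopen("/sys/block/sda/queue/dax", "r"); /* device name is hypothetical */

	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("dax: %s", buf);	/* '1' if DAX is supported, '0' if not */
		fclose(f);
	}

	/* io_poll is RW: writing '0' disables block IO polling for the
	 * device, any non-zero value re-enables it. */
	f = fopen("/sys/block/sda/queue/io_poll", "w");
	if (f) {
		fputs("0", f);
		fclose(f);
	}
	return 0;
}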

View File

@ -131,7 +131,7 @@ pygments_style = 'sphinx'
todo_include_todos = False
primary_domain = 'C'
highlight_language = 'C'
highlight_language = 'guess'
# -- Options for HTML output ----------------------------------------------

View File

@ -19,5 +19,5 @@ enhancements. It can monitor up to 4 voltages, 16 temperatures and
implemented in this driver.
Specification of the chip can be found here:
ftp:///pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/BMC-Teutates_Specification_V1.21.pdf
ftp:///pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/Fujitsu_mainboards-1-Sensors_HowTo-en-US.pdf
ftp://ftp.ts.fujitsu.com/pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/BMC-Teutates_Specification_V1.21.pdf
ftp://ftp.ts.fujitsu.com/pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/Fujitsu_mainboards-1-Sensors_HowTo-en-US.pdf

View File

@ -366,8 +366,6 @@ Domain`_ references.
Cross-referencing from reStructuredText
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. highlight:: none
To cross-reference the functions and types defined in the kernel-doc comments
from reStructuredText documents, please use the `Sphinx C Domain`_
references. For example::
@ -390,8 +388,6 @@ For further details, please refer to the `Sphinx C Domain`_ documentation.
Function documentation
----------------------
.. highlight:: c
The general format of a function and function-like macro kernel-doc comment is::
/**
@ -572,8 +568,6 @@ DocBook XML [DEPRECATED]
Converting DocBook to Sphinx
----------------------------
.. highlight:: none
Over time, we expect all of the documents under ``Documentation/DocBook`` to be
converted to Sphinx and reStructuredText. For most DocBook XML documents, a good
enough solution is to use the simple ``Documentation/sphinx/tmplcvt`` script,

View File

@ -790,13 +790,12 @@ The kernel interface functions are as follows:
Data messages can have their contents extracted with the usual bunch of
socket buffer manipulation functions. A data message can be determined to
be the last one in a sequence with rxrpc_kernel_is_data_last(). When a
data message has been used up, rxrpc_kernel_data_delivered() should be
called on it..
data message has been used up, rxrpc_kernel_data_consumed() should be
called on it.
Non-data messages should be handled to rxrpc_kernel_free_skb() to dispose
of. It is possible to get extra refs on all types of message for later
freeing, but this may pin the state of a call until the message is finally
freed.
Messages should be handled to rxrpc_kernel_free_skb() to dispose of. It
is possible to get extra refs on all types of message for later freeing,
but this may pin the state of a call until the message is finally freed.
(*) Accept an incoming call.
@ -821,12 +820,14 @@ The kernel interface functions are as follows:
Other errors may be returned if the call had been aborted (-ECONNABORTED)
or had timed out (-ETIME).
(*) Record the delivery of a data message and free it.
(*) Record the delivery of a data message.
void rxrpc_kernel_data_delivered(struct sk_buff *skb);
void rxrpc_kernel_data_consumed(struct rxrpc_call *call,
struct sk_buff *skb);
This is used to record a data message as having been delivered and to
update the ACK state for the call. The socket buffer will be freed.
This is used to record a data message as having been consumed and to
update the ACK state for the call. The message must still be passed to
rxrpc_kernel_free_skb() for disposal by the caller.
(*) Free a message.
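
To make the consume-then-free ordering described above concrete, here is a
hedged sketch of a receive path using only the calls named in this document;
the helper itself and its invocation context are assumptions for
illustration, not part of the documented rxrpc API.

#include <linux/printk.h>
#include <linux/skbuff.h>
#include <net/af_rxrpc.h>

/* Hypothetical helper showing the documented pattern: mark a data message
 * as consumed first, then dispose of the skb explicitly. */
static void example_handle_data(struct rxrpc_call *call, struct sk_buff *skb)
{
	bool last = rxrpc_kernel_is_data_last(skb);

	/* ... extract the payload with the usual skb helpers ... */

	/* Update the call's ACK state; this no longer frees the skb. */
	rxrpc_kernel_data_consumed(call, skb);

	/* The caller must still hand the message to rxrpc_kernel_free_skb(). */
	rxrpc_kernel_free_skb(skb);

	if (last)
		pr_debug("rxrpc: last data message in this sequence\n");
}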

View File

@ -164,7 +164,32 @@ load n/2 modules more and try again.
Again, if you find the offending module(s), they must be unloaded every time
before hibernation, and please report the problem with them.
c) Advanced debugging
c) Using the "test_resume" hibernation option
/sys/power/disk generally tells the kernel what to do after creating a
hibernation image. One of the available options is "test_resume" which
causes the just created image to be used for immediate restoration. Namely,
after doing:
# echo test_resume > /sys/power/disk
# echo disk > /sys/power/state
a hibernation image will be created and a resume from it will be triggered
immediately without involving the platform firmware in any way.
That test can be used to check if failures to resume from hibernation are
related to bad interactions with the platform firmware. That is, if the above
works every time, but resume from actual hibernation does not work or is
unreliable, the platform firmware may be responsible for the failures.
On architectures and platforms that support using different kernels to restore
hibernation images (that is, the kernel used to read the image from storage and
load it into memory is different from the one included in the image) or support
kernel address space randomization, it also can be used to check if failures
to resume may be related to the differences between the restore and image
kernels.
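
The same test can be driven programmatically; the sketch below is a direct
user-space equivalent (run as root) of the two echo commands quoted above.

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/power/disk", "w");

	if (!f)
		return 1;
	/* Select the test_resume mode described above. */
	fputs("test_resume", f);
	fclose(f);

	/* Create a hibernation image and resume from it immediately,
	 * without involving the platform firmware in any way. */
	f = fopen("/sys/power/state", "w");
	if (!f)
		return 1;
	fputs("disk", f);
	fclose(f);
	return 0;
}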
d) Advanced debugging
In case that hibernation does not work on your system even in the minimal
configuration and compiling more drivers as modules is not practical or some

View File

@ -1,75 +1,76 @@
Power Management Interface
Power Management Interface for System Sleep
Copyright (c) 2016 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
The power management subsystem provides a unified sysfs interface to
userspace, regardless of what architecture or platform one is
running. The interface exists in /sys/power/ directory (assuming sysfs
is mounted at /sys).
The power management subsystem provides userspace with a unified sysfs interface
for system sleep regardless of the underlying system architecture or platform.
The interface is located in the /sys/power/ directory (assuming that sysfs is
mounted at /sys).
/sys/power/state controls system power state. Reading from this file
returns what states are supported, which is hard-coded to 'freeze',
'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and 'disk'
(Suspend-to-Disk).
/sys/power/state is the system sleep state control file.
Writing to this file one of those strings causes the system to
transition into that state. Please see the file
Documentation/power/states.txt for a description of each of those
states.
Reading from it returns a list of supported sleep states, encoded as:
'freeze' (Suspend-to-Idle)
'standby' (Power-On Suspend)
'mem' (Suspend-to-RAM)
'disk' (Suspend-to-Disk)
/sys/power/disk controls the operating mode of the suspend-to-disk
mechanism. Suspend-to-disk can be handled in several ways. We have a
few options for putting the system to sleep - using the platform driver
(e.g. ACPI or other suspend_ops), powering off the system or rebooting the
system (for testing).
Suspend-to-Idle is always supported. Suspend-to-Disk is always supported
too, as long as the kernel has been configured to support hibernation at all
(i.e. CONFIG_HIBERNATION is set in the kernel configuration file). Support
for Suspend-to-RAM and Power-On Suspend depends on the capabilities of the
platform.
Additionally, /sys/power/disk can be used to turn on one of the two testing
modes of the suspend-to-disk mechanism: 'testproc' or 'test'. If the
suspend-to-disk mechanism is in the 'testproc' mode, writing 'disk' to
/sys/power/state will cause the kernel to disable nonboot CPUs and freeze
tasks, wait for 5 seconds, unfreeze tasks and enable nonboot CPUs. If it is
in the 'test' mode, writing 'disk' to /sys/power/state will cause the kernel
to disable nonboot CPUs and freeze tasks, shrink memory, suspend devices, wait
for 5 seconds, resume devices, unfreeze tasks and enable nonboot CPUs. Then,
we are able to look in the log messages and work out, for example, which code
is being slow and which device drivers are misbehaving.
If one of the strings listed in /sys/power/state is written to it, the system
will attempt to transition into the corresponding sleep state. Refer to
Documentation/power/states.txt for a description of each of those states.
Reading from this file will display all supported modes and the currently
selected one in brackets, for example
/sys/power/disk controls the operating mode of hibernation (Suspend-to-Disk).
Specifically, it tells the kernel what to do after creating a hibernation image.
[shutdown] reboot test testproc
Reading from it returns a list of supported options encoded as:
Writing to this file will accept one of
'platform' (put the system into sleep using a platform-provided method)
'shutdown' (shut the system down)
'reboot' (reboot the system)
'suspend' (trigger a Suspend-to-RAM transition)
'test_resume' (resume-after-hibernation test mode)
'platform' (only if the platform supports it)
'shutdown'
'reboot'
'testproc'
'test'
The currently selected option is printed in square brackets.
/sys/power/image_size controls the size of the image created by
the suspend-to-disk mechanism. It can be written a string
representing a non-negative integer that will be used as an upper
limit of the image size, in bytes. The suspend-to-disk mechanism will
do its best to ensure the image size will not exceed that number. However,
if this turns out to be impossible, it will try to suspend anyway using the
smallest image possible. In particular, if "0" is written to this file, the
suspend image will be as small as possible.
The 'platform' option is only available if the platform provides a special
mechanism to put the system to sleep after creating a hibernation image (ACPI
does that, for example). The 'suspend' option is available if Suspend-to-RAM
is supported. Refer to Documentation/power/basic_pm_debugging.txt for the
description of the 'test_resume' option.
Reading from this file will display the current image size limit, which
is set to 2/5 of available RAM by default.
To select an option, write the string representing it to /sys/power/disk.
/sys/power/pm_trace controls the code which saves the last PM event point in
the RTC across reboots, so that you can debug a machine that just hangs
during suspend (or more commonly, during resume). Namely, the RTC is only
used to save the last PM event point if this file contains '1'. Initially it
contains '0' which may be changed to '1' by writing a string representing a
nonzero integer into it.
/sys/power/image_size controls the size of hibernation images.
To use this debugging feature you should attempt to suspend the machine, then
reboot it and run
It can be written a string representing a non-negative integer that will be
used as a best-effort upper limit of the image size, in bytes. The hibernation
core will do its best to ensure that the image size will not exceed that number.
However, if that turns out to be impossible to achieve, a hibernation image will
still be created and its size will be as small as possible. In particular,
writing '0' to this file will enforce hibernation images to be as small as
possible.
dmesg -s 1000000 | grep 'hash matches'
Reading from this file returns the current image size limit, which is set to
around 2/5 of available RAM by default.
CAUTION: Using it will cause your machine's real-time (CMOS) clock to be
set to a random invalid time after a resume.
/sys/power/pm_trace controls the PM trace mechanism saving the last suspend
or resume event point in the RTC across reboots.
It helps to debug hard lockups or reboots due to device driver failures that
occur during system suspend or resume (which is more common) more effectively.
If /sys/power/pm_trace contains '1', the fingerprint of each suspend/resume
event point in turn will be stored in the RTC memory (overwriting the actual
RTC information), so it will survive a system crash if one occurs right after
storing it and it can be used later to identify the driver that caused the crash
to happen (see Documentation/power/s2ram.txt for more information).
Initially it contains '0' which may be changed to '1' by writing a string
representing a nonzero integer into it.
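
As a concrete illustration of the /sys/power/state semantics described
above, the hedged C sketch below lists the supported sleep states and then
requests 'freeze' (Suspend-to-Idle, which the text notes is always
supported); it needs root and, if the write succeeds, will actually suspend
the machine.

#include <stdio.h>
#include <string.h>

int main(void)
{
	char states[64];
	FILE *f = fopen("/sys/power/state", "r");

	if (!f)
		return 1;
	if (!fgets(states, sizeof(states), f)) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("supported sleep states: %s", states); /* e.g. "freeze mem disk" */

	if (strstr(states, "freeze")) {
		f = fopen("/sys/power/state", "w");
		if (f) {
			fputs("freeze", f);	/* the system suspends here */
			fclose(f);
		}
	}
	return 0;
}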

View File

@ -42,11 +42,12 @@
caption a.headerlink { opacity: 0; }
caption a.headerlink:hover { opacity: 1; }
/* inline literal: drop the borderbox and red color */
/* inline literal: drop the borderbox, padding and red color */
code, .rst-content tt, .rst-content code {
color: inherit;
border: none;
padding: unset;
background: inherit;
font-size: 85%;
}

View File

@ -1004,6 +1004,7 @@ N: meson
ARM/Annapurna Labs ALPINE ARCHITECTURE
M: Tsahee Zidenberg <tsahee@annapurnalabs.com>
M: Antoine Tenart <antoine.tenart@free-electrons.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-alpine/
F: arch/arm/boot/dts/alpine*
@ -4524,6 +4525,12 @@ L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/sb_edac.c
EDAC-SKYLAKE
M: Tony Luck <tony.luck@intel.com>
L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/skx_edac.c
EDAC-XGENE
APPLIED MICRO (APM) X-GENE SOC EDAC
M: Loc Ho <lho@apm.com>

View File

@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 8
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc3
NAME = Psychotic Stoned Sheep
# *DOCUMENTATION*
@ -635,13 +635,6 @@ endif
# Tell gcc to never replace conditional load with a non-conditional one
KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
PHONY += gcc-plugins
gcc-plugins: scripts_basic
ifdef CONFIG_GCC_PLUGINS
$(Q)$(MAKE) $(build)=scripts/gcc-plugins
endif
@:
include scripts/Makefile.gcc-plugins
ifdef CONFIG_READABLE_ASM

View File

@ -461,6 +461,15 @@ config CC_STACKPROTECTOR_STRONG
endchoice
config HAVE_ARCH_WITHIN_STACK_FRAMES
bool
help
An architecture should select this if it can walk the kernel stack
frames to determine if an object is part of either the arguments
or local variables (i.e. that it excludes saved return addresses,
and similar) by implementing an inline arch_within_stack_frames(),
which is used by CONFIG_HARDENED_USERCOPY.
config HAVE_CONTEXT_TRACKING
bool
help

View File

@ -35,6 +35,7 @@ config ARM
select HARDIRQS_SW_RESEND
select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
select HAVE_ARCH_HARDENED_USERCOPY
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
select HAVE_ARCH_MMAP_RND_BITS if MMU

View File

@ -260,12 +260,14 @@ machdirs := $(patsubst %,arch/arm/mach-%/,$(machine-y))
platdirs := $(patsubst %,arch/arm/plat-%/,$(sort $(plat-y)))
ifneq ($(CONFIG_ARCH_MULTIPLATFORM),y)
ifneq ($(CONFIG_ARM_SINGLE_ARMV7M),y)
ifeq ($(KBUILD_SRC),)
KBUILD_CPPFLAGS += $(patsubst %,-I%include,$(machdirs) $(platdirs))
else
KBUILD_CPPFLAGS += $(patsubst %,-I$(srctree)/%include,$(machdirs) $(platdirs))
endif
endif
endif
export TEXT_OFFSET GZFLAGS MMUEXT

View File

@ -70,13 +70,12 @@
* associativity as these may be erroneously set
* up by boot loader(s).
*/
cache-size = <1048576>; // 1MB
cache-sets = <4096>;
cache-size = <131072>; // 128KB
cache-sets = <512>;
cache-line-size = <32>;
arm,parity-disable;
arm,tag-latency = <1>;
arm,data-latency = <1 1>;
arm,dirty-latency = <1>;
arm,tag-latency = <1 1 1>;
arm,data-latency = <1 1 1>;
};
scu: scu@1f000000 {

View File

@ -42,7 +42,7 @@
};
syscon {
compatible = "arm,integrator-ap-syscon";
compatible = "arm,integrator-ap-syscon", "syscon";
reg = <0x11000000 0x100>;
interrupt-parent = <&pic>;
/* These are the logical module IRQs */

View File

@ -94,7 +94,7 @@
};
syscon {
compatible = "arm,integrator-cp-syscon";
compatible = "arm,integrator-cp-syscon", "syscon";
reg = <0xcb000000 0x100>;
};

View File

@ -70,14 +70,6 @@
cpu_on = <0x84000003>;
};
psci {
compatible = "arm,psci";
method = "smc";
cpu_suspend = <0x84000001>;
cpu_off = <0x84000002>;
cpu_on = <0x84000003>;
};
soc {
#address-cells = <1>;
#size-cells = <1>;

View File

@ -1382,7 +1382,7 @@
* Pin 41: BR_UART1_TXD
* Pin 44: BR_UART1_RXD
*/
serial@70006000 {
serial@0,70006000 {
compatible = "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart";
status = "okay";
};
@ -1394,7 +1394,7 @@
* Pin 71: UART2_CTS_L
* Pin 74: UART2_RTS_L
*/
serial@70006040 {
serial@0,70006040 {
compatible = "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart";
status = "okay";
};

View File

@ -58,7 +58,7 @@ CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_FIRMWARE_MEMMAP=y
CONFIG_FANOTIFY=y
CONFIG_PRINTK_TIME=1
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_STRIP_ASM_SYMS=y
CONFIG_PAGE_POISONING=y

View File

@ -59,7 +59,7 @@ CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_FIRMWARE_MEMMAP=y
CONFIG_FANOTIFY=y
CONFIG_PRINTK_TIME=1
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_STRIP_ASM_SYMS=y
CONFIG_PAGE_POISONING=y

View File

@ -480,7 +480,10 @@ arm_copy_from_user(void *to, const void __user *from, unsigned long n);
static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned int __ua_flags = uaccess_save_and_enable();
unsigned int __ua_flags;
check_object_size(to, n, false);
__ua_flags = uaccess_save_and_enable();
n = arm_copy_from_user(to, from, n);
uaccess_restore(__ua_flags);
return n;
@ -495,11 +498,15 @@ static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
#ifndef CONFIG_UACCESS_WITH_MEMCPY
unsigned int __ua_flags = uaccess_save_and_enable();
unsigned int __ua_flags;
check_object_size(from, n, true);
__ua_flags = uaccess_save_and_enable();
n = arm_copy_to_user(to, from, n);
uaccess_restore(__ua_flags);
return n;
#else
check_object_size(from, n, true);
return arm_copy_to_user(to, from, n);
#endif
}

View File

@ -295,6 +295,7 @@ __und_svc_fault:
bl __und_fault
__und_svc_finish:
get_thread_info tsk
ldr r5, [sp, #S_PSR] @ Get SVC cpsr
svc_exit r5 @ return from exception
UNWIND(.fnend )

View File

@ -279,8 +279,12 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
mm_segment_t fs;
long ret, err, i;
if (maxevents <= 0 || maxevents > (INT_MAX/sizeof(struct epoll_event)))
if (maxevents <= 0 ||
maxevents > (INT_MAX/sizeof(*kbuf)) ||
maxevents > (INT_MAX/sizeof(*events)))
return -EINVAL;
if (!access_ok(VERIFY_WRITE, events, sizeof(*events) * maxevents))
return -EFAULT;
kbuf = kmalloc(sizeof(*kbuf) * maxevents, GFP_KERNEL);
if (!kbuf)
return -ENOMEM;
@ -317,6 +321,8 @@ asmlinkage long sys_oabi_semtimedop(int semid,
if (nsops < 1 || nsops > SEMOPM)
return -EINVAL;
if (!access_ok(VERIFY_READ, tsops, sizeof(*tsops) * nsops))
return -EFAULT;
sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL);
if (!sops)
return -ENOMEM;

View File

@ -1009,9 +1009,13 @@ long kvm_arch_vm_ioctl(struct file *filp,
switch (ioctl) {
case KVM_CREATE_IRQCHIP: {
int ret;
if (!vgic_present)
return -ENXIO;
return kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
mutex_lock(&kvm->lock);
ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
mutex_unlock(&kvm->lock);
return ret;
}
case KVM_ARM_SET_DEVICE_ADDR: {
struct kvm_arm_device_addr dev_addr;

View File

@ -1,13 +1,13 @@
menuconfig ARCH_CLPS711X
bool "Cirrus Logic EP721x/EP731x-based"
depends on ARCH_MULTI_V4T
select ARCH_REQUIRE_GPIOLIB
select AUTO_ZRELADDR
select CLKSRC_OF
select CLPS711X_TIMER
select COMMON_CLK
select CPU_ARM720T
select GENERIC_CLOCKEVENTS
select GPIOLIB
select MFD_SYSCON
select OF_IRQ
select USE_OF

View File

@ -271,6 +271,12 @@ static int __init imx_gpc_init(struct device_node *node,
for (i = 0; i < IMR_NUM; i++)
writel_relaxed(~0, gpc_base + GPC_IMR1 + i * 4);
/*
* Clear the OF_POPULATED flag set in of_irq_init so that
* later the GPC power domain driver will not be skipped.
*/
of_node_clear_flag(node, OF_POPULATED);
return 0;
}
IRQCHIP_DECLARE(imx_gpc, "fsl,imx6q-gpc", imx_gpc_init);

View File

@ -1,5 +1,4 @@
ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \
-I$(srctree)/arch/arm/plat-orion/include
ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/arch/arm/plat-orion/include
AFLAGS_coherency_ll.o := -Wa,-march=armv7-a
CFLAGS_pmsu.o := -march=armv7-a

View File

@ -11,11 +11,13 @@ if ARCH_OXNAS
config MACH_OX810SE
bool "Support OX810SE Based Products"
select ARCH_HAS_RESET_CONTROLLER
select COMMON_CLK_OXNAS
select CPU_ARM926T
select MFD_SYSCON
select OXNAS_RPS_TIMER
select PINCTRL_OXNAS
select RESET_CONTROLLER
select RESET_OXNAS
select VERSATILE_FPGA_IRQ
help

View File

@ -13,6 +13,7 @@
*/
#include <linux/kernel.h>
#include <linux/module.h> /* symbol_get ; symbol_put */
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/major.h>

View File

@ -13,6 +13,7 @@
*/
#include <linux/kernel.h>
#include <linux/module.h> /* symbol_get ; symbol_put */
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/gpio_keys.h>

View File

@ -1,8 +1,7 @@
#
# Makefile for the linux kernel.
#
ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \
-I$(srctree)/arch/arm/plat-versatile/include
ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/arch/arm/plat-versatile/include
obj-y := core.o
obj-$(CONFIG_REALVIEW_DT) += realview-dt.o

View File

@ -5,7 +5,7 @@
#
# Licensed under GPLv2
ccflags-$(CONFIG_ARCH_MULTIPLATFORM) += -I$(srctree)/$(src)/include -I$(srctree)/arch/arm/plat-samsung/include
ccflags-$(CONFIG_ARCH_MULTIPLATFORM) += -I$(srctree)/arch/arm/plat-samsung/include
# Core

View File

@ -40,5 +40,8 @@ bool shmobile_smp_cpu_can_disable(unsigned int cpu)
bool __init shmobile_smp_init_fallback_ops(void)
{
/* fallback on PSCI/smp_ops if no other DT based method is detected */
if (!IS_ENABLED(CONFIG_SMP))
return false;
return platform_can_secondary_boot() ? true : false;
}

View File

@ -728,7 +728,8 @@ static void *__init late_alloc(unsigned long sz)
{
void *ptr = (void *)__get_free_pages(PGALLOC_GFP, get_order(sz));
BUG_ON(!ptr);
if (!ptr || !pgtable_page_ctor(virt_to_page(ptr)))
BUG();
return ptr;
}
@ -1155,10 +1156,19 @@ void __init sanity_check_meminfo(void)
{
phys_addr_t memblock_limit = 0;
int highmem = 0;
phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
u64 vmalloc_limit;
struct memblock_region *reg;
bool should_use_highmem = false;
/*
* Let's use our own (unoptimized) equivalent of __pa() that is
* not affected by wrap-arounds when sizeof(phys_addr_t) == 4.
* The result is used as the upper bound on physical memory address
* and may itself be outside the valid range for which phys_addr_t
* and therefore __pa() is defined.
*/
vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;
for_each_memblock(memory, reg) {
phys_addr_t block_start = reg->base;
phys_addr_t block_end = reg->base + reg->size;
@ -1183,10 +1193,11 @@ void __init sanity_check_meminfo(void)
if (reg->size > size_limit) {
phys_addr_t overlap_size = reg->size - size_limit;
pr_notice("Truncating RAM at %pa-%pa to -%pa",
&block_start, &block_end, &vmalloc_limit);
memblock_remove(vmalloc_limit, overlap_size);
pr_notice("Truncating RAM at %pa-%pa",
&block_start, &block_end);
block_end = vmalloc_limit;
pr_cont(" to -%pa", &block_end);
memblock_remove(vmalloc_limit, overlap_size);
should_use_highmem = true;
}
}

View File

@ -54,6 +54,7 @@ config ARM64
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_BITREVERSE
select HAVE_ARCH_HARDENED_USERCOPY
select HAVE_ARCH_HUGE_VMAP
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)

View File

@ -8,7 +8,7 @@ config ARCH_SUNXI
config ARCH_ALPINE
bool "Annapurna Labs Alpine platform"
select ALPINE_MSI
select ALPINE_MSI if PCI
help
This enables support for the Annapurna Labs Alpine
Soc family.
@ -66,7 +66,7 @@ config ARCH_LG1K
config ARCH_HISI
bool "Hisilicon SoC Family"
select ARM_TIMER_SP804
select HISILICON_IRQ_MBIGEN
select HISILICON_IRQ_MBIGEN if PCI
help
This enables support for Hisilicon ARMv8 SoC family

View File

@ -12,6 +12,7 @@
/dts-v1/;
#include "exynos7.dtsi"
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/clock/samsung,s2mps11.h>
/ {
model = "Samsung Exynos7 Espresso board based on EXYNOS7";
@ -43,6 +44,8 @@
&rtc {
status = "okay";
clocks = <&clock_ccore PCLK_RTC>, <&s2mps15_osc S2MPS11_CLK_AP>;
clock-names = "rtc", "rtc_src";
};
&watchdog {

View File

@ -1,4 +1,3 @@
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y
@ -15,10 +14,14 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_PIDS=y
CONFIG_CGROUP_HUGETLB=y
# CONFIG_UTS_NS is not set
# CONFIG_IPC_NS is not set
# CONFIG_NET_NS is not set
CONFIG_CPUSETS=y
CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_PERF=y
CONFIG_USER_NS=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_KALLSYMS_ALL=y
@ -71,6 +74,7 @@ CONFIG_PREEMPT=y
CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CMA=y
CONFIG_SECCOMP=y
CONFIG_XEN=y
CONFIG_KEXEC=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
@ -84,10 +88,37 @@ CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
# CONFIG_IPV6 is not set
CONFIG_IPV6=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_LOG=m
CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_NAT=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_BRIDGE=m
CONFIG_BRIDGE_VLAN_FILTERING=y
CONFIG_VLAN_8021Q=m
CONFIG_VLAN_8021Q_GVRP=y
CONFIG_VLAN_8021Q_MVRP=y
CONFIG_BPF_JIT=y
CONFIG_CFG80211=m
CONFIG_MAC80211=m
@ -103,6 +134,7 @@ CONFIG_MTD=y
CONFIG_MTD_M25P80=y
CONFIG_MTD_SPI_NOR=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=m
CONFIG_VIRTIO_BLK=y
CONFIG_SRAM=y
# CONFIG_SCSI_PROC_FS is not set
@ -120,7 +152,10 @@ CONFIG_SATA_SIL24=y
CONFIG_PATA_PLATFORM=y
CONFIG_PATA_OF_PLATFORM=y
CONFIG_NETDEVICES=y
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_TUN=y
CONFIG_VETH=m
CONFIG_VIRTIO_NET=y
CONFIG_AMD_XGBE=y
CONFIG_NET_XGENE=y
@ -350,12 +385,16 @@ CONFIG_EXYNOS_ADC=y
CONFIG_PWM_SAMSUNG=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_BTRFS_FS=m
CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_FANOTIFY=y
CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
CONFIG_QUOTA=y
CONFIG_AUTOFS4_FS=y
CONFIG_FUSE_FS=y
CONFIG_CUSE=y
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y

View File

@ -22,7 +22,6 @@
#define __ARCH_WANT_KPROBES_INSN_SLOT
#define MAX_INSN_SIZE 1
#define MAX_STACK_SIZE 128
#define flush_insn_slot(p) do { } while (0)
#define kretprobe_blacklist_size 0
@ -47,7 +46,6 @@ struct kprobe_ctlblk {
struct prev_kprobe prev_kprobe;
struct kprobe_step_ctx ss_ctx;
struct pt_regs jprobe_saved_regs;
char jprobes_stack[MAX_STACK_SIZE];
};
void arch_remove_kprobe(struct kprobe *);

View File

@ -265,22 +265,25 @@ extern unsigned long __must_check __clear_user(void __user *addr, unsigned long
static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
{
kasan_check_write(to, n);
return __arch_copy_from_user(to, from, n);
check_object_size(to, n, false);
return __arch_copy_from_user(to, from, n);
}
static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
{
kasan_check_read(from, n);
return __arch_copy_to_user(to, from, n);
check_object_size(from, n, true);
return __arch_copy_to_user(to, from, n);
}
static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
kasan_check_write(to, n);
if (access_ok(VERIFY_READ, from, n))
if (access_ok(VERIFY_READ, from, n)) {
check_object_size(to, n, false);
n = __arch_copy_from_user(to, from, n);
else /* security hole - plug it */
} else /* security hole - plug it */
memset(to, 0, n);
return n;
}
@ -289,8 +292,10 @@ static inline unsigned long __must_check copy_to_user(void __user *to, const voi
{
kasan_check_read(from, n);
if (access_ok(VERIFY_WRITE, to, n))
if (access_ok(VERIFY_WRITE, to, n)) {
check_object_size(from, n, true);
n = __arch_copy_to_user(to, from, n);
}
return n;
}

View File

@ -353,6 +353,8 @@ el1_sync:
lsr x24, x1, #ESR_ELx_EC_SHIFT // exception class
cmp x24, #ESR_ELx_EC_DABT_CUR // data abort in EL1
b.eq el1_da
cmp x24, #ESR_ELx_EC_IABT_CUR // instruction abort in EL1
b.eq el1_ia
cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
b.eq el1_undef
cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
@ -364,6 +366,11 @@ el1_sync:
cmp x24, #ESR_ELx_EC_BREAKPT_CUR // debug exception in EL1
b.ge el1_dbg
b el1_inv
el1_ia:
/*
* Fall through to the Data abort case
*/
el1_da:
/*
* Data abort handling

View File

@ -35,6 +35,7 @@
#include <asm/sections.h>
#include <asm/smp.h>
#include <asm/suspend.h>
#include <asm/sysreg.h>
#include <asm/virt.h>
/*
@ -217,12 +218,22 @@ static int create_safe_exec_page(void *src_start, size_t length,
set_pte(pte, __pte(virt_to_phys((void *)dst) |
pgprot_val(PAGE_KERNEL_EXEC)));
/* Load our new page tables */
asm volatile("msr ttbr0_el1, %0;"
"isb;"
"tlbi vmalle1is;"
"dsb ish;"
"isb" : : "r"(virt_to_phys(pgd)));
/*
* Load our new page tables. A strict BBM approach requires that we
* ensure that TLBs are free of any entries that may overlap with the
* global mappings we are about to install.
*
* For a real hibernate/resume cycle TTBR0 currently points to a zero
* page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
* runtime services), while for a userspace-driven test_resume cycle it
* points to userspace page tables (and we must point it at a zero page
* ourselves). Elsewhere we only (un)install the idmap with preemption
* disabled, so T0SZ should be as required regardless.
*/
cpu_set_reserved_ttbr0();
local_flush_tlb_all();
write_sysreg(virt_to_phys(pgd), ttbr0_el1);
isb();
*phys_dst_addr = virt_to_phys((void *)dst);
@ -393,6 +404,38 @@ int swsusp_arch_resume(void)
void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
void *, phys_addr_t, phys_addr_t);
/*
* Restoring the memory image will overwrite the ttbr1 page tables.
* Create a second copy of just the linear map, and use this when
* restoring.
*/
tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
if (!tmp_pg_dir) {
pr_err("Failed to allocate memory for temporary page tables.");
rc = -ENOMEM;
goto out;
}
rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
if (rc)
goto out;
/*
* Since we only copied the linear map, we need to find restore_pblist's
* linear map address.
*/
lm_restore_pblist = LMADDR(restore_pblist);
/*
* We need a zero page that is zero before & after resume in order to
* to break before make on the ttbr1 page tables.
*/
zero_page = (void *)get_safe_page(GFP_ATOMIC);
if (!zero_page) {
pr_err("Failed to allocate zero page.");
rc = -ENOMEM;
goto out;
}
/*
* Locate the exit code in the bottom-but-one page, so that *NULL
* still has disastrous affects.
@ -418,27 +461,6 @@ int swsusp_arch_resume(void)
*/
__flush_dcache_area(hibernate_exit, exit_size);
/*
* Restoring the memory image will overwrite the ttbr1 page tables.
* Create a second copy of just the linear map, and use this when
* restoring.
*/
tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
if (!tmp_pg_dir) {
pr_err("Failed to allocate memory for temporary page tables.");
rc = -ENOMEM;
goto out;
}
rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
if (rc)
goto out;
/*
* Since we only copied the linear map, we need to find restore_pblist's
* linear map address.
*/
lm_restore_pblist = LMADDR(restore_pblist);
/*
* KASLR will cause the el2 vectors to be in a different location in
* the resumed kernel. Load hibernate's temporary copy into el2.
@ -453,12 +475,6 @@ int swsusp_arch_resume(void)
__hyp_set_vectors(el2_vectors);
}
/*
* We need a zero page that is zero before & after resume in order to
* to break before make on the ttbr1 page tables.
*/
zero_page = (void *)get_safe_page(GFP_ATOMIC);
hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
resume_hdr.reenter_kernel, lm_restore_pblist,
resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));

View File

@ -41,18 +41,6 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
static inline unsigned long min_stack_size(unsigned long addr)
{
unsigned long size;
if (on_irq_stack(addr, raw_smp_processor_id()))
size = IRQ_STACK_PTR(raw_smp_processor_id()) - addr;
else
size = (unsigned long)current_thread_info() + THREAD_START_SP - addr;
return min(size, FIELD_SIZEOF(struct kprobe_ctlblk, jprobes_stack));
}
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
/* prepare insn slot */
@ -489,20 +477,15 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
struct jprobe *jp = container_of(p, struct jprobe, kp);
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
long stack_ptr = kernel_stack_pointer(regs);
kcb->jprobe_saved_regs = *regs;
/*
* As Linus pointed out, gcc assumes that the callee
* owns the argument space and could overwrite it, e.g.
* tailcall optimization. So, to be absolutely safe
* we also save and restore enough stack bytes to cover
* the argument area.
* Since we can't be sure where in the stack frame "stacked"
* pass-by-value arguments are stored we just don't try to
* duplicate any of the stack. Do not use jprobes on functions that
* use more than 64 bytes (after padding each to an 8 byte boundary)
* of arguments, or pass individual arguments larger than 16 bytes.
*/
kasan_disable_current();
memcpy(kcb->jprobes_stack, (void *)stack_ptr,
min_stack_size(stack_ptr));
kasan_enable_current();
instruction_pointer_set(regs, (unsigned long) jp->entry);
preempt_disable();
@ -554,10 +537,6 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
}
unpause_graph_tracing();
*regs = kcb->jprobe_saved_regs;
kasan_disable_current();
memcpy((void *)stack_addr, kcb->jprobes_stack,
min_stack_size(stack_addr));
kasan_enable_current();
preempt_enable_no_resched();
return 1;
}

View File

@ -101,12 +101,20 @@ ENTRY(cpu_resume)
bl el2_setup // if in EL2 drop to EL1 cleanly
/* enable the MMU early - so we can access sleep_save_stash by va */
adr_l lr, __enable_mmu /* __cpu_setup will return here */
ldr x27, =_cpu_resume /* __enable_mmu will branch here */
adr_l x27, _resume_switched /* __enable_mmu will branch here */
adrp x25, idmap_pg_dir
adrp x26, swapper_pg_dir
b __cpu_setup
ENDPROC(cpu_resume)
.pushsection ".idmap.text", "ax"
_resume_switched:
ldr x8, =_cpu_resume
br x8
ENDPROC(_resume_switched)
.ltorg
.popsection
ENTRY(_cpu_resume)
mrs x1, mpidr_el1
adrp x8, mpidr_hash

View File

@ -661,9 +661,9 @@ void __init smp_init_cpus(void)
acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
acpi_parse_gic_cpu_interface, 0);
if (cpu_count > NR_CPUS)
pr_warn("no. of cores (%d) greater than configured maximum of %d - clipping\n",
cpu_count, NR_CPUS);
if (cpu_count > nr_cpu_ids)
pr_warn("Number of cores (%d) exceeds configured maximum of %d - clipping\n",
cpu_count, nr_cpu_ids);
if (!bootcpu_valid) {
pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
@ -677,7 +677,7 @@ void __init smp_init_cpus(void)
* with entries in cpu_logical_map while initializing the cpus.
* If the cpu set-up fails, invalidate the cpu_logical_map entry.
*/
for (i = 1; i < NR_CPUS; i++) {
for (i = 1; i < nr_cpu_ids; i++) {
if (cpu_logical_map(i) != INVALID_HWID) {
if (smp_cpu_setup(i))
cpu_logical_map(i) = INVALID_HWID;

View File

@ -242,7 +242,7 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
{
pte_t *pte = pte_offset_kernel(pmd, 0);
pte_t *pte = pte_offset_kernel(pmd, 0UL);
unsigned long addr;
unsigned i;
@ -254,7 +254,7 @@ static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
{
pmd_t *pmd = pmd_offset(pud, 0);
pmd_t *pmd = pmd_offset(pud, 0UL);
unsigned long addr;
unsigned i;
@ -271,7 +271,7 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
{
pud_t *pud = pud_offset(pgd, 0);
pud_t *pud = pud_offset(pgd, 0UL);
unsigned long addr;
unsigned i;

View File

@ -153,6 +153,11 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
}
#endif
static bool is_el1_instruction_abort(unsigned int esr)
{
return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
}
/*
* The kernel tried to access some page that wasn't present.
*/
@ -161,8 +166,9 @@ static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr,
{
/*
* Are we prepared to handle this kernel fault?
* We are almost certainly not prepared to handle instruction faults.
*/
if (fixup_exception(regs))
if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
return;
/*
@ -267,7 +273,8 @@ static inline bool is_permission_fault(unsigned int esr)
unsigned int ec = ESR_ELx_EC(esr);
unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM);
return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM) ||
(ec == ESR_ELx_EC_IABT_CUR && fsc_type == ESR_ELx_FSC_PERM);
}
static bool is_el0_instruction_abort(unsigned int esr)
@ -312,6 +319,9 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
if (regs->orig_addr_limit == KERNEL_DS)
die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
if (is_el1_instruction_abort(esr))
die("Attempting to execute userspace memory", regs, esr);
if (!search_exception_tables(regs->pc))
die("Accessing user space memory outside uaccess.h routines", regs, esr);
}

View File

@ -23,6 +23,8 @@
#include <linux/module.h>
#include <linux/of.h>
#include <asm/acpi.h>
struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);
nodemask_t numa_nodes_parsed __initdata;

View File

@ -3,6 +3,8 @@
#ifdef __KERNEL__
#include <linux/types.h>
/* H8/300 internal I/O functions */
#define __raw_readb __raw_readb

View File

@ -52,6 +52,7 @@ config IA64
select MODULES_USE_ELF_RELA
select ARCH_USE_CMPXCHG_LOCKREF
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_HARDENED_USERCOPY
default y
help
The Itanium Processor Family is Intel's 64-bit successor to

View File

@ -241,12 +241,18 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
static inline unsigned long
__copy_to_user (void __user *to, const void *from, unsigned long count)
{
if (!__builtin_constant_p(count))
check_object_size(from, count, true);
return __copy_user(to, (__force void __user *) from, count);
}
static inline unsigned long
__copy_from_user (void *to, const void __user *from, unsigned long count)
{
if (!__builtin_constant_p(count))
check_object_size(to, count, false);
return __copy_user((__force void __user *) to, from, count);
}
@ -258,8 +264,11 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
const void *__cu_from = (from); \
long __cu_len = (n); \
\
if (__access_ok(__cu_to, __cu_len, get_fs())) \
__cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
if (__access_ok(__cu_to, __cu_len, get_fs())) { \
if (!__builtin_constant_p(n)) \
check_object_size(__cu_from, __cu_len, true); \
__cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
} \
__cu_len; \
})
@ -270,8 +279,11 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
long __cu_len = (n); \
\
__chk_user_ptr(__cu_from); \
if (__access_ok(__cu_from, __cu_len, get_fs())) \
if (__access_ok(__cu_from, __cu_len, get_fs())) { \
if (!__builtin_constant_p(n)) \
check_object_size(__cu_to, __cu_len, false); \
__cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
} \
__cu_len; \
})

View File

@ -213,7 +213,6 @@ static inline int frame_extra_sizes(int f)
static inline void adjustformat(struct pt_regs *regs)
{
((struct switch_stack *)regs - 1)->a5 = current->mm->start_data;
/*
* set format byte to make stack appear modulo 4, which it will
* be when doing the rte

View File

@ -390,7 +390,6 @@ void __init mem_init(void)
free_all_bootmem();
mem_init_print_info(NULL);
show_mem(0);
}
void free_initmem(void)

View File

@ -1642,8 +1642,14 @@ enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
preempt_disable();
if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
if (kvm_mips_host_tlb_lookup(vcpu, va) < 0)
kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 &&
kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) {
kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n",
__func__, va, vcpu, read_c0_entryhi());
er = EMULATE_FAIL;
preempt_enable();
goto done;
}
} else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
int index;
@ -1680,12 +1686,18 @@ enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
run, vcpu);
preempt_enable();
goto dont_update_pc;
} else {
/*
* We fault an entry from the guest tlb to the
* shadow host TLB
*/
kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb);
}
/*
* We fault an entry from the guest tlb to the
* shadow host TLB
*/
if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) {
kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
__func__, va, index, vcpu,
read_c0_entryhi());
er = EMULATE_FAIL;
preempt_enable();
goto done;
}
}
} else {
@ -2659,7 +2671,12 @@ enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
* OK we have a Guest TLB entry, now inject it into the
* shadow host TLB
*/
kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb);
if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) {
kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
__func__, va, index, vcpu,
read_c0_entryhi());
er = EMULATE_FAIL;
}
}
}

View File

@ -99,7 +99,7 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
}
gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
if (gfn >= kvm->arch.guest_pmap_npages) {
if ((gfn | 1) >= kvm->arch.guest_pmap_npages) {
kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
gfn, badvaddr);
kvm_mips_dump_host_tlbs();
@ -138,35 +138,49 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
struct kvm *kvm = vcpu->kvm;
kvm_pfn_t pfn0, pfn1;
gfn_t gfn0, gfn1;
long tlb_lo[2];
int ret;
if ((tlb->tlb_hi & VPN2_MASK) == 0) {
pfn0 = 0;
pfn1 = 0;
} else {
if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo[0])
>> PAGE_SHIFT) < 0)
return -1;
tlb_lo[0] = tlb->tlb_lo[0];
tlb_lo[1] = tlb->tlb_lo[1];
if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo[1])
>> PAGE_SHIFT) < 0)
return -1;
/*
* The commpage address must not be mapped to anything else if the guest
* TLB contains entries nearby, or commpage accesses will break.
*/
if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) &
VPN2_MASK & (PAGE_MASK << 1)))
tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;
pfn0 = kvm->arch.guest_pmap[
mips3_tlbpfn_to_paddr(tlb->tlb_lo[0]) >> PAGE_SHIFT];
pfn1 = kvm->arch.guest_pmap[
mips3_tlbpfn_to_paddr(tlb->tlb_lo[1]) >> PAGE_SHIFT];
gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT;
gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT;
if (gfn0 >= kvm->arch.guest_pmap_npages ||
gfn1 >= kvm->arch.guest_pmap_npages) {
kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n",
__func__, gfn0, gfn1, tlb->tlb_hi);
kvm_mips_dump_guest_tlbs(vcpu);
return -1;
}
if (kvm_mips_map_page(kvm, gfn0) < 0)
return -1;
if (kvm_mips_map_page(kvm, gfn1) < 0)
return -1;
pfn0 = kvm->arch.guest_pmap[gfn0];
pfn1 = kvm->arch.guest_pmap[gfn1];
/* Get attributes from the Guest TLB */
entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
(tlb->tlb_lo[0] & ENTRYLO_D) |
(tlb->tlb_lo[0] & ENTRYLO_V);
(tlb_lo[0] & ENTRYLO_D) |
(tlb_lo[0] & ENTRYLO_V);
entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
(tlb->tlb_lo[1] & ENTRYLO_D) |
(tlb->tlb_lo[1] & ENTRYLO_V);
(tlb_lo[1] & ENTRYLO_D) |
(tlb_lo[1] & ENTRYLO_V);
kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
tlb->tlb_lo[0], tlb->tlb_lo[1]);
@ -354,9 +368,15 @@ u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu)
local_irq_restore(flags);
return KVM_INVALID_INST;
}
kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
&vcpu->arch.
guest_tlb[index]);
if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
&vcpu->arch.guest_tlb[index])) {
kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n",
__func__, opc, index, vcpu,
read_c0_entryhi());
kvm_mips_dump_guest_tlbs(vcpu);
local_irq_restore(flags);
return KVM_INVALID_INST;
}
inst = *(opc);
}
local_irq_restore(flags);

View File

@ -97,10 +97,10 @@
#define ENOTCONN 235 /* Transport endpoint is not connected */
#define ESHUTDOWN 236 /* Cannot send after transport endpoint shutdown */
#define ETOOMANYREFS 237 /* Too many references: cannot splice */
#define EREFUSED ECONNREFUSED /* for HP's NFS apparently */
#define ETIMEDOUT 238 /* Connection timed out */
#define ECONNREFUSED 239 /* Connection refused */
#define EREMOTERELEASE 240 /* Remote peer released connection */
#define EREFUSED ECONNREFUSED /* for HP's NFS apparently */
#define EREMOTERELEASE 240 /* Remote peer released connection */
#define EHOSTDOWN 241 /* Host is down */
#define EHOSTUNREACH 242 /* No route to host */

View File

@ -51,8 +51,6 @@ EXPORT_SYMBOL(_parisc_requires_coherency);
DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data);
extern int update_cr16_clocksource(void); /* from time.c */
/*
** PARISC CPU driver - claim "device" and initialize CPU data structures.
**
@ -228,12 +226,6 @@ static int processor_probe(struct parisc_device *dev)
}
#endif
/* If we've registered more than one cpu,
* we'll use the jiffies clocksource since cr16
* is not synchronized between CPUs.
*/
update_cr16_clocksource();
return 0;
}

View File

@ -221,18 +221,6 @@ static struct clocksource clocksource_cr16 = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
int update_cr16_clocksource(void)
{
/* since the cr16 cycle counters are not synchronized across CPUs,
we'll check if we should switch to a safe clocksource: */
if (clocksource_cr16.rating != 0 && num_online_cpus() > 1) {
clocksource_change_rating(&clocksource_cr16, 0);
return 1;
}
return 0;
}
void __init start_cpu_itimer(void)
{
unsigned int cpu = smp_processor_id();

View File

@ -166,6 +166,7 @@ config PPC
select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
select GENERIC_CPU_AUTOPROBE
select HAVE_VIRT_CPU_ACCOUNTING
select HAVE_ARCH_HARDENED_USERCOPY
config GENERIC_CSUM
def_bool CPU_LITTLE_ENDIAN

View File

@ -66,29 +66,28 @@ endif
UTS_MACHINE := $(OLDARCH)
ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
override CC += -mlittle-endian
ifneq ($(cc-name),clang)
override CC += -mno-strict-align
endif
override AS += -mlittle-endian
override LD += -EL
override CROSS32CC += -mlittle-endian
override CROSS32AS += -mlittle-endian
LDEMULATION := lppc
GNUTARGET := powerpcle
MULTIPLEWORD := -mno-multiple
KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-save-toc-indirect)
else
ifeq ($(call cc-option-yn,-mbig-endian),y)
override CC += -mbig-endian
override AS += -mbig-endian
endif
override LD += -EB
LDEMULATION := ppc
GNUTARGET := powerpc
MULTIPLEWORD := -mmultiple
endif
cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian)
cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mlittle-endian
ifneq ($(cc-name),clang)
cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mno-strict-align
endif
aflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian)
aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mlittle-endian
ifeq ($(HAS_BIARCH),y)
override AS += -a$(CONFIG_WORD_SIZE)
override LD += -m elf$(CONFIG_WORD_SIZE)$(LDEMULATION)
@ -232,6 +231,9 @@ cpu-as-$(CONFIG_E200) += -Wa,-me200
KBUILD_AFLAGS += $(cpu-as-y)
KBUILD_CFLAGS += $(cpu-as-y)
KBUILD_AFLAGS += $(aflags-y)
KBUILD_CFLAGS += $(cflags-y)
head-y := arch/powerpc/kernel/head_$(CONFIG_WORD_SIZE).o
head-$(CONFIG_8xx) := arch/powerpc/kernel/head_8xx.o
head-$(CONFIG_40x) := arch/powerpc/kernel/head_40x.o

View File

@ -4,6 +4,7 @@
#include <linux/module.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/cpufeature.h>
#include <asm/switch_to.h>
#define CHKSUM_BLOCK_SIZE 1
@ -157,7 +158,7 @@ static void __exit crc32c_vpmsum_mod_fini(void)
crypto_unregister_shash(&alg);
}
module_init(crc32c_vpmsum_mod_init);
module_cpu_feature_match(PPC_MODULE_FEATURE_VEC_CRYPTO, crc32c_vpmsum_mod_init);
module_exit(crc32c_vpmsum_mod_fini);
MODULE_AUTHOR("Anton Blanchard <anton@samba.org>");

View File

@ -19,4 +19,17 @@ extern u64 pnv_first_deep_stop_state;
#endif
/* Idle state entry routines */
#ifdef CONFIG_PPC_P7_NAP
#define IDLE_STATE_ENTER_SEQ(IDLE_INST) \
/* Magic NAP/SLEEP/WINKLE mode enter sequence */ \
std r0,0(r1); \
ptesync; \
ld r0,0(r1); \
1: cmp cr0,r0,r0; \
bne 1b; \
IDLE_INST; \
b .
#endif /* CONFIG_PPC_P7_NAP */
#endif

View File

@ -186,6 +186,7 @@ label##3: \
#ifndef __ASSEMBLY__
void apply_feature_fixups(void);
void setup_feature_keys(void);
#endif
#endif /* __ASM_POWERPC_FEATURE_FIXUPS_H */

View File

@ -75,14 +75,6 @@ static inline void disable_kernel_spe(void)
static inline void __giveup_spe(struct task_struct *t) { }
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
extern void flush_tmregs_to_thread(struct task_struct *);
#else
static inline void flush_tmregs_to_thread(struct task_struct *t)
{
}
#endif
static inline void clear_task_ebb(struct task_struct *t)
{
#ifdef CONFIG_PPC_BOOK3S_64

View File

@ -310,10 +310,15 @@ static inline unsigned long copy_from_user(void *to,
{
unsigned long over;
if (access_ok(VERIFY_READ, from, n))
if (access_ok(VERIFY_READ, from, n)) {
if (!__builtin_constant_p(n))
check_object_size(to, n, false);
return __copy_tofrom_user((__force void __user *)to, from, n);
}
if ((unsigned long)from < TASK_SIZE) {
over = (unsigned long)from + n - TASK_SIZE;
if (!__builtin_constant_p(n - over))
check_object_size(to, n - over, false);
return __copy_tofrom_user((__force void __user *)to, from,
n - over) + over;
}
@ -325,10 +330,15 @@ static inline unsigned long copy_to_user(void __user *to,
{
unsigned long over;
if (access_ok(VERIFY_WRITE, to, n))
if (access_ok(VERIFY_WRITE, to, n)) {
if (!__builtin_constant_p(n))
check_object_size(from, n, true);
return __copy_tofrom_user(to, (__force void __user *)from, n);
}
if ((unsigned long)to < TASK_SIZE) {
over = (unsigned long)to + n - TASK_SIZE;
if (!__builtin_constant_p(n))
check_object_size(from, n - over, true);
return __copy_tofrom_user(to, (__force void __user *)from,
n - over) + over;
}
@ -372,6 +382,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
if (ret == 0)
return 0;
}
if (!__builtin_constant_p(n))
check_object_size(to, n, false);
return __copy_tofrom_user((__force void __user *)to, from, n);
}
@ -398,6 +412,9 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
if (ret == 0)
return 0;
}
if (!__builtin_constant_p(n))
check_object_size(from, n, true);
return __copy_tofrom_user(to, (__force const void __user *)from, n);
}

View File

@ -159,6 +159,8 @@ extern void xics_teardown_cpu(void);
extern void xics_kexec_teardown_cpu(int secondary);
extern void xics_migrate_irqs_away(void);
extern void icp_native_eoi(struct irq_data *d);
extern int xics_set_irq_type(struct irq_data *d, unsigned int flow_type);
extern int xics_retrigger(struct irq_data *data);
#ifdef CONFIG_SMP
extern int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask,
unsigned int strict_check);

View File

@ -168,10 +168,10 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
int n = 0, l = 0;
char buffer[128];
n += scnprintf(buf+n, len-n, "%04x:%02x:%02x:%01x\n",
n += scnprintf(buf+n, len-n, "%04x:%02x:%02x.%01x\n",
edev->phb->global_number, pdn->busno,
PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
pr_warn("EEH: of node=%04x:%02x:%02x:%01x\n",
pr_warn("EEH: of node=%04x:%02x:%02x.%01x\n",
edev->phb->global_number, pdn->busno,
PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));

View File

@ -144,29 +144,14 @@ machine_check_pSeries_1:
* vector
*/
SET_SCRATCH0(r13) /* save r13 */
#ifdef CONFIG_PPC_P7_NAP
BEGIN_FTR_SECTION
/* Running native on arch 2.06 or later, check if we are
* waking up from nap. We only handle no state loss and
* supervisor state loss. We do -not- handle hypervisor
* state loss at this time.
/*
* Running native on arch 2.06 or later, we may wakeup from winkle
* inside machine check. If yes, then last bit of HSPGR0 would be set
* to 1. Hence clear it unconditionally.
*/
mfspr r13,SPRN_SRR1
rlwinm. r13,r13,47-31,30,31
OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
beq 9f
mfspr r13,SPRN_SRR1
rlwinm. r13,r13,47-31,30,31
/* waking up from powersave (nap) state */
cmpwi cr1,r13,2
/* Total loss of HV state is fatal. let's just stay stuck here */
OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
bgt cr1,.
9:
OPT_SET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif /* CONFIG_PPC_P7_NAP */
GET_PACA(r13)
clrrdi r13,r13,1
SET_PACA(r13)
EXCEPTION_PROLOG_0(PACA_EXMC)
BEGIN_FTR_SECTION
b machine_check_powernv_early
@ -1273,25 +1258,51 @@ machine_check_handle_early:
* Check if thread was in power saving mode. We come here when any
* of the following is true:
* a. thread wasn't in power saving mode
* b. thread was in power saving mode with no state loss or
* supervisor state loss
* b. thread was in power saving mode with no state loss,
* supervisor state loss or hypervisor state loss.
*
* Go back to nap again if (b) is true.
* Go back to nap/sleep/winkle mode again if (b) is true.
*/
rlwinm. r11,r12,47-31,30,31 /* Was it in power saving mode? */
beq 4f /* No, it wasn;t */
/* Thread was in power saving mode. Go back to nap again. */
cmpwi r11,2
bne 3f
/* Supervisor state loss */
blt 3f
/* Supervisor/Hypervisor state loss */
li r0,1
stb r0,PACA_NAPSTATELOST(r13)
3: bl machine_check_queue_event
MACHINE_CHECK_HANDLER_WINDUP
GET_PACA(r13)
ld r1,PACAR1(r13)
li r3,PNV_THREAD_NAP
b pnv_enter_arch207_idle_mode
/*
* Check what idle state this CPU was in and go back to same mode
* again.
*/
lbz r3,PACA_THREAD_IDLE_STATE(r13)
cmpwi r3,PNV_THREAD_NAP
bgt 10f
IDLE_STATE_ENTER_SEQ(PPC_NAP)
/* No return */
10:
cmpwi r3,PNV_THREAD_SLEEP
bgt 2f
IDLE_STATE_ENTER_SEQ(PPC_SLEEP)
/* No return */
2:
/*
* Go back to winkle. Please note that this thread was woken up in
* machine check from winkle and have not restored the per-subcore
* state. Hence before going back to winkle, set last bit of HSPGR0
* to 1. This will make sure that if this thread gets woken up
* again at reset vector 0x100 then it will get chance to restore
* the subcore state.
*/
ori r13,r13,1
SET_PACA(r13)
IDLE_STATE_ENTER_SEQ(PPC_WINKLE)
/* No return */
4:
#endif
/*

View File

@ -44,18 +44,6 @@
PSSCR_PSLL_MASK | PSSCR_TR_MASK | \
PSSCR_MTL_MASK
/* Idle state entry routines */
#define IDLE_STATE_ENTER_SEQ(IDLE_INST) \
/* Magic NAP/SLEEP/WINKLE mode enter sequence */ \
std r0,0(r1); \
ptesync; \
ld r0,0(r1); \
1: cmp cr0,r0,r0; \
bne 1b; \
IDLE_INST; \
b .
.text
/*
@ -363,8 +351,8 @@ _GLOBAL(power9_idle_stop)
* cr3 - set to gt if waking up with partial/complete hypervisor state loss
*/
_GLOBAL(pnv_restore_hyp_resource)
ld r2,PACATOC(r13);
BEGIN_FTR_SECTION
ld r2,PACATOC(r13);
/*
* POWER ISA 3. Use PSSCR to determine if we
* are waking up from deep idle state
@ -395,6 +383,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
*/
clrldi r5,r13,63
clrrdi r13,r13,1
/* Now that we are sure r13 is corrected, load TOC */
ld r2,PACATOC(r13);
cmpwi cr4,r5,1
mtspr SPRN_HSPRG0,r13

View File

@ -92,7 +92,8 @@ void save_mce_event(struct pt_regs *regs, long handled,
mce->in_use = 1;
mce->initiator = MCE_INITIATOR_CPU;
if (handled)
/* Mark it recovered if we have handled it and MSR(RI=1). */
if (handled && (regs->msr & MSR_RI))
mce->disposition = MCE_DISPOSITION_RECOVERED;
else
mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;

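The added guard above only marks an event recovered when the handler dealt with it AND MSR[RI] says the interrupt state is recoverable. A minimal sketch of that decision; the MSR_RI bit position assumes the usual PowerPC layout and the enum values are illustrative:

#include <stdio.h>

/* A minimal sketch of the rule above; names are stand-ins. */
#define MSR_RI (1UL << 1)

enum disposition { MCE_NOT_RECOVERED, MCE_RECOVERED };

static enum disposition classify(int handled, unsigned long msr)
{
	/* recovered only if handled AND the state is recoverable */
	return (handled && (msr & MSR_RI)) ? MCE_RECOVERED
					   : MCE_NOT_RECOVERED;
}

int main(void)
{
	printf("%d\n", classify(1, MSR_RI)); /* 1: recovered */
	printf("%d\n", classify(1, 0));      /* 0: RI clear */
	return 0;
}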
View File

@ -78,6 +78,7 @@ EXPORT_SYMBOL(get_pci_dma_ops);
static int get_phb_number(struct device_node *dn)
{
int ret, phb_id = -1;
u32 prop_32;
u64 prop;
/*
@ -86,8 +87,10 @@ static int get_phb_number(struct device_node *dn)
* reading "ibm,opal-phbid", only present in OPAL environment.
*/
ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop);
if (ret)
ret = of_property_read_u32_index(dn, "reg", 1, (u32 *)&prop);
if (ret) {
ret = of_property_read_u32_index(dn, "reg", 1, &prop_32);
prop = prop_32;
}
if (!ret)
phb_id = (int)(prop & (MAX_PHBS - 1));

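The hunk above replaces a (u32 *) cast of a u64's address with a real u32 plus an explicit widening assignment: the cast stores the 32-bit value into the correct half of the u64 only on little-endian machines. A minimal userspace sketch of the difference, where read_u32 stands in for of_property_read_u32_index:

#include <stdint.h>
#include <stdio.h>

/* A minimal sketch of the bug being fixed. The cast variant is
 * endian-dependent (and an aliasing violation besides); widening a
 * real u32 is correct everywhere. */
static void read_u32(uint32_t *out)
{
	*out = 0x1234;
}

int main(void)
{
	uint64_t prop = 0;
	uint32_t prop_32;

	read_u32((uint32_t *)&prop);	/* buggy: which half gets written? */
	printf("cast:    0x%llx\n", (unsigned long long)prop);

	read_u32(&prop_32);
	prop = prop_32;			/* fixed: explicit widening */
	printf("widened: 0x%llx\n", (unsigned long long)prop);
	return 0;
}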
View File

@ -1074,26 +1074,6 @@ static inline void restore_sprs(struct thread_struct *old_thread,
#endif
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void flush_tmregs_to_thread(struct task_struct *tsk)
{
/*
* Process self tracing is not yet supported through
* ptrace interface. Ptrace generic code should have
* prevented this from happening in the first place.
* Warn once here with the message, if somehow it
* is attempted.
*/
WARN_ONCE(tsk == current,
"Not expecting ptrace on self: TM regs may be incorrect\n");
/*
* If task is not current, it should have been flushed
* already to its thread_struct during __switch_to().
*/
}
#endif
struct task_struct *__switch_to(struct task_struct *prev,
struct task_struct *new)
{

View File

@ -2940,7 +2940,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
/* Don't print anything after quiesce under OPAL, it crashes OFW */
if (of_platform != PLATFORM_OPAL) {
prom_printf("Booting Linux via __start() ...\n");
prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
prom_debug("->dt_header_start=0x%x\n", hdr);
}

View File

@ -38,6 +38,7 @@
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
@ -118,6 +119,24 @@ static const struct pt_regs_offset regoffset_table[] = {
REG_OFFSET_END,
};
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static void flush_tmregs_to_thread(struct task_struct *tsk)
{
/*
* If task is not current, it will have been flushed already to
* its thread_struct during __switch_to().
*
* A reclaim flushes ALL the state.
*/
if (tsk == current && MSR_TM_SUSPENDED(mfmsr()))
tm_reclaim_current(TM_CAUSE_SIGNAL);
}
#else
static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
#endif
/**
* regs_query_register_offset() - query register offset from its name
* @name: the name of a register

View File

@ -93,15 +93,16 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
* and we are running with enough of the MMU enabled to have our
* proper kernel virtual addresses
*
* Find out what kind of machine we're on and save any data we need
* from the early boot process (devtree is copied on pmac by prom_init()).
* This is called very early in the boot process, after a minimal
* MMU environment has been set up but before MMU_init is called.
* We do the initial parsing of the flat device-tree and prepare
* for the MMU to be fully initialized.
*/
extern unsigned int memset_nocache_branch; /* Insn to be replaced by NOP */
notrace void __init machine_init(u64 dt_ptr)
{
/* Configure static keys first, now that we're relocated. */
setup_feature_keys();
/* Enable early debugging if any specified (see udbg.h) */
udbg_early_init();

View File

@ -300,6 +300,7 @@ void __init early_setup(unsigned long dt_ptr)
/* Apply all the dynamic patching */
apply_feature_fixups();
setup_feature_keys();
/* Initialize the hash table or TLB handling */
early_init_mmu();

View File

@ -22,6 +22,7 @@
#include <linux/security.h>
#include <linux/memblock.h>
#include <asm/cpu_has_feature.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu.h>

View File

@ -30,7 +30,7 @@ CPPFLAGS_vdso32.lds += -P -C -Upowerpc
$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
# link rule for the .so file, .lds has to be first
$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32)
$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) FORCE
$(call if_changed,vdso32ld)
# strip rule for the .so file
@ -39,12 +39,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
$(call if_changed,objcopy)
# assembly rules for the .S files
$(obj-vdso32): %.o: %.S
$(obj-vdso32): %.o: %.S FORCE
$(call if_changed_dep,vdso32as)
# actual build commands
quiet_cmd_vdso32ld = VDSO32L $@
cmd_vdso32ld = $(CROSS32CC) $(c_flags) -Wl,-T $^ -o $@
cmd_vdso32ld = $(CROSS32CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
quiet_cmd_vdso32as = VDSO32A $@
cmd_vdso32as = $(CROSS32CC) $(a_flags) -c -o $@ $<

View File

@ -23,7 +23,7 @@ CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
# link rule for the .so file, .lds has to be first
$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64)
$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) FORCE
$(call if_changed,vdso64ld)
# strip rule for the .so file
@ -32,12 +32,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
$(call if_changed,objcopy)
# assembly rules for the .S files
$(obj-vdso64): %.o: %.S
$(obj-vdso64): %.o: %.S FORCE
$(call if_changed_dep,vdso64as)
# actual build commands
quiet_cmd_vdso64ld = VDSO64L $@
cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@
cmd_vdso64ld = $(CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
quiet_cmd_vdso64as = VDSO64A $@
cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<

View File

@ -1329,20 +1329,16 @@ static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
xics->kvm = kvm;
/* Already there ? */
mutex_lock(&kvm->lock);
if (kvm->arch.xics)
ret = -EEXIST;
else
kvm->arch.xics = xics;
mutex_unlock(&kvm->lock);
if (ret) {
kfree(xics);
return ret;
}
xics_debugfs_init(xics);
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
if (cpu_has_feature(CPU_FTR_ARCH_206)) {
/* Enable real mode support */
@ -1354,9 +1350,17 @@ static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
return 0;
}
static void kvmppc_xics_init(struct kvm_device *dev)
{
struct kvmppc_xics *xics = (struct kvmppc_xics *)dev->private;
xics_debugfs_init(xics);
}
struct kvm_device_ops kvm_xics_ops = {
.name = "kvm-xics",
.create = kvmppc_xics_create,
.init = kvmppc_xics_init,
.destroy = kvmppc_xics_free,
.set_attr = xics_set_attr,
.get_attr = xics_get_attr,

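The hunk above moves the debugfs registration out of .create and into a new .init callback, which the KVM device core invokes only once creation has succeeded, so init-time work always sees a fully constructed device. A minimal sketch of that create/init split, with illustrative types:

#include <stdio.h>
#include <stdlib.h>

/* A minimal sketch; types and names are stand-ins for the KVM ones. */
struct kdev {
	void *private;
};

struct kdev_ops {
	int  (*create)(struct kdev *dev);
	void (*init)(struct kdev *dev);
};

static int xics_create(struct kdev *dev)
{
	dev->private = malloc(64);	/* the xics state in the real code */
	return dev->private ? 0 : -1;
}

static void xics_init(struct kdev *dev)
{
	/* stands in for xics_debugfs_init(dev->private) */
	printf("debugfs init for %p\n", dev->private);
}

static const struct kdev_ops ops = {
	.create = xics_create,
	.init   = xics_init,
};

int main(void)
{
	struct kdev dev = { 0 };

	if (ops.create(&dev) == 0)
		ops.init(&dev);		/* core calls init only on success */
	free(dev.private);
	return 0;
}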
View File

@ -127,8 +127,9 @@ _GLOBAL(csum_partial_copy_generic)
stw r7,12(r1)
stw r8,8(r1)
andi. r0,r4,1 /* is destination address even ? */
cmplwi cr7,r0,0
rlwinm r0,r4,3,0x8
rlwnm r6,r6,r0,0,31 /* odd destination address: rotate one byte */
cmplwi cr7,r0,0 /* is destination address even ? */
addic r12,r6,0
addi r6,r4,-4
neg r0,r4
@ -237,7 +238,7 @@ _GLOBAL(csum_partial_copy_generic)
66: addze r3,r12
addi r1,r1,16
beqlr+ cr7
rlwinm r3,r3,8,0,31 /* swap bytes for odd destination */
rlwinm r3,r3,8,0,31 /* odd destination address: rotate one byte */
blr
/* read fault */

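The comment changes above replace "swap bytes" with the more precise "rotate one byte": a ones-complement sum accumulated from an odd destination address comes out byte-rotated, and a single rotate at the end corrects it. A minimal userspace sketch of that property:

#include <stdint.h>
#include <stdio.h>

/* A minimal sketch: a 16-bit ones-complement sum taken with the byte
 * lanes shifted by one (odd start address) equals the byte-rotated
 * correct sum, thanks to the end-around carry. */
static uint16_t csum(const uint8_t *p, unsigned int n, int odd)
{
	uint32_t sum = 0;
	unsigned int i;

	for (i = 0; i < n; i++) {
		if ((i + odd) & 1)
			sum += p[i];			/* low byte lane */
		else
			sum += (uint32_t)p[i] << 8;	/* high byte lane */
	}
	while (sum >> 16)				/* end-around carry */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	uint8_t data[] = { 0x12, 0x34, 0x56, 0x78, 0x9a };
	uint16_t even = csum(data, sizeof(data), 0);
	uint16_t odd  = csum(data, sizeof(data), 1);
	uint16_t back = (uint16_t)((odd << 8) | (odd >> 8));

	/* prints even=02ad odd=ad02 rotated=02ad */
	printf("even=%04x odd=%04x rotated=%04x\n", even, odd, back);
	return 0;
}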
View File

@ -188,7 +188,10 @@ void __init apply_feature_fixups(void)
&__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
#endif
do_final_fixups();
}
void __init setup_feature_keys(void)
{
/*
* Initialise jump label. This causes all the cpu/mmu_has_feature()
* checks to take on their correct polarity based on the current set of

View File

@ -496,8 +496,10 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode)
gang = alloc_spu_gang();
SPUFS_I(inode)->i_ctx = NULL;
SPUFS_I(inode)->i_gang = gang;
if (!gang)
if (!gang) {
ret = -ENOMEM;
goto out_iput;
}
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;

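The added braces above make the failure path set an error code before jumping to the shared cleanup label, instead of leaving ret stale. A minimal sketch of that goto-unwind idiom, with illustrative types:

#include <errno.h>
#include <stdlib.h>

/* A minimal sketch of the completed error path: on failure, set the
 * error code AND release the earlier reference via one label. */
struct inode {
	int refs;
};

static void iput(struct inode *inode)
{
	inode->refs--;
}

static int mkgang(struct inode *inode)
{
	void *gang;
	int ret = 0;

	inode->refs++;			/* reference taken earlier */
	gang = malloc(128);
	if (!gang) {
		ret = -ENOMEM;
		goto out_iput;
	}
	/* ... normal setup continues here ... */
	free(gang);
	return 0;

out_iput:
	iput(inode);
	return ret;
}

int main(void)
{
	struct inode i = { 0 };

	return mkgang(&i) ? 1 : 0;
}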
View File

@ -187,6 +187,11 @@ static void pci_dma_dev_setup_pasemi(struct pci_dev *dev)
if (dev->vendor == 0x1959 && dev->device == 0xa007 &&
!firmware_has_feature(FW_FEATURE_LPAR)) {
dev->dev.archdata.dma_ops = &dma_direct_ops;
/*
* Set the coherent DMA mask to prevent the iommu
* being used unnecessarily
*/
dev->dev.coherent_dma_mask = DMA_BIT_MASK(44);
return;
}
#endif

View File

@ -228,7 +228,8 @@ int __init opal_event_init(void)
}
/* Install interrupt handler */
rc = request_irq(virq, opal_interrupt, 0, "opal", NULL);
rc = request_irq(virq, opal_interrupt, IRQF_TRIGGER_LOW,
"opal", NULL);
if (rc) {
irq_dispose_mapping(virq);
pr_warn("Error %d requesting irq %d (0x%x)\n",

View File

@ -399,6 +399,7 @@ static int opal_recover_mce(struct pt_regs *regs,
if (!(regs->msr & MSR_RI)) {
/* If MSR_RI isn't set, we cannot recover */
pr_err("Machine check interrupt unrecoverable: MSR(RI=0)\n");
recovered = 0;
} else if (evt->disposition == MCE_DISPOSITION_RECOVERED) {
/* Platform corrected itself */

View File

@ -111,10 +111,17 @@ static int __init iommu_setup(char *str)
}
early_param("iommu", iommu_setup);
static inline bool pnv_pci_is_mem_pref_64(unsigned long flags)
static inline bool pnv_pci_is_m64(struct pnv_phb *phb, struct resource *r)
{
return ((flags & (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH)) ==
(IORESOURCE_MEM_64 | IORESOURCE_PREFETCH));
/*
* WARNING: We cannot rely on the resource flags. The Linux PCI
* allocation code sometimes decides to put a 64-bit prefetchable
* BAR in the 32-bit window, so we have to compare the addresses.
*
* For simplicity we only test resource start.
*/
return (r->start >= phb->ioda.m64_base &&
r->start < (phb->ioda.m64_base + phb->ioda.m64_size));
}
static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no)
@ -229,7 +236,7 @@ static void pnv_ioda_reserve_dev_m64_pe(struct pci_dev *pdev,
sgsz = phb->ioda.m64_segsize;
for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
r = &pdev->resource[i];
if (!r->parent || !pnv_pci_is_mem_pref_64(r->flags))
if (!r->parent || !pnv_pci_is_m64(phb, r))
continue;
start = _ALIGN_DOWN(r->start - base, sgsz);
@ -1877,7 +1884,7 @@ static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe, bool rm,
unsigned shift, unsigned long index,
unsigned long npages)
{
__be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, false);
__be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm);
unsigned long start, end, inc;
/* We'll invalidate DMA address in PE scope */
@ -2863,7 +2870,7 @@ static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
res = &pdev->resource[i + PCI_IOV_RESOURCES];
if (!res->flags || res->parent)
continue;
if (!pnv_pci_is_mem_pref_64(res->flags)) {
if (!pnv_pci_is_m64(phb, res)) {
dev_warn(&pdev->dev, "Don't support SR-IOV with"
" non M64 VF BAR%d: %pR. \n",
i, res);
@ -2958,7 +2965,7 @@ static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe,
index++;
}
} else if ((res->flags & IORESOURCE_MEM) &&
!pnv_pci_is_mem_pref_64(res->flags)) {
!pnv_pci_is_m64(phb, res)) {
region.start = res->start -
phb->hose->mem_offset[0] -
phb->ioda.m32_pci_base;
@ -3083,9 +3090,12 @@ static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
bridge = bridge->bus->self;
}
/* We fail back to M32 if M64 isn't supported */
if (phb->ioda.m64_segsize &&
pnv_pci_is_mem_pref_64(type))
/*
* We fall back to M32 if M64 isn't supported. We enforce the M64
* alignment for any 64-bit resource; PCIe doesn't care, and
* bridges only do 64-bit prefetchable anyway.
*/
if (phb->ioda.m64_segsize && (type & IORESOURCE_MEM_64))
return phb->ioda.m64_segsize;
if (type & IORESOURCE_MEM)
return phb->ioda.m32_segsize;
@ -3125,7 +3135,7 @@ static void pnv_pci_fixup_bridge_resources(struct pci_bus *bus,
w = NULL;
if (r->flags & type & IORESOURCE_IO)
w = &hose->io_resource;
else if (pnv_pci_is_mem_pref_64(r->flags) &&
else if (pnv_pci_is_m64(phb, r) &&
(type & IORESOURCE_PREFETCH) &&
phb->ioda.m64_segsize)
w = &hose->mem_resources[1];

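The new predicate above classifies a BAR by address range rather than by resource flags, for the reason the comment gives. A minimal userspace sketch of the range test; field names and window values are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A minimal sketch of pnv_pci_is_m64's replacement test. */
struct resource {
	uint64_t start;
};

struct phb {
	uint64_t m64_base, m64_size;
};

static bool is_m64(const struct phb *phb, const struct resource *r)
{
	/* only the resource start is tested, as in the fix */
	return r->start >= phb->m64_base &&
	       r->start <  phb->m64_base + phb->m64_size;
}

int main(void)
{
	struct phb phb = { .m64_base = 0x100000000ULL,
			   .m64_size = 0x40000000ULL };
	struct resource in  = { .start = 0x110000000ULL };
	struct resource out = { .start = 0x0fe000000ULL };

	printf("%d %d\n", is_m64(&phb, &in), is_m64(&phb, &out));
	return 0;
}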
View File

@ -320,19 +320,6 @@ static int dlpar_remove_device_tree_lmb(struct of_drconf_cell *lmb)
return dlpar_update_device_tree_lmb(lmb);
}
static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb)
{
unsigned long section_nr;
struct mem_section *mem_sect;
struct memory_block *mem_block;
section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
mem_sect = __nr_to_section(section_nr);
mem_block = find_memory_block(mem_sect);
return mem_block;
}
#ifdef CONFIG_MEMORY_HOTREMOVE
static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
{
@ -420,6 +407,19 @@ static bool lmb_is_removable(struct of_drconf_cell *lmb)
static int dlpar_add_lmb(struct of_drconf_cell *);
static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb)
{
unsigned long section_nr;
struct mem_section *mem_sect;
struct memory_block *mem_block;
section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
mem_sect = __nr_to_section(section_nr);
mem_block = find_memory_block(mem_sect);
return mem_block;
}
static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
{
struct memory_block *mem_block;

View File

@ -1,6 +1,7 @@
config PPC_XICS
def_bool n
select PPC_SMP_MUXED_IPI
select HARDIRQS_SW_RESEND
config PPC_ICP_NATIVE
def_bool n

View File

@ -156,7 +156,9 @@ static struct irq_chip ics_opal_irq_chip = {
.irq_mask = ics_opal_mask_irq,
.irq_unmask = ics_opal_unmask_irq,
.irq_eoi = NULL, /* Patched at init time */
.irq_set_affinity = ics_opal_set_affinity
.irq_set_affinity = ics_opal_set_affinity,
.irq_set_type = xics_set_irq_type,
.irq_retrigger = xics_retrigger,
};
static int ics_opal_map(struct ics *ics, unsigned int virq);

View File

@ -163,7 +163,9 @@ static struct irq_chip ics_rtas_irq_chip = {
.irq_mask = ics_rtas_mask_irq,
.irq_unmask = ics_rtas_unmask_irq,
.irq_eoi = NULL, /* Patched at init time */
.irq_set_affinity = ics_rtas_set_affinity
.irq_set_affinity = ics_rtas_set_affinity,
.irq_set_type = xics_set_irq_type,
.irq_retrigger = xics_retrigger,
};
static int ics_rtas_map(struct ics *ics, unsigned int virq)

View File

@ -328,8 +328,12 @@ static int xics_host_map(struct irq_domain *h, unsigned int virq,
pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw);
/* They aren't all level sensitive but we just don't really know */
irq_set_status_flags(virq, IRQ_LEVEL);
/*
* Mark interrupts as edge sensitive by default so that resend
* actually works. The device-tree parsing will turn the LSIs
* back to level.
*/
irq_clear_status_flags(virq, IRQ_LEVEL);
/* Don't call into ICS for IPIs */
if (hw == XICS_IPI) {
@ -351,13 +355,54 @@ static int xics_host_xlate(struct irq_domain *h, struct device_node *ct,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
/* Current xics implementation translates everything
* to level. It is not technically right for MSIs but this
* is irrelevant at this point. We might get smarter in the future
*/
*out_hwirq = intspec[0];
*out_flags = IRQ_TYPE_LEVEL_LOW;
/*
* If intsize is at least 2, we look for the type in the second cell;
* we assume the LSB indicates a level interrupt.
*/
if (intsize > 1) {
if (intspec[1] & 1)
*out_flags = IRQ_TYPE_LEVEL_LOW;
else
*out_flags = IRQ_TYPE_EDGE_RISING;
} else
*out_flags = IRQ_TYPE_LEVEL_LOW;
return 0;
}
int xics_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
/*
* We only support these. This has really no effect other than setting
* the corresponding descriptor bits, mind you, but those will in turn
* affect the resend function when re-enabling an edge interrupt.
*
* Set the default to edge as explained in map().
*/
if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE)
flow_type = IRQ_TYPE_EDGE_RISING;
if (flow_type != IRQ_TYPE_EDGE_RISING &&
flow_type != IRQ_TYPE_LEVEL_LOW)
return -EINVAL;
irqd_set_trigger_type(d, flow_type);
return IRQ_SET_MASK_OK_NOCOPY;
}
int xics_retrigger(struct irq_data *data)
{
/*
* We need to push a dummy CPPR when retriggering, since the subsequent
* EOI will try to pop it. Passing 0 works, as the function hard codes
* the priority value anyway.
*/
xics_push_cppr(0);
/* Tell the core to do a soft retrigger */
return 0;
}

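The xlate change above decodes an optional second specifier cell: bit 0 set means a low-level interrupt, bit 0 clear means rising edge, and single-cell specifiers keep the old level-low assumption. A minimal userspace sketch of that decoding; the IRQ_TYPE_* values here are stand-ins, not the kernel's constants:

#include <stdio.h>

/* A minimal sketch of the two-cell decoding above. */
enum irq_type { IRQ_TYPE_EDGE_RISING, IRQ_TYPE_LEVEL_LOW };

static enum irq_type xlate_flags(const unsigned int *intspec, int intsize)
{
	if (intsize > 1)		/* second cell present: LSB = level */
		return (intspec[1] & 1) ? IRQ_TYPE_LEVEL_LOW
					: IRQ_TYPE_EDGE_RISING;
	return IRQ_TYPE_LEVEL_LOW;	/* old single-cell assumption */
}

int main(void)
{
	unsigned int lsi[] = { 17, 1 }, msi[] = { 42, 0 }, old[] = { 9 };

	printf("%d %d %d\n", xlate_flags(lsi, 2), xlate_flags(msi, 2),
	       xlate_flags(old, 1));
	return 0;
}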
View File

@ -123,6 +123,7 @@ config S390
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_EARLY_PFN_TO_NID
select HAVE_ARCH_HARDENED_USERCOPY
select HAVE_ARCH_JUMP_LABEL
select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES
select HAVE_ARCH_SECCOMP_FILTER
@ -871,4 +872,17 @@ config S390_GUEST
Select this option if you want to run the kernel as a guest under
the KVM hypervisor.
config S390_GUEST_OLD_TRANSPORT
def_bool y
prompt "Guest support for old s390 virtio transport (DEPRECATED)"
depends on S390_GUEST
help
Enable this option to add support for the old s390-virtio
transport (i.e. virtio devices NOT based on virtio-ccw). This
type of virtio device is only available on the experimental
kuli userspace or with old (< 2.6) qemu. If you are running
with a modern version of qemu (which supports virtio-ccw since
1.4 and uses it by default since version 2.4), you probably won't
need this.
endmenu

View File

@ -21,16 +21,21 @@ ENTRY(startup_continue)
lg %r15,.Lstack-.LPG1(%r13)
aghi %r15,-160
brasl %r14,decompress_kernel
# setup registers for memory mover & branch to target
# Set up registers for memory mover. We move the decompressed image to
# 0x11000, starting at offset 0x11000 in the decompressed image so
# that code living at 0x11000 in the image will end up at 0x11000 in
# memory.
lgr %r4,%r2
lg %r2,.Loffset-.LPG1(%r13)
la %r4,0(%r2,%r4)
lg %r3,.Lmvsize-.LPG1(%r13)
lgr %r5,%r3
# move the memory mover someplace safe
# Move the memory mover someplace safe so it doesn't overwrite itself.
la %r1,0x200
mvc 0(mover_end-mover,%r1),mover-.LPG1(%r13)
# decompress image is started at 0x11000
# When the memory mover is done we pass control to
# arch/s390/kernel/head64.S:startup_continue which lives at 0x11000 in
# the decompressed image.
lgr %r6,%r2
br %r1
mover:

View File

@ -678,7 +678,7 @@ CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=m
CONFIG_CRYPTO_CRC32_S390=y
CONFIG_ASYMMETRIC_KEY_TYPE=y
CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
CONFIG_X509_CERTIFICATE_PARSER=m

View File

@ -616,7 +616,7 @@ CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=m
CONFIG_CRYPTO_CRC32_S390=y
CONFIG_ASYMMETRIC_KEY_TYPE=y
CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
CONFIG_X509_CERTIFICATE_PARSER=m

View File

@ -615,7 +615,7 @@ CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=m
CONFIG_CRYPTO_CRC32_S390=y
CONFIG_ASYMMETRIC_KEY_TYPE=y
CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
CONFIG_X509_CERTIFICATE_PARSER=m

View File

@ -51,6 +51,9 @@ u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
struct kernel_fpu vxstate; \
unsigned long prealign, aligned, remaining; \
\
if (datalen < VX_MIN_LEN + VX_ALIGN_MASK) \
return ___crc32_sw(crc, data, datalen); \
\
if ((unsigned long)data & VX_ALIGN_MASK) { \
prealign = VX_ALIGNMENT - \
((unsigned long)data & VX_ALIGN_MASK); \
@ -59,9 +62,6 @@ u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
data = (void *)((unsigned long)data + prealign); \
} \
\
if (datalen < VX_MIN_LEN) \
return ___crc32_sw(crc, data, datalen); \
\
aligned = datalen & ~VX_ALIGN_MASK; \
remaining = datalen & VX_ALIGN_MASK; \
\

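The reordering above matters because the prealignment step consumes up to VX_ALIGN_MASK bytes before the old length check ran, so a short unaligned buffer could underflow datalen. A minimal userspace sketch of the fixed control flow, with stubbed CRC helpers; the constants mirror the s390 vector helper:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* A minimal sketch: the up-front check covers the worst-case prealign
 * plus the minimum vector length, so the subtraction never underflows. */
#define VX_ALIGNMENT	16UL
#define VX_ALIGN_MASK	(VX_ALIGNMENT - 1)
#define VX_MIN_LEN	64UL

static uint32_t crc_sw(uint32_t crc, const uint8_t *p, size_t n)
{
	(void)p; (void)n;	/* stub: scalar fallback */
	return crc;
}

static uint32_t crc_vx(uint32_t crc, const uint8_t *p, size_t n)
{
	(void)p; (void)n;	/* stub: vector implementation */
	return crc;
}

static uint32_t crc_dispatch(uint32_t crc, const uint8_t *data, size_t datalen)
{
	if (datalen < VX_MIN_LEN + VX_ALIGN_MASK)
		return crc_sw(crc, data, datalen);

	if ((uintptr_t)data & VX_ALIGN_MASK) {
		size_t prealign = VX_ALIGNMENT -
				  ((uintptr_t)data & VX_ALIGN_MASK);

		crc = crc_sw(crc, data, prealign);
		data += prealign;
		datalen -= prealign;	/* safe: >= VX_MIN_LEN remains */
	}

	crc = crc_vx(crc, data, datalen & ~VX_ALIGN_MASK);
	return crc_sw(crc, data + (datalen & ~VX_ALIGN_MASK),
		      datalen & VX_ALIGN_MASK);
}

int main(void)
{
	uint8_t buf[256] = { 0 };

	printf("%u\n", crc_dispatch(0, buf + 3, 10));	/* short: scalar */
	printf("%u\n", crc_dispatch(0, buf + 3, 200));	/* long: vector */
	return 0;
}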
View File

@ -234,7 +234,7 @@ CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_CRC32_S390=m
CONFIG_CRYPTO_CRC32_S390=y
CONFIG_CRC7=m
# CONFIG_XZ_DEC_X86 is not set
# CONFIG_XZ_DEC_POWERPC is not set

View File

@ -309,7 +309,9 @@ ENTRY(startup_kdump)
l %r15,.Lstack-.LPG0(%r13)
ahi %r15,-STACK_FRAME_OVERHEAD
brasl %r14,verify_facilities
/* Continue with startup code in head64.S */
# For uncompressed images, continue in
# arch/s390/kernel/head64.S. For compressed images, continue in
# arch/s390/boot/compressed/head.S.
jg startup_continue
.Lstack:

Some files were not shown because too many files have changed in this diff.