Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc updates from Ben Herrenschmidt:
 "These are the powerpc changes for the 3.11 merge window. In addition
  to the usual bug fixes and small updates, the main highlights are:

   - Support for transparent huge pages by Aneesh Kumar for 64-bit
     server processors. This allows the use of 16M pages as transparent
     huge pages on kernels compiled with a 64K base page size.

   - Base VFIO support for KVM on power by Alexey Kardashevskiy.

   - Wiring up of our nvram to the pstore infrastructure, including
     putting compressed oopses in there, by Aruna Balakrishnaiah.

   - Move, rework and improve our "EEH" (basically PCI error handling
     and recovery) infrastructure. It is no longer specific to pseries
     but is now usable by the new "powernv" platform as well (no
     hypervisor), by Gavin Shan.

   - I fixed some bugs in our math-emu instruction decoding and made it
     usable to emulate some optional FP instructions on processors with
     hard FP that lack them (such as fsqrt on Freescale embedded
     processors).

   - Support for the Power8 "Event Based Branch" facility by Michael
     Ellerman. This facility allows what is basically "userspace
     interrupts" for performance monitor events.

   - A bunch of Transactional Memory vs. Signals bug fixes and HW
     breakpoint/watchpoint fixes by Michael Neuling.

  And more ... I apologize in advance if I've failed to highlight
  something that somebody deemed worth it."

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (156 commits)
  pstore: Add hsize argument in write_buf call of pstore_ftrace_call
  powerpc/fsl: add MPIC timer wakeup support
  powerpc/mpic: create mpic subsystem object
  powerpc/mpic: add global timer support
  powerpc/mpic: add irq_set_wake support
  powerpc/85xx: enable coreint for all the 64bit boards
  powerpc/8xx: Erroneous double irq_eoi() on CPM IRQ in MPC8xx
  powerpc/fsl: Enable CONFIG_E1000E in mpc85xx_smp_defconfig
  powerpc/mpic: Add get_version API both for internal and external use
  powerpc: Handle both new style and old style reserve maps
  powerpc/hw_brk: Fix off by one error when validating DAWR region end
  powerpc/pseries: Support compression of oops text via pstore
  powerpc/pseries: Re-organise the oops compression code
  pstore: Pass header size in the pstore write callback
  powerpc/powernv: Fix iommu initialization again
  powerpc/pseries: Inform the hypervisor we are using EBB regs
  powerpc/perf: Add power8 EBB support
  powerpc/perf: Core EBB support for 64-bit book3s
  powerpc/perf: Drop MMCRA from thread_struct
  powerpc/perf: Don't enable if we have zero events
  ...
This commit is contained in: commit 65b97fb730
Documentation/devicetree/bindings/powerpc/fsl/interlaken-lac.txt (new file, 309 lines)
@@ -0,0 +1,309 @@
===============================================================================
Freescale Interlaken Look-Aside Controller Device Bindings
Copyright 2012 Freescale Semiconductor Inc.

CONTENTS
    - Interlaken Look-Aside Controller (LAC) Node
    - Example LAC Node
    - Interlaken Look-Aside Controller (LAC) Software Portal Node
    - Interlaken Look-Aside Controller (LAC) Software Portal Child Nodes
    - Example LAC SWP Node with Child Nodes

===============================================================================
Interlaken Look-Aside Controller (LAC) Node

DESCRIPTION

Interlaken is a narrow, high-speed, channelized chip-to-chip interface. To
facilitate interoperability between a data path device and a look-aside
co-processor, the Interlaken Look-Aside protocol is defined for short
transaction-related transfers. Although based on the Interlaken protocol,
Interlaken Look-Aside is not directly compatible with Interlaken and can be
considered a different operation mode.

The Interlaken LA controller connects the internal platform to the Interlaken
serial interface. It accepts LA commands through software portals, which are
system-memory-mapped 4KB spaces. The LA commands are then translated into
Interlaken control words and data words, which are sent on the TX side to the
TCAM through SerDes lanes.

There are two 4KB spaces defined within the LAC global register memory map.
There is a full register set at 0x0000-0x0FFF (also known as the "hypervisor"
version), and a subset at 0x1000-0x1FFF. The former is a superset of the
latter, and includes certain registers that should not be accessible to
partitioned software. Separate nodes are used for each region, with a phandle
linking the hypervisor node to the normal operating node.

PROPERTIES

- compatible
    Usage: required
    Value type: <string>
    Definition: Must include "fsl,interlaken-lac". This represents only
        those LAC CCSR registers not protected in partitioned
        software. The version of the device is determined by the LAC
        IP Block Revision Register (IPBRR0) at offset 0x0BF8.

        Table of correspondences between IPBRR0 values and example
        chips:
            Value           Device
            -----------     -------
            0x02000100      T4240

        The hypervisor node has a different compatible. It must include
        "fsl,interlaken-lac-hv". This node represents the protected
        LAC register space and is required except inside a partition
        where access to the hypervisor node is to be denied.

- fsl,non-hv-node
    Usage: required in "fsl,interlaken-lac-hv"
    Value type: <phandle>
    Definition: Points to the non-protected LAC CCSR mapped register space
        node.

- reg
    Usage: required
    Value type: <prop-encoded-array>
    Definition: A standard property. The first resource represents the
        Interlaken LAC configuration registers.

- interrupts
    Usage: required in the non-hv node only
    Value type: <prop-encoded-array>
    Definition: Interrupt mapping for the Interlaken LAC error IRQ.

EXAMPLE
    lac: lac@229000 {
        compatible = "fsl,interlaken-lac";
        reg = <0x229000 0x1000>;
        interrupts = <16 2 1 18>;
    };

    lac-hv@228000 {
        compatible = "fsl,interlaken-lac-hv";
        reg = <0x228000 0x1000>;
        fsl,non-hv-node = <&lac>;
    };

===============================================================================
Interlaken Look-Aside Controller (LAC) Software Portal Container Node

DESCRIPTION
The Interlaken Look-Aside Controller (LAC) utilizes Software Portals to accept
Interlaken Look-Aside (ILA) commands. The Interlaken LAC software portal
memory map occupies 128KB of memory space. The software portal memory space is
intended to be cache-enabled. WIMG for each software portal space is required
to be 0010 if stashing is enabled; otherwise, WIMG can be 0000 or 0010.

PROPERTIES

- #address-cells
    Usage: required
    Value type: <u32>
    Definition: A standard property. Must have a value of 1.

- #size-cells
    Usage: required
    Value type: <u32>
    Definition: A standard property. Must have a value of 1.

- compatible
    Usage: required
    Value type: <string>
    Definition: Must include "fsl,interlaken-lac-portals".

- ranges
    Usage: required
    Value type: <prop-encoded-array>
    Definition: A standard property. Specifies the address and length
        of the LAC portal memory space.

===============================================================================
Interlaken Look-Aside Controller (LAC) Software Portal Child Nodes

DESCRIPTION
There are up to 24 available software portals, with each software portal
requiring 4KB of consecutive memory within the software portal memory-mapped
space.

PROPERTIES

- compatible
    Usage: required
    Value type: <string>
    Definition: Must include "fsl,interlaken-lac-portal-vX.Y" where X is
        the major version (IP_MJ) found in the LAC IP Block Revision
        Register (IPBRR0), at offset 0x0BF8, and Y is the minor version
        (IP_MN).

        Table of correspondences between version values and example chips:
            Value   Device
            ------  -------
            1.0     T4240

- reg
    Usage: required
    Value type: <prop-encoded-array>
    Definition: A standard property. The first resource represents the
        Interlaken LAC software portal registers.

- fsl,liodn
    Value type: <u32>
    Definition: The logical I/O device number (LIODN) for this device. The
        LIODN is a number expressed by this device and used to perform
        look-ups in the IOMMU (PAMU) address table when performing
        DMAs. This property is automatically added by u-boot.

===============================================================================
EXAMPLE

lac-portals {
    #address-cells = <0x1>;
    #size-cells = <0x1>;
    compatible = "fsl,interlaken-lac-portals";
    ranges = <0x0 0xf 0xf4400000 0x20000>;

    lportal0: lac-portal@0 {
        compatible = "fsl,interlaken-lac-portal-v1.0";
        fsl,liodn = <0x204>;
        reg = <0x0 0x1000>;
    };

    lportal1: lac-portal@1000 {
        compatible = "fsl,interlaken-lac-portal-v1.0";
        fsl,liodn = <0x205>;
        reg = <0x1000 0x1000>;
    };

    lportal2: lac-portal@2000 {
        compatible = "fsl,interlaken-lac-portal-v1.0";
        fsl,liodn = <0x206>;
        reg = <0x2000 0x1000>;
    };

    lportal3: lac-portal@3000 {
        compatible = "fsl,interlaken-lac-portal-v1.0";
        fsl,liodn = <0x207>;
        reg = <0x3000 0x1000>;
    };

    lportal4: lac-portal@4000 {
        compatible = "fsl,interlaken-lac-portal-v1.0";
        fsl,liodn = <0x208>;
        reg = <0x4000 0x1000>;
    };

    lportal5: lac-portal@5000 {
        compatible = "fsl,interlaken-lac-portal-v1.0";
        fsl,liodn = <0x209>;
        reg = <0x5000 0x1000>;
    };

    lportal6: lac-portal@6000 {
        compatible = "fsl,interlaken-lac-portal-v1.0";
        fsl,liodn = <0x20A>;
        reg = <0x6000 0x1000>;
    };

    lportal7: lac-portal@7000 {
        compatible = "fsl,interlaken-lac-portal-v1.0";
        fsl,liodn = <0x20B>;
        reg = <0x7000 0x1000>;
    };

    lportal8: lac-portal@8000 {
        compatible = "fsl,interlaken-lac-portal-v1.0";
        fsl,liodn = <0x20C>;
        reg = <0x8000 0x1000>;
    };

    lportal9: lac-portal@9000 {
        compatible = "fsl,interlaken-lac-portal-v1.0";
        fsl,liodn = <0x20D>;
        reg = <0x9000 0x1000>;
    };

    lportal10: lac-portal@A000 {
        compatible = "fsl,interlaken-lac-portal-v1.0";
        fsl,liodn = <0x20E>;
        reg = <0xA000 0x1000>;
    };

    lportal11: lac-portal@B000 {
        compatible = "fsl,interlaken-lac-portal-v1.0";
        fsl,liodn = <0x20F>;
        reg = <0xB000 0x1000>;
    };

    lportal12: lac-portal@C000 {
        compatible = "fsl,interlaken-lac-portal-v1.0";
        fsl,liodn = <0x210>;
        reg = <0xC000 0x1000>;
    };

    lportal13: lac-portal@D000 {
        compatible = "fsl,interlaken-lac-portal-v1.0";
        fsl,liodn = <0x211>;
        reg = <0xD000 0x1000>;
    };

    lportal14: lac-portal@E000 {
        compatible = "fsl,interlaken-lac-portal-v1.0";
        fsl,liodn = <0x212>;
        reg = <0xE000 0x1000>;
    };

    lportal15: lac-portal@F000 {
        compatible = "fsl,interlaken-lac-portal-v1.0";
        fsl,liodn = <0x213>;
        reg = <0xF000 0x1000>;
    };

    lportal16: lac-portal@10000 {
        compatible = "fsl,interlaken-lac-portal-v1.0";
        fsl,liodn = <0x214>;
        reg = <0x10000 0x1000>;
    };

    lportal17: lac-portal@11000 {
        compatible = "fsl,interlaken-lac-portal-v1.0";
        fsl,liodn = <0x215>;
        reg = <0x11000 0x1000>;
    };

    lportal18: lac-portal@12000 {
        compatible = "fsl,interlaken-lac-portal-v1.0";
        fsl,liodn = <0x216>;
        reg = <0x12000 0x1000>;
    };

    lportal19: lac-portal@13000 {
        compatible = "fsl,interlaken-lac-portal-v1.0";
        fsl,liodn = <0x217>;
        reg = <0x13000 0x1000>;
    };

    lportal20: lac-portal@14000 {
        compatible = "fsl,interlaken-lac-portal-v1.0";
        fsl,liodn = <0x218>;
        reg = <0x14000 0x1000>;
    };

    lportal21: lac-portal@15000 {
        compatible = "fsl,interlaken-lac-portal-v1.0";
        fsl,liodn = <0x219>;
        reg = <0x15000 0x1000>;
    };

    lportal22: lac-portal@16000 {
        compatible = "fsl,interlaken-lac-portal-v1.0";
        fsl,liodn = <0x21A>;
        reg = <0x16000 0x1000>;
    };

    lportal23: lac-portal@17000 {
        compatible = "fsl,interlaken-lac-portal-v1.0";
        fsl,liodn = <0x21B>;
        reg = <0x17000 0x1000>;
    };
};
Documentation/powerpc/00-INDEX
@@ -14,6 +14,8 @@ hvcs.txt
	- IBM "Hypervisor Virtual Console Server" Installation Guide
mpc52xx.txt
	- Linux 2.6.x on MPC52xx family
+pmu-ebb.txt
+	- Description of the API for using the PMU with Event Based Branches.
qe_firmware.txt
	- describes the layout of firmware binaries for the Freescale QUICC
	  Engine and the code that parses and uploads the microcode therein.
Documentation/powerpc/pmu-ebb.txt (new file, 137 lines)
@@ -0,0 +1,137 @@
PMU Event Based Branches
========================

Event Based Branches (EBBs) are a feature which allows the hardware to
branch directly to a specified user space address when certain events occur.

The full specification is available in Power ISA v2.07:

    https://www.power.org/documentation/power-isa-version-2-07/

One type of event for which EBBs can be configured is PMU exceptions. This
document describes the API for configuring the Power PMU to generate EBBs,
using the Linux perf_events API.


Terminology
-----------

Throughout this document we will refer to an "EBB event" or "EBB events". This
just refers to a struct perf_event which has set the "EBB" flag in its
attr.config. All events which can be configured on the hardware PMU are
possible "EBB events".


Background
----------

When a PMU EBB occurs it is delivered to the currently running process. As
such, EBBs can only sensibly be used by programs for self-monitoring.

It is a feature of the perf_events API that events can be created on other
processes, subject to standard permission checks. This is also true of EBB
events; however, unless the target process enables EBBs (via mtspr(BESCR)) no
EBBs will ever be delivered.

This makes it possible for a process to enable EBBs for itself, but not
actually configure any events. At a later time another process can come along
and attach an EBB event to the process, which will then cause EBBs to be
delivered to the first process. It's not clear if this is actually useful.

When the PMU is configured for EBBs, all PMU interrupts are delivered to the
user process. This means that once an EBB event is scheduled on the PMU, no
non-EBB events can be configured, so EBB events can not be run concurrently
with regular 'perf' commands, or any other perf events.

It is however safe to run 'perf' commands on a process which is using EBBs.
The kernel will in general schedule the EBB event, and perf will be notified
that its events could not run.

The exclusion between EBB events and regular events is implemented using the
existing "pinned" and "exclusive" attributes of perf_events. This means EBB
events will be given priority over other events, unless they are also pinned.
If an EBB event and a regular event are both pinned, then whichever is enabled
first will be scheduled and the other will be put in error state. See the
section below titled "Enabling an EBB event" for more information.


Creating an EBB event
---------------------

To request that an event is counted using EBB, the event code should have bit
63 set.

EBB events must be created with a particular, and restrictive, set of
attributes - this is so that they interoperate correctly with the rest of the
perf_events subsystem.

An EBB event must be created with the "pinned" and "exclusive" attributes set.
Note that if you are creating a group of EBB events, only the leader can have
these attributes set.

An EBB event must NOT set any of the "inherit", "sample_period", "freq" or
"enable_on_exec" attributes.

An EBB event must be attached to a task. This is specified to perf_event_open()
by passing a pid value, typically 0 indicating the current task.

All events in a group must agree on whether they want EBB. That is, all events
must request EBB, or none may request EBB.

EBB events must specify the PMC they are to be counted on. This ensures
userspace is able to reliably determine which PMC the event is scheduled on.

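As an illustration only, opening an EBB event along these lines might look like
the following sketch. The raw event code is a made-up placeholder (event
encodings, including the PMC selection bits, are CPU-specific); only the bit-63
EBB flag and the pinned/exclusive/task requirements come from the rules above.

    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    static int open_ebb_event(void)
    {
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_RAW;
        /* Bit 63 requests EBB; 0x1e is an invented raw event code. */
        attr.config = (1ULL << 63) | 0x1e;
        attr.pinned = 1;        /* required (group leader only) */
        attr.exclusive = 1;     /* required (group leader only) */
        attr.disabled = 1;
        /* inherit, sample_period, freq, enable_on_exec all stay 0 */

        /* pid = 0: attach to the current task, as EBB requires */
        return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
    }
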
Enabling an EBB event
---------------------

Once an EBB event has been successfully opened, it must be enabled with the
perf_events API. This can be achieved either via the ioctl() interface or the
prctl() interface.

However, due to the design of the perf_events API, enabling an event does not
guarantee that it has been scheduled on the PMU. To ensure that the EBB event
has been scheduled on the PMU, you must perform a read() on the event. If the
read() returns EOF, then the event has not been scheduled and EBBs are not
enabled.

This behaviour occurs because the EBB event is pinned and exclusive. When the
EBB event is enabled it will force all other non-pinned events off the PMU. In
this case the enable will be successful. However, if there is already an event
pinned on the PMU then the enable will not be successful.

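A sketch of that enable-and-verify pattern (the helper name is invented; fd is
the descriptor returned by perf_event_open() as above):

    #include <stdint.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/perf_event.h>

    /* Returns 0 only if the event is enabled and scheduled on the PMU. */
    static int ebb_event_enable(int fd)
    {
        uint64_t count;

        if (ioctl(fd, PERF_EVENT_IOC_ENABLE, 0) != 0)
            return -1;

        /* A short (EOF) read means the event was not scheduled,
         * e.g. because another pinned event already owns the PMU. */
        if (read(fd, &count, sizeof(count)) != sizeof(count))
            return -1;

        return 0;
    }
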
Reading an EBB event
--------------------

It is possible to read() from an EBB event; however, the results are
meaningless. Because interrupts are being delivered to the user process the
kernel is not able to count the event, and so will return a junk value.


Closing an EBB event
--------------------

When an EBB event is finished with, you can close it using close() as for any
regular event. If this is the last EBB event the PMU will be deconfigured and
no further PMU EBBs will be delivered.


EBB Handler
-----------

The EBB handler is just regular userspace code; however, it must be written in
the style of an interrupt handler. When the handler is entered all registers
are (possibly) live and so must be saved somehow before the handler can invoke
other code.

It's up to the program how to handle this. For C programs a relatively simple
option is to create an interrupt frame on the stack and save registers there.


Fork
----

EBB events are not inherited across fork. If the child process wishes to use
EBBs it should open a new event for itself. Similarly, the EBB state in
BESCR/EBBHR/EBBRR is cleared across fork().
Documentation/vfio.txt
@@ -283,6 +283,69 @@ a direct pass through for VFIO_DEVICE_* ioctls. The read/write/mmap
interfaces implement the device region access defined by the device's
own VFIO_DEVICE_GET_REGION_INFO ioctl.


PPC64 sPAPR implementation note
-------------------------------------------------------------------------------

This implementation has some specifics:

1) Only one IOMMU group per container is supported, as an IOMMU group
represents the minimal entity for which isolation can be guaranteed, and
groups are allocated statically, one per Partitionable Endpoint (PE)
(a PE is often a PCI domain, but not always).

2) The hardware supports so called DMA windows - the PCI address range
within which DMA transfer is allowed; any attempt to access address space
outside the window leads to isolation of the whole PE.

3) PPC64 guests are paravirtualized but not fully emulated. There is an API
to map/unmap pages for DMA; it normally maps 1..32 pages per call and
currently there is no way to reduce the number of calls. In order to make
things faster, the map/unmap handling has been implemented in real mode,
which provides excellent performance but has limitations such as the
inability to do locked-pages accounting in real time.

So 3 additional ioctls have been added:

	VFIO_IOMMU_SPAPR_TCE_GET_INFO - returns the size and the start
		of the DMA window on the PCI bus.

	VFIO_IOMMU_ENABLE - enables the container. The locked pages accounting
		is done at this point. This lets the user know what the DMA
		window is and adjust the rlimit before doing any real job.

	VFIO_IOMMU_DISABLE - disables the container.


The code flow from the example above should be slightly changed:

	.....
	/* Add the group to the container */
	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);

	/* Enable the IOMMU model we want */
	ioctl(container, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU)

	/* Get additional sPAPR IOMMU info */
	vfio_iommu_spapr_tce_info spapr_iommu_info;
	ioctl(container, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &spapr_iommu_info);

	if (ioctl(container, VFIO_IOMMU_ENABLE))
		/* Cannot enable container, may be low rlimit */

	/* Allocate some space and setup a DMA mapping */
	dma_map.vaddr = mmap(0, 1024 * 1024, PROT_READ | PROT_WRITE,
			     MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);

	dma_map.size = 1024 * 1024;
	dma_map.iova = 0; /* 1MB starting at 0x0 from device view */
	dma_map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;

	/* Check here if .iova/.size are within the DMA window from spapr_iommu_info */

	ioctl(container, VFIO_IOMMU_MAP_DMA, &dma_map);
	.....

-------------------------------------------------------------------------------

[1] VFIO was originally an acronym for "Virtual Function I/O" in its
MAINTAINERS
@@ -3123,6 +3123,13 @@ M:	Maxim Levitsky <maximlevitsky@gmail.com>
S:	Maintained
F:	drivers/media/rc/ene_ir.*

+ENHANCED ERROR HANDLING (EEH)
+M:	Gavin Shan <shangw@linux.vnet.ibm.com>
+L:	linuxppc-dev@lists.ozlabs.org
+S:	Supported
+F:	Documentation/powerpc/eeh-pci-error-recovery.txt
+F:	arch/powerpc/kernel/eeh*.c

EPSON S1D13XXX FRAMEBUFFER DRIVER
M:	Kristoffer Ericson <kristoffer.ericson@gmail.com>
S:	Maintained

@@ -6192,7 +6199,6 @@ M:	Linas Vepstas <linasvepstas@gmail.com>
L:	linux-pci@vger.kernel.org
S:	Supported
F:	Documentation/PCI/pci-error-recovery.txt
-F:	Documentation/powerpc/eeh-pci-error-recovery.txt

PCI SUBSYSTEM
M:	Bjorn Helgaas <bhelgaas@google.com>
arch/powerpc/Kconfig
@@ -298,7 +298,7 @@ config HUGETLB_PAGE_SIZE_VARIABLE

config MATH_EMULATION
	bool "Math emulation"
-	depends on 4xx || 8xx || E200 || PPC_MPC832x || E500
+	depends on 4xx || 8xx || PPC_MPC832x || BOOKE
	---help---
	  Some PowerPC chips designed for embedded applications do not have
	  a floating-point unit and therefore do not implement the

@@ -307,6 +307,10 @@ config MATH_EMULATION
	  unit, which will allow programs that use floating-point
	  instructions to run.

+	  This is also useful to emulate missing (optional) instructions
+	  such as fsqrt on cores that do have an FPU but do not implement
+	  them (such as Freescale BookE).
+
config PPC_TRANSACTIONAL_MEM
	bool "Transactional Memory support for POWERPC"
	depends on PPC_BOOK3S_64

@@ -315,17 +319,6 @@ config PPC_TRANSACTIONAL_MEM
	---help---
	  Support user-mode Transactional Memory on POWERPC.

-config 8XX_MINIMAL_FPEMU
-	bool "Minimal math emulation for 8xx"
-	depends on 8xx && !MATH_EMULATION
-	help
-	  Older arch/ppc kernels still emulated a few floating point
-	  instructions such as load and store, even when full math
-	  emulation is disabled. Say "Y" here if you want to preserve
-	  this behavior.
-
-	  It is recommended that you build a soft-float userspace instead.
-
config IOMMU_HELPER
	def_bool PPC64
arch/powerpc/Kconfig.debug
@@ -147,6 +147,13 @@ choice
	  enable debugging for the wrong type of machine your kernel
	  _will not boot_.

+config PPC_EARLY_DEBUG_BOOTX
+	bool "BootX or OpenFirmware"
+	depends on BOOTX_TEXT
+	help
+	  Select this to enable early debugging for a machine using BootX
+	  or OpenFirmware.
+
config PPC_EARLY_DEBUG_LPAR
	bool "LPAR HV Console"
	depends on PPC_PSERIES
arch/powerpc/boot/dts/currituck.dts
@@ -103,6 +103,11 @@
			interrupts = <34 2>;
		};

+		FPGA0: fpga@50000000 {
+			compatible = "ibm,currituck-fpga";
+			reg = <0x50000000 0x4>;
+		};
+
		IIC0: i2c@00000000 {
			compatible = "ibm,iic-currituck", "ibm,iic";
			reg = <0x0 0x00000014>;
arch/powerpc/boot/dts/fsl/interlaken-lac-portals.dtsi (new file, 156 lines)
@@ -0,0 +1,156 @@
/* T4240 Interlaken LAC Portal device tree stub with 24 portals.
 *
 * Copyright 2012 Freescale Semiconductor Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#address-cells = <0x1>;
#size-cells = <0x1>;
compatible = "fsl,interlaken-lac-portals";

lportal0: lac-portal@0 {
	compatible = "fsl,interlaken-lac-portal-v1.0";
	reg = <0x0 0x1000>;
};

lportal1: lac-portal@1000 {
	compatible = "fsl,interlaken-lac-portal-v1.0";
	reg = <0x1000 0x1000>;
};

lportal2: lac-portal@2000 {
	compatible = "fsl,interlaken-lac-portal-v1.0";
	reg = <0x2000 0x1000>;
};

lportal3: lac-portal@3000 {
	compatible = "fsl,interlaken-lac-portal-v1.0";
	reg = <0x3000 0x1000>;
};

lportal4: lac-portal@4000 {
	compatible = "fsl,interlaken-lac-portal-v1.0";
	reg = <0x4000 0x1000>;
};

lportal5: lac-portal@5000 {
	compatible = "fsl,interlaken-lac-portal-v1.0";
	reg = <0x5000 0x1000>;
};

lportal6: lac-portal@6000 {
	compatible = "fsl,interlaken-lac-portal-v1.0";
	reg = <0x6000 0x1000>;
};

lportal7: lac-portal@7000 {
	compatible = "fsl,interlaken-lac-portal-v1.0";
	reg = <0x7000 0x1000>;
};

lportal8: lac-portal@8000 {
	compatible = "fsl,interlaken-lac-portal-v1.0";
	reg = <0x8000 0x1000>;
};

lportal9: lac-portal@9000 {
	compatible = "fsl,interlaken-lac-portal-v1.0";
	reg = <0x9000 0x1000>;
};

lportal10: lac-portal@A000 {
	compatible = "fsl,interlaken-lac-portal-v1.0";
	reg = <0xA000 0x1000>;
};

lportal11: lac-portal@B000 {
	compatible = "fsl,interlaken-lac-portal-v1.0";
	reg = <0xB000 0x1000>;
};

lportal12: lac-portal@C000 {
	compatible = "fsl,interlaken-lac-portal-v1.0";
	reg = <0xC000 0x1000>;
};

lportal13: lac-portal@D000 {
	compatible = "fsl,interlaken-lac-portal-v1.0";
	reg = <0xD000 0x1000>;
};

lportal14: lac-portal@E000 {
	compatible = "fsl,interlaken-lac-portal-v1.0";
	reg = <0xE000 0x1000>;
};

lportal15: lac-portal@F000 {
	compatible = "fsl,interlaken-lac-portal-v1.0";
	reg = <0xF000 0x1000>;
};

lportal16: lac-portal@10000 {
	compatible = "fsl,interlaken-lac-portal-v1.0";
	reg = <0x10000 0x1000>;
};

lportal17: lac-portal@11000 {
	compatible = "fsl,interlaken-lac-portal-v1.0";
	reg = <0x11000 0x1000>;
};

lportal18: lac-portal@12000 {
	compatible = "fsl,interlaken-lac-portal-v1.0";
	reg = <0x12000 0x1000>;
};

lportal19: lac-portal@13000 {
	compatible = "fsl,interlaken-lac-portal-v1.0";
	reg = <0x13000 0x1000>;
};

lportal20: lac-portal@14000 {
	compatible = "fsl,interlaken-lac-portal-v1.0";
	reg = <0x14000 0x1000>;
};

lportal21: lac-portal@15000 {
	compatible = "fsl,interlaken-lac-portal-v1.0";
	reg = <0x15000 0x1000>;
};

lportal22: lac-portal@16000 {
	compatible = "fsl,interlaken-lac-portal-v1.0";
	reg = <0x16000 0x1000>;
};

lportal23: lac-portal@17000 {
	compatible = "fsl,interlaken-lac-portal-v1.0";
	reg = <0x17000 0x1000>;
};
arch/powerpc/boot/dts/fsl/interlaken-lac.dtsi (new file, 45 lines)
@@ -0,0 +1,45 @@
/*
 * T4 Interlaken Look-aside Controller (LAC) device tree stub
 *
 * Copyright 2012 Freescale Semiconductor Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

lac: lac@229000 {
	compatible = "fsl,interlaken-lac";
	reg = <0x229000 0x1000>;
	interrupts = <16 2 1 18>;
};

lac-hv@228000 {
	compatible = "fsl,interlaken-lac-hv";
	reg = <0x228000 0x1000>;
	fsl,non-hv-node = <&lac>;
};
@@ -423,6 +423,8 @@ CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_BOOTX_TEXT=y
+CONFIG_PPC_EARLY_DEBUG=y
+CONFIG_PPC_EARLY_DEBUG_BOOTX=y
CONFIG_KEYS=y
CONFIG_KEYS_DEBUG_PROC_KEYS=y
CONFIG_SECURITY=y

@@ -284,6 +284,8 @@ CONFIG_DEBUG_MUTEXES=y
CONFIG_LATENCYTOP=y
CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_BOOTX_TEXT=y
+CONFIG_PPC_EARLY_DEBUG=y
+CONFIG_PPC_EARLY_DEBUG_BOOTX=y
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_ECB=m

@@ -138,6 +138,8 @@ CONFIG_DEBUG_STACK_USAGE=y
CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
CONFIG_BOOTX_TEXT=y
+CONFIG_PPC_EARLY_DEBUG=y
+CONFIG_PPC_EARLY_DEBUG_BOOTX=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set

arch/powerpc/configs/mpc512x_defconfig
@@ -1,7 +1,6 @@
CONFIG_EXPERIMENTAL=y
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_SPARSE_IRQ=y
CONFIG_NO_HZ=y
CONFIG_LOG_BUF_SHIFT=16
CONFIG_BLK_DEV_INITRD=y
# CONFIG_COMPAT_BRK is not set
@@ -9,6 +8,7 @@ CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
# CONFIG_IOSCHED_CFQ is not set
# CONFIG_PPC_CHRP is not set
CONFIG_PPC_MPC512x=y
@@ -16,9 +16,7 @@ CONFIG_MPC5121_ADS=y
CONFIG_MPC512x_GENERIC=y
CONFIG_PDM360NG=y
# CONFIG_PPC_PMAC is not set
CONFIG_NO_HZ=y
CONFIG_HZ_1000=y
# CONFIG_MIGRATION is not set
# CONFIG_SECCOMP is not set
# CONFIG_PCI is not set
CONFIG_NET=y
@@ -33,8 +31,6 @@ CONFIG_IP_PNP=y
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
CONFIG_CAN=y
CONFIG_CAN_RAW=y
CONFIG_CAN_BCM=y
CONFIG_CAN_VCAN=y
CONFIG_CAN_MSCAN=y
CONFIG_CAN_DEBUG_DEVICES=y
@@ -46,7 +42,6 @@ CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
@@ -60,7 +55,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=1
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_BLK_DEV_XIP=y
CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_AT24=y
CONFIG_EEPROM_AT25=y
CONFIG_SCSI=y
@@ -68,6 +62,7 @@ CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
CONFIG_NETDEVICES=y
CONFIG_FS_ENET=y
CONFIG_MARVELL_PHY=y
CONFIG_DAVICOM_PHY=y
CONFIG_QSEMI_PHY=y
@@ -83,10 +78,6 @@ CONFIG_STE10XP=y
CONFIG_LSI_ET1011C_PHY=y
CONFIG_FIXED_PHY=y
CONFIG_MDIO_BITBANG=y
CONFIG_NET_ETHERNET=y
CONFIG_FS_ENET=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
# CONFIG_WLAN is not set
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_EVDEV=y
@@ -106,14 +97,18 @@ CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_MPC8XXX=y
# CONFIG_HWMON is not set
CONFIG_MEDIA_SUPPORT=y
CONFIG_VIDEO_DEV=y
CONFIG_VIDEO_ADV_DEBUG=y
# CONFIG_VIDEO_HELPER_CHIPS_AUTO is not set
CONFIG_VIDEO_SAA711X=y
CONFIG_FB=y
CONFIG_FB_FSL_DIU=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_USB=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_FSL=y
# CONFIG_USB_EHCI_HCD_PPC_OF is not set
CONFIG_USB_STORAGE=y
CONFIG_USB_GADGET=y
CONFIG_USB_FSL_USB2=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_M41T80=y
CONFIG_RTC_DRV_MPC5121=y
@@ -129,9 +124,7 @@ CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_UBIFS_FS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set

arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -131,6 +131,7 @@ CONFIG_DUMMY=y
CONFIG_FS_ENET=y
CONFIG_UCC_GETH=y
CONFIG_GIANFAR=y
+CONFIG_E1000E=y
CONFIG_MARVELL_PHY=y
CONFIG_DAVICOM_PHY=y
CONFIG_CICADA_PHY=y

@@ -350,6 +350,8 @@ CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
CONFIG_BOOTX_TEXT=y
+CONFIG_PPC_EARLY_DEBUG=y
+CONFIG_PPC_EARLY_DEBUG_BOOTX=y
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_MD4=m

@@ -398,6 +398,8 @@ CONFIG_FTR_FIXUP_SELFTEST=y
CONFIG_MSI_BITMAP_SELFTEST=y
CONFIG_XMON=y
CONFIG_BOOTX_TEXT=y
+CONFIG_PPC_EARLY_DEBUG=y
+CONFIG_PPC_EARLY_DEBUG_BOOTX=y
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_PCBC=m

@@ -1264,6 +1264,8 @@ CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_XMON=y
CONFIG_BOOTX_TEXT=y
+CONFIG_PPC_EARLY_DEBUG=y
+CONFIG_PPC_EARLY_DEBUG_BOOTX=y
CONFIG_KEYS=y
CONFIG_KEYS_DEBUG_PROC_KEYS=y
CONFIG_SECURITY=y

@@ -296,6 +296,7 @@ CONFIG_SQUASHFS=m
CONFIG_SQUASHFS_XATTR=y
CONFIG_SQUASHFS_LZO=y
CONFIG_SQUASHFS_XZ=y
+CONFIG_PSTORE=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
arch/powerpc/include/asm/eeh.h
@@ -24,6 +24,7 @@
#include <linux/init.h>
#include <linux/list.h>
#include <linux/string.h>
+#include <linux/time.h>

struct pci_dev;
struct pci_bus;

@@ -52,6 +53,7 @@ struct device_node;

#define EEH_PE_ISOLATED		(1 << 0)	/* Isolated PE */
#define EEH_PE_RECOVERING	(1 << 1)	/* Recovering PE */
+#define EEH_PE_PHB_DEAD		(1 << 2)	/* Dead PHB */

struct eeh_pe {
	int type;			/* PE type: PHB/Bus/Device */

@@ -59,8 +61,10 @@ struct eeh_pe {
	int config_addr;		/* Traditional PCI address */
	int addr;			/* PE configuration address */
	struct pci_controller *phb;	/* Associated PHB */
+	struct pci_bus *bus;		/* Top PCI bus for bus PE */
	int check_count;		/* Times of ignored error */
	int freeze_count;		/* Times of froze up */
+	struct timeval tstamp;		/* Time on first-time freeze */
	int false_positives;		/* Times of reported #ff's */
	struct eeh_pe *parent;		/* Parent PE */
	struct list_head child_list;	/* Link PE to the child list */

@@ -95,12 +99,12 @@ struct eeh_dev {

static inline struct device_node *eeh_dev_to_of_node(struct eeh_dev *edev)
{
-	return edev->dn;
+	return edev ? edev->dn : NULL;
}

static inline struct pci_dev *eeh_dev_to_pci_dev(struct eeh_dev *edev)
{
-	return edev->pdev;
+	return edev ? edev->pdev : NULL;
}

/*
@@ -130,8 +134,9 @@ static inline struct pci_dev *eeh_dev_to_pci_dev(struct eeh_dev *edev)
struct eeh_ops {
	char *name;
	int (*init)(void);
+	int (*post_init)(void);
	void* (*of_probe)(struct device_node *dn, void *flag);
-	void* (*dev_probe)(struct pci_dev *dev, void *flag);
+	int (*dev_probe)(struct pci_dev *dev, void *flag);
	int (*set_option)(struct eeh_pe *pe, int option);
	int (*get_pe_addr)(struct eeh_pe *pe);
	int (*get_state)(struct eeh_pe *pe, int *state);

@@ -141,11 +146,12 @@ struct eeh_ops {
	int (*configure_bridge)(struct eeh_pe *pe);
	int (*read_config)(struct device_node *dn, int where, int size, u32 *val);
	int (*write_config)(struct device_node *dn, int where, int size, u32 val);
+	int (*next_error)(struct eeh_pe **pe);
};

extern struct eeh_ops *eeh_ops;
extern int eeh_subsystem_enabled;
-extern struct mutex eeh_mutex;
+extern raw_spinlock_t confirm_error_lock;
extern int eeh_probe_mode;

#define EEH_PROBE_MODE_DEV	(1<<0)	/* From PCI device */

@@ -166,14 +172,14 @@ static inline int eeh_probe_mode_dev(void)
	return (eeh_probe_mode == EEH_PROBE_MODE_DEV);
}

-static inline void eeh_lock(void)
+static inline void eeh_serialize_lock(unsigned long *flags)
{
-	mutex_lock(&eeh_mutex);
+	raw_spin_lock_irqsave(&confirm_error_lock, *flags);
}

-static inline void eeh_unlock(void)
+static inline void eeh_serialize_unlock(unsigned long flags)
{
-	mutex_unlock(&eeh_mutex);
+	raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
}

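As a usage illustration only, callers of the renamed helpers now pass a flags
word, since the lock is IRQ-safe (the surrounding function below is
hypothetical; real call sites live in arch/powerpc/kernel/eeh*.c):

	static void example_eeh_update(void)
	{
		unsigned long flags;

		eeh_serialize_lock(&flags);
		/* ... inspect or update shared EEH state ... */
		eeh_serialize_unlock(flags);
	}
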
/*
@@ -184,8 +190,11 @@ static inline void eeh_unlock(void)

typedef void *(*eeh_traverse_func)(void *data, void *flag);
+int eeh_phb_pe_create(struct pci_controller *phb);
+struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb);
struct eeh_pe *eeh_pe_get(struct eeh_dev *edev);
int eeh_add_to_parent_pe(struct eeh_dev *edev);
int eeh_rmv_from_parent_pe(struct eeh_dev *edev, int purge_pe);
+void eeh_pe_update_time_stamp(struct eeh_pe *pe);
void *eeh_pe_dev_traverse(struct eeh_pe *root,
		eeh_traverse_func fn, void *flag);
void eeh_pe_restore_bars(struct eeh_pe *pe);

@@ -193,12 +202,13 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe);

void *eeh_dev_init(struct device_node *dn, void *data);
void eeh_dev_phb_init_dynamic(struct pci_controller *phb);
+int eeh_init(void);
int __init eeh_ops_register(struct eeh_ops *ops);
int __exit eeh_ops_unregister(const char *name);
unsigned long eeh_check_failure(const volatile void __iomem *token,
				unsigned long val);
int eeh_dev_check_failure(struct eeh_dev *edev);
-void __init eeh_addr_cache_build(void);
+void eeh_addr_cache_build(void);
void eeh_add_device_tree_early(struct device_node *);
void eeh_add_device_tree_late(struct pci_bus *);
void eeh_add_sysfs_files(struct pci_bus *);

@@ -221,6 +231,11 @@ void eeh_remove_bus_device(struct pci_dev *, int);

#else /* !CONFIG_EEH */

+static inline int eeh_init(void)
+{
+	return 0;
+}
+
static inline void *eeh_dev_init(struct device_node *dn, void *data)
{
	return NULL;

@@ -245,9 +260,6 @@ static inline void eeh_add_sysfs_files(struct pci_bus *bus) { }

static inline void eeh_remove_bus_device(struct pci_dev *dev, int purge_pe) { }

-static inline void eeh_lock(void) { }
-static inline void eeh_unlock(void) { }

#define EEH_POSSIBLE_ERROR(val, type) (0)
#define EEH_IO_ERROR_VALUE(size) (-1UL)
#endif /* CONFIG_EEH */

arch/powerpc/include/asm/eeh_event.h
@@ -31,7 +31,9 @@ struct eeh_event {
	struct eeh_pe *pe;	/* EEH PE */
};

+int eeh_event_init(void);
int eeh_send_failure_event(struct eeh_pe *pe);
+void eeh_remove_event(struct eeh_pe *pe);
void eeh_handle_event(struct eeh_pe *pe);

#endif /* __KERNEL__ */
arch/powerpc/include/asm/exception-64s.h
@@ -358,12 +358,12 @@ label##_relon_pSeries:					\
	/* No guest interrupts come through here */	\
	SET_SCRATCH0(r13);		/* save r13 */	\
	EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
-				       EXC_STD, KVMTEST_PR, vec)
+				       EXC_STD, NOTEST, vec)

#define STD_RELON_EXCEPTION_PSERIES_OOL(vec, label)		\
	.globl label##_relon_pSeries;				\
label##_relon_pSeries:						\
-	EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, vec);	\
+	EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, vec);		\
	EXCEPTION_RELON_PROLOG_PSERIES_1(label##_common, EXC_STD)

#define STD_RELON_EXCEPTION_HV(loc, vec, label)		\
@@ -374,12 +374,12 @@ label##_relon_hv:					\
	/* No guest interrupts come through here */	\
	SET_SCRATCH0(r13);	/* save r13 */		\
	EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
-				       EXC_HV, KVMTEST, vec)
+				       EXC_HV, NOTEST, vec)

#define STD_RELON_EXCEPTION_HV_OOL(vec, label)			\
	.globl label##_relon_hv;				\
label##_relon_hv:						\
-	EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST, vec);		\
+	EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, vec);		\
	EXCEPTION_RELON_PROLOG_PSERIES_1(label##_common, EXC_HV)

/* This associate vector numbers with bits in paca->irq_happened */
arch/powerpc/include/asm/hugetlb.h
@@ -191,8 +191,14 @@ static inline void flush_hugetlb_page(struct vm_area_struct *vma,
					      unsigned long vmaddr)
{
}
-#endif /* CONFIG_HUGETLB_PAGE */

+#define hugepd_shift(x) 0
+static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
+				    unsigned pdshift)
+{
+	return 0;
+}
+#endif /* CONFIG_HUGETLB_PAGE */

/*
 * FSL Book3E platforms require special gpage handling - the gpages
arch/powerpc/include/asm/iommu.h
@@ -76,6 +76,9 @@ struct iommu_table {
	struct iommu_pool large_pool;
	struct iommu_pool pools[IOMMU_NR_POOLS];
	unsigned long *it_map;		/* A simple allocation bitmap for now */
+#ifdef CONFIG_IOMMU_API
+	struct iommu_group *it_group;
+#endif
};

struct scatterlist;

@@ -98,6 +101,8 @@ extern void iommu_free_table(struct iommu_table *tbl, const char *node_name);
 */
extern struct iommu_table *iommu_init_table(struct iommu_table * tbl,
					    int nid);
+extern void iommu_register_group(struct iommu_table *tbl,
+				 int pci_domain_number, unsigned long pe_num);

extern int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
			struct scatterlist *sglist, int nelems,

@@ -125,13 +130,6 @@ extern void iommu_init_early_pSeries(void);
extern void iommu_init_early_dart(void);
extern void iommu_init_early_pasemi(void);

-#ifdef CONFIG_PCI
-extern void pci_iommu_init(void);
-extern void pci_direct_iommu_init(void);
-#else
-static inline void pci_iommu_init(void) { }
-#endif
-
extern void alloc_dart_table(void);
#if defined(CONFIG_PPC64) && defined(CONFIG_PM)
static inline void iommu_save(void)

@@ -147,5 +145,26 @@ static inline void iommu_restore(void)
}
#endif

+/* The API to support IOMMU operations for VFIO */
+extern int iommu_tce_clear_param_check(struct iommu_table *tbl,
+		unsigned long ioba, unsigned long tce_value,
+		unsigned long npages);
+extern int iommu_tce_put_param_check(struct iommu_table *tbl,
+		unsigned long ioba, unsigned long tce);
+extern int iommu_tce_build(struct iommu_table *tbl, unsigned long entry,
+		unsigned long hwaddr, enum dma_data_direction direction);
+extern unsigned long iommu_clear_tce(struct iommu_table *tbl,
+		unsigned long entry);
+extern int iommu_clear_tces_and_put_pages(struct iommu_table *tbl,
+		unsigned long entry, unsigned long pages);
+extern int iommu_put_tce_user_mode(struct iommu_table *tbl,
+		unsigned long entry, unsigned long tce);
+
+extern void iommu_flush_tce(struct iommu_table *tbl);
+extern int iommu_take_ownership(struct iommu_table *tbl);
+extern void iommu_release_ownership(struct iommu_table *tbl);
+
+extern enum dma_data_direction iommu_tce_direction(unsigned long tce);
+
#endif /* __KERNEL__ */
#endif /* _ASM_IOMMU_H */
arch/powerpc/include/asm/kvm_book3s_64.h
@@ -159,36 +159,46 @@ static inline int hpte_cache_flags_ok(unsigned long ptel, unsigned long io_type)
}

/*
- * Lock and read a linux PTE. If it's present and writable, atomically
- * set dirty and referenced bits and return the PTE, otherwise return 0.
+ * If it's present and writable, atomically set dirty and referenced bits and
+ * return the PTE, otherwise return 0. If we find a transparent hugepage
+ * and if it is marked splitting we return 0;
 */
-static inline pte_t kvmppc_read_update_linux_pte(pte_t *p, int writing)
+static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing,
+						 unsigned int hugepage)
{
-	pte_t pte, tmp;
+	pte_t old_pte, new_pte = __pte(0);

-	/* wait until _PAGE_BUSY is clear then set it atomically */
-	__asm__ __volatile__ (
-		"1:	ldarx	%0,0,%3\n"
-		"	andi.	%1,%0,%4\n"
-		"	bne-	1b\n"
-		"	ori	%1,%0,%4\n"
-		"	stdcx.	%1,0,%3\n"
-		"	bne-	1b"
-		: "=&r" (pte), "=&r" (tmp), "=m" (*p)
-		: "r" (p), "i" (_PAGE_BUSY)
-		: "cc");
+	while (1) {
+		old_pte = pte_val(*ptep);
+		/*
+		 * wait until _PAGE_BUSY is clear then set it atomically
+		 */
+		if (unlikely(old_pte & _PAGE_BUSY)) {
+			cpu_relax();
+			continue;
+		}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+		/* If hugepage and is trans splitting return None */
+		if (unlikely(hugepage &&
+			     pmd_trans_splitting(pte_pmd(old_pte))))
+			return __pte(0);
+#endif
+		/* If pte is not present return None */
+		if (unlikely(!(old_pte & _PAGE_PRESENT)))
+			return __pte(0);

-	if (pte_present(pte)) {
-		pte = pte_mkyoung(pte);
-		if (writing && pte_write(pte))
-			pte = pte_mkdirty(pte);
-	}
+		new_pte = pte_mkyoung(old_pte);
+		if (writing && pte_write(old_pte))
+			new_pte = pte_mkdirty(new_pte);

-	*p = pte;	/* clears _PAGE_BUSY */
-
-	return pte;
+		if (old_pte == __cmpxchg_u64((unsigned long *)ptep, old_pte,
+					     new_pte))
+			break;
+	}
+	return new_pte;
}

/* Return HPTE cache control bits corresponding to Linux pte bits */
static inline unsigned long hpte_cache_bits(unsigned long pte_val)
{
arch/powerpc/include/asm/lppaca.h
@@ -66,7 +66,8 @@ struct lppaca {

	u8	reserved6[48];
	u8	cede_latency_hint;
-	u8	reserved7[7];
+	u8	ebb_regs_in_use;
+	u8	reserved7[6];
	u8	dtl_enable_mask;	/* Dispatch Trace Log mask */
	u8	donate_dedicated_cpu;	/* Donate dedicated CPU cycles */
	u8	fpregs_in_use;
arch/powerpc/include/asm/machdep.h
@@ -36,13 +36,13 @@ struct machdep_calls {
#ifdef CONFIG_PPC64
	void		(*hpte_invalidate)(unsigned long slot,
					   unsigned long vpn,
-					   int psize, int ssize,
-					   int local);
+					   int bpsize, int apsize,
+					   int ssize, int local);
	long		(*hpte_updatepp)(unsigned long slot,
					 unsigned long newpp,
					 unsigned long vpn,
-					 int psize, int ssize,
-					 int local);
+					 int bpsize, int apsize,
+					 int ssize, int local);
	void		(*hpte_updateboltedpp)(unsigned long newpp,
					       unsigned long ea,
					       int psize, int ssize);

@@ -57,6 +57,9 @@ struct machdep_calls {
	void		(*hpte_removebolted)(unsigned long ea,
					     int psize, int ssize);
	void		(*flush_hash_range)(unsigned long number, int local);
+	void		(*hugepage_invalidate)(struct mm_struct *mm,
+					       unsigned char *hpte_slot_array,
+					       unsigned long addr, int psize);

	/* special for kexec, to be called in real mode, linear mapping is
	 * destroyed as well */
arch/powerpc/include/asm/mmu-hash64.h
@@ -340,6 +340,20 @@ extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, int local, int ssize,
		     unsigned int shift, unsigned int mmu_psize);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern int __hash_page_thp(unsigned long ea, unsigned long access,
+			   unsigned long vsid, pmd_t *pmdp, unsigned long trap,
+			   int local, int ssize, unsigned int psize);
+#else
+static inline int __hash_page_thp(unsigned long ea, unsigned long access,
+				  unsigned long vsid, pmd_t *pmdp,
+				  unsigned long trap, int local,
+				  int ssize, unsigned int psize)
+{
+	BUG();
+	return -1;
+}
+#endif
extern void hash_failure_debug(unsigned long ea, unsigned long access,
			       unsigned long vsid, unsigned long trap,
			       int ssize, int psize, int lpsize,
arch/powerpc/include/asm/mpc5121.h
@@ -68,6 +68,5 @@ struct mpc512x_lpc {
};

int mpc512x_cs_config(unsigned int cs, u32 val);
-int __init mpc5121_clk_init(void);

#endif /* __ASM_POWERPC_MPC5121_H__ */
arch/powerpc/include/asm/mpic.h
@@ -339,6 +339,8 @@ struct mpic
#endif
};

+extern struct bus_type mpic_subsys;
+
/*
 * MPIC flags (passed to mpic_alloc)
 *
@@ -393,6 +395,9 @@ struct mpic
#define MPIC_REGSET_STANDARD	MPIC_REGSET(0)	/* Original MPIC */
#define MPIC_REGSET_TSI108	MPIC_REGSET(1)	/* Tsi108/109 PIC */

+/* Get the version of primary MPIC */
+extern u32 fsl_mpic_primary_get_version(void);
+
/* Allocate the controller structure and setup the linux irq descs
 * for the range if interrupts passed in. No HW initialization is
 * actually performed.
arch/powerpc/include/asm/mpic_timer.h (new file, 46 lines)
@@ -0,0 +1,46 @@
|
||||
/*
|
||||
* arch/powerpc/include/asm/mpic_timer.h
|
||||
*
|
||||
* Header file for Mpic Global Timer
|
||||
*
|
||||
* Copyright 2013 Freescale Semiconductor, Inc.
|
||||
*
|
||||
* Author: Wang Dongsheng <Dongsheng.Wang@freescale.com>
|
||||
* Li Yang <leoli@freescale.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the
|
||||
* Free Software Foundation; either version 2 of the License, or (at your
|
||||
* option) any later version.
|
||||
*/
|
||||
|
||||
#ifndef __MPIC_TIMER__
|
||||
#define __MPIC_TIMER__
|
||||
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/time.h>
|
||||
|
||||
struct mpic_timer {
|
||||
void *dev;
|
||||
struct cascade_priv *cascade_handle;
|
||||
unsigned int num;
|
||||
unsigned int irq;
|
||||
};
|
||||
|
||||
#ifdef CONFIG_MPIC_TIMER
|
||||
struct mpic_timer *mpic_request_timer(irq_handler_t fn, void *dev,
|
||||
const struct timeval *time);
|
||||
void mpic_start_timer(struct mpic_timer *handle);
|
||||
void mpic_stop_timer(struct mpic_timer *handle);
|
||||
void mpic_get_remain_time(struct mpic_timer *handle, struct timeval *time);
|
||||
void mpic_free_timer(struct mpic_timer *handle);
|
||||
#else
|
||||
struct mpic_timer *mpic_request_timer(irq_handler_t fn, void *dev,
|
||||
const struct timeval *time) { return NULL; }
|
||||
void mpic_start_timer(struct mpic_timer *handle) { }
|
||||
void mpic_stop_timer(struct mpic_timer *handle) { }
|
||||
void mpic_get_remain_time(struct mpic_timer *handle, struct timeval *time) { }
|
||||
void mpic_free_timer(struct mpic_timer *handle) { }
|
||||
#endif
|
||||
|
||||
#endif
|
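For context, the new global-timer API above follows the usual request/start/stop/free pattern. A minimal sketch of a caller (the handler name, device pointer and 45-second period are illustrative, not taken from the patch):

	#include <linux/errno.h>
	#include <linux/interrupt.h>
	#include <linux/time.h>
	#include <asm/mpic_timer.h>

	/* Hypothetical expiry handler; runs when the requested period elapses. */
	static irqreturn_t demo_timer_isr(int irq, void *dev_id)
	{
		return IRQ_HANDLED;
	}

	static struct mpic_timer *demo_handle;

	static int demo_arm_timer(void *dev)
	{
		struct timeval period = { .tv_sec = 45, .tv_usec = 0 };

		demo_handle = mpic_request_timer(demo_timer_isr, dev, &period);
		if (!demo_handle)
			return -ENODEV;	/* no free timer, or !CONFIG_MPIC_TIMER */
		mpic_start_timer(demo_handle);
		return 0;
	}

	static void demo_cancel_timer(void)
	{
		if (demo_handle) {
			mpic_stop_timer(demo_handle);
			mpic_free_timer(demo_handle);
			demo_handle = NULL;
		}
	}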
@@ -117,7 +117,13 @@ extern int opal_enter_rtas(struct rtas_args *args,
#define OPAL_SET_SLOT_LED_STATUS		55
#define OPAL_GET_EPOW_STATUS			56
#define OPAL_SET_SYSTEM_ATTENTION_LED		57
#define OPAL_RESERVED1				58
#define OPAL_RESERVED2				59
#define OPAL_PCI_NEXT_ERROR			60
#define OPAL_PCI_EEH_FREEZE_STATUS2		61
#define OPAL_PCI_POLL				62
#define OPAL_PCI_MSI_EOI			63
#define OPAL_PCI_GET_PHB_DIAG_DATA2		64

#ifndef __ASSEMBLY__

@@ -125,6 +131,7 @@ extern int opal_enter_rtas(struct rtas_args *args,
enum OpalVendorApiTokens {
OPAL_START_VENDOR_API_RANGE = 1000, OPAL_END_VENDOR_API_RANGE = 1999
};

enum OpalFreezeState {
OPAL_EEH_STOPPED_NOT_FROZEN = 0,
OPAL_EEH_STOPPED_MMIO_FREEZE = 1,
@@ -134,55 +141,69 @@ enum OpalFreezeState {
OPAL_EEH_STOPPED_TEMP_UNAVAIL = 5,
OPAL_EEH_STOPPED_PERM_UNAVAIL = 6
};

enum OpalEehFreezeActionToken {
OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO = 1,
OPAL_EEH_ACTION_CLEAR_FREEZE_DMA = 2,
OPAL_EEH_ACTION_CLEAR_FREEZE_ALL = 3
};

enum OpalPciStatusToken {
OPAL_EEH_PHB_NO_ERROR = 0,
OPAL_EEH_PHB_FATAL = 1,
OPAL_EEH_PHB_RECOVERABLE = 2,
OPAL_EEH_PHB_BUS_ERROR = 3,
OPAL_EEH_PCI_NO_DEVSEL = 4,
OPAL_EEH_PCI_TA = 5,
OPAL_EEH_PCIEX_UR = 6,
OPAL_EEH_PCIEX_CA = 7,
OPAL_EEH_PCI_MMIO_ERROR = 8,
OPAL_EEH_PCI_DMA_ERROR = 9
OPAL_EEH_NO_ERROR = 0,
OPAL_EEH_IOC_ERROR = 1,
OPAL_EEH_PHB_ERROR = 2,
OPAL_EEH_PE_ERROR = 3,
OPAL_EEH_PE_MMIO_ERROR = 4,
OPAL_EEH_PE_DMA_ERROR = 5
};

enum OpalPciErrorSeverity {
OPAL_EEH_SEV_NO_ERROR = 0,
OPAL_EEH_SEV_IOC_DEAD = 1,
OPAL_EEH_SEV_PHB_DEAD = 2,
OPAL_EEH_SEV_PHB_FENCED = 3,
OPAL_EEH_SEV_PE_ER = 4,
OPAL_EEH_SEV_INF = 5
};

enum OpalShpcAction {
OPAL_SHPC_GET_LINK_STATE = 0,
OPAL_SHPC_GET_SLOT_STATE = 1
};

enum OpalShpcLinkState {
OPAL_SHPC_LINK_DOWN = 0,
OPAL_SHPC_LINK_UP = 1
};

enum OpalMmioWindowType {
OPAL_M32_WINDOW_TYPE = 1,
OPAL_M64_WINDOW_TYPE = 2,
OPAL_IO_WINDOW_TYPE = 3
};

enum OpalShpcSlotState {
OPAL_SHPC_DEV_NOT_PRESENT = 0,
OPAL_SHPC_DEV_PRESENT = 1
};

enum OpalExceptionHandler {
OPAL_MACHINE_CHECK_HANDLER = 1,
OPAL_HYPERVISOR_MAINTENANCE_HANDLER = 2,
OPAL_SOFTPATCH_HANDLER = 3
};

enum OpalPendingState {
OPAL_EVENT_OPAL_INTERNAL = 0x1,
OPAL_EVENT_NVRAM = 0x2,
OPAL_EVENT_RTC = 0x4,
OPAL_EVENT_CONSOLE_OUTPUT = 0x8,
OPAL_EVENT_CONSOLE_INPUT = 0x10,
OPAL_EVENT_ERROR_LOG_AVAIL = 0x20,
OPAL_EVENT_ERROR_LOG = 0x40,
OPAL_EVENT_EPOW = 0x80,
OPAL_EVENT_LED_STATUS = 0x100
OPAL_EVENT_OPAL_INTERNAL = 0x1,
OPAL_EVENT_NVRAM = 0x2,
OPAL_EVENT_RTC = 0x4,
OPAL_EVENT_CONSOLE_OUTPUT = 0x8,
OPAL_EVENT_CONSOLE_INPUT = 0x10,
OPAL_EVENT_ERROR_LOG_AVAIL = 0x20,
OPAL_EVENT_ERROR_LOG = 0x40,
OPAL_EVENT_EPOW = 0x80,
OPAL_EVENT_LED_STATUS = 0x100,
OPAL_EVENT_PCI_ERROR = 0x200
};

/* Machine check related definitions */
@@ -364,15 +385,80 @@ struct opal_machine_check_event {
} u;
};

enum {
OPAL_P7IOC_DIAG_TYPE_NONE	= 0,
OPAL_P7IOC_DIAG_TYPE_RGC	= 1,
OPAL_P7IOC_DIAG_TYPE_BI		= 2,
OPAL_P7IOC_DIAG_TYPE_CI		= 3,
OPAL_P7IOC_DIAG_TYPE_MISC	= 4,
OPAL_P7IOC_DIAG_TYPE_I2C	= 5,
OPAL_P7IOC_DIAG_TYPE_LAST	= 6
};

struct OpalIoP7IOCErrorData {
uint16_t type;

/* GEM */
uint64_t gemXfir;
uint64_t gemRfir;
uint64_t gemRirqfir;
uint64_t gemMask;
uint64_t gemRwof;

/* LEM */
uint64_t lemFir;
uint64_t lemErrMask;
uint64_t lemAction0;
uint64_t lemAction1;
uint64_t lemWof;

union {
struct OpalIoP7IOCRgcErrorData {
uint64_t rgcStatus;	/* 3E1C10 */
uint64_t rgcLdcp;	/* 3E1C18 */
}rgc;
struct OpalIoP7IOCBiErrorData {
uint64_t biLdcp0;	/* 3C0100, 3C0118 */
uint64_t biLdcp1;	/* 3C0108, 3C0120 */
uint64_t biLdcp2;	/* 3C0110, 3C0128 */
uint64_t biFenceStatus;	/* 3C0130, 3C0130 */

uint8_t biDownbound;	/* BI Downbound or Upbound */
}bi;
struct OpalIoP7IOCCiErrorData {
uint64_t ciPortStatus;	/* 3Dn008 */
uint64_t ciPortLdcp;	/* 3Dn010 */

uint8_t ciPort;		/* Index of CI port: 0/1 */
}ci;
};
};

/**
 * This structure defines the overlay which will be used to store PHB error
 * data upon request.
 */
enum {
OPAL_PHB_ERROR_DATA_VERSION_1 = 1,
};

enum {
OPAL_PHB_ERROR_DATA_TYPE_P7IOC = 1,
};

enum {
OPAL_P7IOC_NUM_PEST_REGS = 128,
};

struct OpalIoPhbErrorCommon {
uint32_t version;
uint32_t ioType;
uint32_t len;
};

struct OpalIoP7IOCPhbErrorData {
struct OpalIoPhbErrorCommon common;

uint32_t brdgCtl;

// P7IOC utl regs
@@ -530,14 +616,21 @@ int64_t opal_pci_map_pe_dma_window_real(uint64_t phb_id, uint16_t pe_number,
uint64_t pci_mem_size);
int64_t opal_pci_reset(uint64_t phb_id, uint8_t reset_scope, uint8_t assert_state);

int64_t opal_pci_get_hub_diag_data(uint64_t hub_id, void *diag_buffer, uint64_t diag_buffer_len);
int64_t opal_pci_get_phb_diag_data(uint64_t phb_id, void *diag_buffer, uint64_t diag_buffer_len);
int64_t opal_pci_get_hub_diag_data(uint64_t hub_id, void *diag_buffer,
uint64_t diag_buffer_len);
int64_t opal_pci_get_phb_diag_data(uint64_t phb_id, void *diag_buffer,
uint64_t diag_buffer_len);
int64_t opal_pci_get_phb_diag_data2(uint64_t phb_id, void *diag_buffer,
uint64_t diag_buffer_len);
int64_t opal_pci_fence_phb(uint64_t phb_id);
int64_t opal_pci_reinit(uint64_t phb_id, uint8_t reinit_scope);
int64_t opal_pci_mask_pe_error(uint64_t phb_id, uint16_t pe_number, uint8_t error_type, uint8_t mask_action);
int64_t opal_set_slot_led_status(uint64_t phb_id, uint64_t slot_id, uint8_t led_type, uint8_t led_action);
int64_t opal_get_epow_status(uint64_t *status);
int64_t opal_set_system_attention_led(uint8_t led_action);
int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe,
uint16_t *pci_error_type, uint16_t *severity);
int64_t opal_pci_poll(uint64_t phb_id);

/* Internal functions */
extern int early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data);
@@ -551,6 +644,11 @@ extern void hvc_opal_init_early(void);
extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
int depth, void *data);

extern int opal_notifier_register(struct notifier_block *nb);
extern void opal_notifier_enable(void);
extern void opal_notifier_disable(void);
extern void opal_notifier_update_evt(uint64_t evt_mask, uint64_t evt_val);

extern int opal_get_chars(uint32_t vtermno, char *buf, int count);
extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len);
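The notifier hooks declared above (opal_notifier_register() and friends) let platform code react to pending OPAL_EVENT_* bits such as the new OPAL_EVENT_PCI_ERROR. An illustrative consumer, not part of the patch (the callback and names are hypothetical):

	#include <linux/notifier.h>
	#include <linux/printk.h>
	#include <asm/opal.h>

	/* Hypothetical callback: 'events' carries the pending OPAL_EVENT_* bits. */
	static int demo_opal_event(struct notifier_block *nb,
				   unsigned long events, void *change)
	{
		if (events & OPAL_EVENT_PCI_ERROR)
			pr_info("OPAL signalled a PCI error event\n");
		return 0;
	}

	static struct notifier_block demo_opal_nb = {
		.notifier_call	= demo_opal_event,
	};

	static int __init demo_register(void)
	{
		int rc = opal_notifier_register(&demo_opal_nb);

		if (!rc)
			opal_notifier_enable();
		return rc;
	}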
@@ -60,6 +60,7 @@ struct power_pmu {
#define PPMU_HAS_SSLOT		0x00000020 /* Has sampled slot in MMCRA */
#define PPMU_HAS_SIER		0x00000040 /* Has SIER */
#define PPMU_BHRB		0x00000080 /* has BHRB feature enabled */
#define PPMU_EBB		0x00000100 /* supports event based branch */

/*
 * Values for flags to get_alternatives()
@@ -68,6 +69,11 @@ struct power_pmu {
#define PPMU_LIMITED_PMC_REQD	2	/* have to put this on a limited PMC */
#define PPMU_ONLY_COUNT_RUN	4	/* only counting in run state */

/*
 * We use the event config bit 63 as a flag to request EBB.
 */
#define EVENT_CONFIG_EBB_SHIFT	63

extern int register_power_pmu(struct power_pmu *);

struct pt_regs;
@@ -221,17 +221,17 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
return kmem_cache_alloc(PGT_CACHE(PMD_INDEX_SIZE),
return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
GFP_KERNEL|__GFP_REPEAT);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
kmem_cache_free(PGT_CACHE(PMD_INDEX_SIZE), pmd);
kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
}

#define __pmd_free_tlb(tlb, pmd, addr) \
pgtable_free_tlb(tlb, pmd, PMD_INDEX_SIZE)
pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX)
#ifndef CONFIG_PPC_64K_PAGES
#define __pud_free_tlb(tlb, pud, addr) \
pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE)
@@ -33,7 +33,8 @@
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/* Bits to mask out from a PMD to get to the PTE page */
#define PMD_MASKED_BITS		0x1ff
/* PMDs point to PTE table fragments which are 4K aligned. */
#define PMD_MASKED_BITS		0xfff
/* Bits to mask out from a PGD/PUD to get to the PMD page */
#define PUD_MASKED_BITS		0x1ff
@@ -10,6 +10,7 @@
#else
#include <asm/pgtable-ppc64-4k.h>
#endif
#include <asm/barrier.h>

#define FIRST_USER_ADDRESS	0

@@ -20,7 +21,11 @@
PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
#define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)


#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define PMD_CACHE_INDEX	(PMD_INDEX_SIZE + 1)
#else
#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
#endif
/*
 * Define the address range of the kernel non-linear virtual area
 */
@@ -150,7 +155,7 @@
#define pmd_present(pmd)	(pmd_val(pmd) != 0)
#define pmd_clear(pmdp)		(pmd_val(*(pmdp)) = 0)
#define pmd_page_vaddr(pmd)	(pmd_val(pmd) & ~PMD_MASKED_BITS)
#define pmd_page(pmd)		virt_to_page(pmd_page_vaddr(pmd))
extern struct page *pmd_page(pmd_t pmd);

#define pud_set(pudp, pudval)	(pud_val(*(pudp)) = (pudval))
#define pud_none(pud)		(!pud_val(pud))
@@ -339,43 +344,217 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)

void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
void pgtable_cache_init(void);

/*
 * find_linux_pte returns the address of a linux pte for a given
 * effective address and directory.  If not found, it returns zero.
 */
static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
{
pgd_t *pg;
pud_t *pu;
pmd_t *pm;
pte_t *pt = NULL;

pg = pgdir + pgd_index(ea);
if (!pgd_none(*pg)) {
pu = pud_offset(pg, ea);
if (!pud_none(*pu)) {
pm = pmd_offset(pu, ea);
if (pmd_present(*pm))
pt = pte_offset_kernel(pm, ea);
}
}
return pt;
}

#ifdef CONFIG_HUGETLB_PAGE
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
unsigned *shift);
#else
static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
unsigned *shift)
{
if (shift)
*shift = 0;
return find_linux_pte(pgdir, ea);
}
#endif /* !CONFIG_HUGETLB_PAGE */

#endif /* __ASSEMBLY__ */

/*
 * THP pages can't be special. So use the _PAGE_SPECIAL
 */
#define _PAGE_SPLITTING _PAGE_SPECIAL

/*
 * We need to differentiate between explicit huge page and THP huge
 * page, since THP huge page also need to track real subpage details
 */
#define _PAGE_THP_HUGE  _PAGE_4K_PFN

/*
 * set of bits not changed in pmd_modify.
 */
#define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | \
 _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPLITTING | \
 _PAGE_THP_HUGE)

#ifndef __ASSEMBLY__
/*
 * The linux hugepage PMD now include the pmd entries followed by the address
 * to the stashed pgtable_t. The stashed pgtable_t contains the hpte bits.
 * [ 1 bit secondary | 3 bit hidx | 1 bit valid | 000]. We use one byte per
 * each HPTE entry. With 16MB hugepage and 64K HPTE we need 256 entries and
 * with 4K HPTE we need 4096 entries. Both will fit in a 4K pgtable_t.
 *
 * The last three bits are intentionally left to zero. This memory location
 * are also used as normal page PTE pointers. So if we have any pointers
 * left around while we collapse a hugepage, we need to make sure
 * _PAGE_PRESENT and _PAGE_FILE bits of that are zero when we look at them
 */
static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index)
{
return (hpte_slot_array[index] >> 3) & 0x1;
}

static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array,
int index)
{
return hpte_slot_array[index] >> 4;
}

static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
unsigned int index, unsigned int hidx)
{
hpte_slot_array[index] = hidx << 4 | 0x1 << 3;
}

static inline char *get_hpte_slot_array(pmd_t *pmdp)
{
/*
 * The hpte hindex is stored in the pgtable whose address is in the
 * second half of the PMD
 *
 * Order this load with the test for pmd_trans_huge in the caller
 */
smp_rmb();
return *(char **)(pmdp + PTRS_PER_PMD);


}

extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd);
extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd);

static inline int pmd_trans_huge(pmd_t pmd)
{
/*
 * leaf pte for huge page, bottom two bits != 00
 */
return (pmd_val(pmd) & 0x3) && (pmd_val(pmd) & _PAGE_THP_HUGE);
}

static inline int pmd_large(pmd_t pmd)
{
/*
 * leaf pte for huge page, bottom two bits != 00
 */
if (pmd_trans_huge(pmd))
return pmd_val(pmd) & _PAGE_PRESENT;
return 0;
}

static inline int pmd_trans_splitting(pmd_t pmd)
{
if (pmd_trans_huge(pmd))
return pmd_val(pmd) & _PAGE_SPLITTING;
return 0;
}

extern int has_transparent_hugepage(void);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pmd_pte(pmd_t pmd)
{
return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
return __pmd(pte_val(pte));
}

static inline pte_t *pmdp_ptep(pmd_t *pmd)
{
return (pte_t *)pmd;
}

#define pmd_pfn(pmd)		pte_pfn(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
/* Do nothing, mk_pmd() does this part.  */
return pmd;
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
pmd_val(pmd) &= ~_PAGE_PRESENT;
return pmd;
}

static inline pmd_t pmd_mksplitting(pmd_t pmd)
{
pmd_val(pmd) |= _PAGE_SPLITTING;
return pmd;
}

#define __HAVE_ARCH_PMD_SAME
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
return (((pmd_val(pmd_a) ^ pmd_val(pmd_b)) & ~_PAGE_HPTEFLAGS) == 0);
}

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp,
pmd_t entry, int dirty);

extern unsigned long pmd_hugepage_update(struct mm_struct *mm,
unsigned long addr,
pmd_t *pmdp, unsigned long clr);

static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp)
{
unsigned long old;

if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
return 0;
old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED);
return ((old & _PAGE_ACCESSED) != 0);
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp);
#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
extern pmd_t pmdp_get_and_clear(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp)
{

if ((pmd_val(*pmdp) & _PAGE_RW) == 0)
return;

pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW);
}

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
pgtable_t pgtable);
#define __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_INVALIDATE
extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp);
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PGTABLE_PPC64_H_ */
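The per-subpage byte layout documented above ([ 1 bit secondary | 3 bit hidx | 1 bit valid | 000 ]) can be exercised in isolation. A stand-alone round-trip mirroring the three helpers, purely illustrative and compilable outside the kernel:

	#include <assert.h>

	/* One byte per subpage: [1 bit secondary | 3 bit hidx | 1 bit valid | 000] */
	static unsigned int demo_hpte_valid(unsigned char *a, int i)
	{
		return (a[i] >> 3) & 0x1;
	}

	static unsigned int demo_hpte_hash_index(unsigned char *a, int i)
	{
		return a[i] >> 4;
	}

	static void demo_mark_hpte_slot_valid(unsigned char *a, unsigned int i,
					      unsigned int hidx)
	{
		a[i] = hidx << 4 | 0x1 << 3;
	}

	int main(void)
	{
		unsigned char slots[256] = { 0 };	/* 16M page with 64K subpages */

		assert(!demo_hpte_valid(slots, 42));		/* nothing hashed yet */
		demo_mark_hpte_slot_valid(slots, 42, 0x5);	/* hidx 5, now valid */
		assert(demo_hpte_valid(slots, 42));
		assert(demo_hpte_hash_index(slots, 42) == 0x5);
		return 0;
	}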
@@ -217,6 +217,12 @@ extern int gup_hugepd(hugepd_t *hugepd, unsigned pdshift, unsigned long addr,

extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
unsigned long end, int write, struct page **pages, int *nr);
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_large(pmd)		0
#define has_transparent_hugepage() 0
#endif
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
unsigned *shift);
#endif /* __ASSEMBLY__ */

#endif /* __KERNEL__ */
@@ -38,5 +38,30 @@ typedef u32 ppc_opcode_t;
#define is_trap(instr)	(IS_TW(instr) || IS_TWI(instr))
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
#define MSR_SINGLESTEP	(MSR_DE)
#else
#define MSR_SINGLESTEP	(MSR_SE)
#endif

/* Enable single stepping for the current task */
static inline void enable_single_step(struct pt_regs *regs)
{
regs->msr |= MSR_SINGLESTEP;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * We turn off Critical Input Exception(CE) to ensure that the single
 * step will be for the instruction we have the probe on; if we don't,
 * it is possible we'd get the single step reported for CE.
 */
regs->msr &= ~MSR_CE;
mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
#ifdef CONFIG_PPC_47x
isync();
#endif
#endif
}


#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PROBES_H */
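enable_single_step() above hides the two single-step mechanisms behind MSR_SINGLESTEP (MSR_DE on BookE-style debug hardware, MSR_SE otherwise). A hedged sketch of a caller, with the surrounding probe machinery omitted and hypothetical:

	#include <asm/probes.h>

	/*
	 * Arm single-stepping before returning to the probed instruction;
	 * 'regs' is the exception frame of the trapped task.
	 */
	static void demo_prepare_single_step(struct pt_regs *regs)
	{
		enable_single_step(regs);
	}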
@@ -168,10 +168,10 @@ struct thread_struct {
 * The following help to manage the use of Debug Control Registers
 * om the BookE platforms.
 */
unsigned long	dbcr0;
unsigned long	dbcr1;
uint32_t	dbcr0;
uint32_t	dbcr1;
#ifdef CONFIG_BOOKE
unsigned long	dbcr2;
uint32_t	dbcr2;
#endif
/*
 * The stored value of the DBSR register will be the value at the
@@ -179,7 +179,7 @@ struct thread_struct {
 * user (will never be written to) and has value while helping to
 * describe the reason for the last debug trap.  Torez
 */
unsigned long	dbsr;
uint32_t	dbsr;
/*
 * The following will contain addresses used by debug applications
 * to help trace and trap on particular address locations.
@@ -200,7 +200,7 @@ struct thread_struct {
#endif
#endif
/* FP and VSX 0-31 register set */
double		fpr[32][TS_FPRWIDTH];
double		fpr[32][TS_FPRWIDTH] __attribute__((aligned(16)));
struct {

unsigned int pad;
@@ -287,9 +287,9 @@ struct thread_struct {
unsigned long	siar;
unsigned long	sdar;
unsigned long	sier;
unsigned long	mmcr0;
unsigned long	mmcr2;
unsigned long	mmcra;
unsigned	mmcr0;
unsigned	used_ebb;
#endif
};

@@ -404,9 +404,7 @@ static inline void prefetchw(const void *x)

#define spin_lock_prefetch(x)	prefetchw(x)

#ifdef CONFIG_PPC64
#define HAVE_ARCH_PICK_MMAP_LAYOUT
#endif

#ifdef CONFIG_PPC64
static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
@@ -621,11 +621,15 @@
#define   MMCR0_PMXE	0x04000000UL /* performance monitor exception enable */
#define   MMCR0_FCECE	0x02000000UL /* freeze ctrs on enabled cond or event */
#define   MMCR0_TBEE	0x00400000UL /* time base exception enable */
#define   MMCR0_EBE	0x00100000UL /* Event based branch enable */
#define   MMCR0_PMCC	0x000c0000UL /* PMC control */
#define   MMCR0_PMCC_U6	0x00080000UL /* PMC1-6 are R/W by user (PR) */
#define   MMCR0_PMC1CE	0x00008000UL /* PMC1 count enable*/
#define   MMCR0_PMCjCE	0x00004000UL /* PMCj count enable*/
#define   MMCR0_TRIGGER	0x00002000UL /* TRIGGER enable */
#define   MMCR0_PMAO	0x00000080UL /* performance monitor alert has occurred, set to 0 after handling exception */
#define   MMCR0_SHRFC	0x00000040UL /* SHRre freeze conditions between threads */
#define   MMCR0_FC56	0x00000010UL /* freeze counters 5 and 6 */
#define   MMCR0_FCTI	0x00000008UL /* freeze counters in tags inactive mode */
#define   MMCR0_FCTA	0x00000004UL /* freeze counters in tags active mode */
#define   MMCR0_FCWAIT	0x00000002UL /* freeze counter in WAIT state */
@@ -673,6 +677,11 @@
#define   SIER_SIAR_VALID	0x0400000	/* SIAR contents valid */
#define   SIER_SDAR_VALID	0x0200000	/* SDAR contents valid */

/* When EBB is enabled, some of MMCR0/MMCR2/SIER are user accessible */
#define MMCR0_USER_MASK	(MMCR0_FC | MMCR0_PMXE | MMCR0_PMAO)
#define MMCR2_USER_MASK	0x4020100804020000UL /* (FC1P|FC2P|FC3P|FC4P|FC5P|FC6P) */
#define SIER_USER_MASK	0x7fffffUL

#define SPRN_PA6T_MMCR0 795
#define   PA6T_MMCR0_EN0	0x0000000000000001UL
#define   PA6T_MMCR0_EN1	0x0000000000000002UL
@@ -350,8 +350,8 @@ static inline u32 rtas_config_addr(int busno, int devfn, int reg)
(devfn << 8) | (reg & 0xff);
}

extern void __cpuinit rtas_give_timebase(void);
extern void __cpuinit rtas_take_timebase(void);
extern void rtas_give_timebase(void);
extern void rtas_take_timebase(void);

#ifdef CONFIG_PPC_RTAS
static inline int page_is_rtas_user_buf(unsigned long pfn)
@@ -67,4 +67,18 @@ static inline void flush_spe_to_thread(struct task_struct *t)
}
#endif

static inline void clear_task_ebb(struct task_struct *t)
{
#ifdef CONFIG_PPC_BOOK3S_64
/* EBB perf events are not inherited, so clear all EBB state. */
t->thread.bescr = 0;
t->thread.mmcr2 = 0;
t->thread.mmcr0 = 0;
t->thread.siar = 0;
t->thread.sdar = 0;
t->thread.sier = 0;
t->thread.used_ebb = 0;
#endif
}

#endif /* _ASM_POWERPC_SWITCH_TO_H */
@@ -165,7 +165,8 @@ static inline void flush_tlb_kernel_range(unsigned long start,
/* Private function for use by PCI IO mapping code */
extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
unsigned long end);

extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr);
#else
#error Unsupported MMU type
#endif
@@ -22,7 +22,7 @@ extern unsigned long vdso64_rt_sigtramp;
extern unsigned long vdso32_sigtramp;
extern unsigned long vdso32_rt_sigtramp;

int __cpuinit vdso_getcpu_init(void);
int vdso_getcpu_init(void);

#else /* __ASSEMBLY__ */
@@ -58,6 +58,8 @@ obj-$(CONFIG_RTAS_PROC)	+= rtas-proc.o
obj-$(CONFIG_LPARCFG)		+= lparcfg.o
obj-$(CONFIG_IBMVIO)		+= vio.o
obj-$(CONFIG_IBMEBUS)		+= ibmebus.o
obj-$(CONFIG_EEH)		+= eeh.o eeh_pe.o eeh_dev.o eeh_cache.o \
				   eeh_driver.o eeh_event.o eeh_sysfs.o
obj-$(CONFIG_GENERIC_TBSYNC)	+= smp-tbsync.o
obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
obj-$(CONFIG_FA_DUMP)		+= fadump.o
@@ -100,7 +102,7 @@ obj-$(CONFIG_PPC_UDBG_16550)	+= legacy_serial.o udbg_16550.o
obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
obj-$(CONFIG_SWIOTLB)		+= dma-swiotlb.o

pci64-$(CONFIG_PPC64)		+= pci_dn.o isa-bridge.o
pci64-$(CONFIG_PPC64)		+= pci_dn.o pci-hotplug.o isa-bridge.o
obj-$(CONFIG_PCI)		+= pci_$(CONFIG_WORD_SIZE).o $(pci64-y) \
				   pci-common.o pci_of_scan.o
obj-$(CONFIG_PCI_MSI)		+= msi.o
@@ -105,9 +105,6 @@ int main(void)
DEFINE(KSP_VSID, offsetof(struct thread_struct, ksp_vsid));
#else /* CONFIG_PPC64 */
DEFINE(PGDIR, offsetof(struct thread_struct, pgdir));
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, dbcr0));
#endif
#ifdef CONFIG_SPE
DEFINE(THREAD_EVR0, offsetof(struct thread_struct, evr[0]));
DEFINE(THREAD_ACC, offsetof(struct thread_struct, acc));
@@ -115,6 +112,9 @@ int main(void)
DEFINE(THREAD_USED_SPE, offsetof(struct thread_struct, used_spe));
#endif /* CONFIG_SPE */
#endif /* CONFIG_PPC64 */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, dbcr0));
#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
DEFINE(THREAD_KVM_SVCPU, offsetof(struct thread_struct, kvm_shadow_vcpu));
#endif
@@ -132,7 +132,6 @@ int main(void)
DEFINE(THREAD_SIER, offsetof(struct thread_struct, sier));
DEFINE(THREAD_MMCR0, offsetof(struct thread_struct, mmcr0));
DEFINE(THREAD_MMCR2, offsetof(struct thread_struct, mmcr2));
DEFINE(THREAD_MMCRA, offsetof(struct thread_struct, mmcra));
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
DEFINE(PACATMSCRATCH, offsetof(struct paca_struct, tm_scratch));
@@ -131,7 +131,8 @@ static const char *cache_type_string(const struct cache *cache)
return cache_type_info[cache->type].name;
}

static void __cpuinit cache_init(struct cache *cache, int type, int level, struct device_node *ofnode)
static void cache_init(struct cache *cache, int type, int level,
struct device_node *ofnode)
{
cache->type = type;
cache->level = level;
@@ -140,7 +141,7 @@ static void __cpuinit cache_init(struct cache *cache, int type, int level, struc
list_add(&cache->list, &cache_list);
}

static struct cache *__cpuinit new_cache(int type, int level, struct device_node *ofnode)
static struct cache *new_cache(int type, int level, struct device_node *ofnode)
{
struct cache *cache;

@@ -324,7 +325,8 @@ static bool cache_node_is_unified(const struct device_node *np)
return of_get_property(np, "cache-unified", NULL);
}

static struct cache *__cpuinit cache_do_one_devnode_unified(struct device_node *node, int level)
static struct cache *cache_do_one_devnode_unified(struct device_node *node,
int level)
{
struct cache *cache;

@@ -335,7 +337,8 @@ static struct cache *__cpuinit cache_do_one_devnode_unified(struct device_node *
return cache;
}

static struct cache *__cpuinit cache_do_one_devnode_split(struct device_node *node, int level)
static struct cache *cache_do_one_devnode_split(struct device_node *node,
int level)
{
struct cache *dcache, *icache;

@@ -357,7 +360,7 @@ err:
return NULL;
}

static struct cache *__cpuinit cache_do_one_devnode(struct device_node *node, int level)
static struct cache *cache_do_one_devnode(struct device_node *node, int level)
{
struct cache *cache;

@@ -369,7 +372,8 @@ static struct cache *__cpuinit cache_do_one_devnode(struct device_node *node, in
return cache;
}

static struct cache *__cpuinit cache_lookup_or_instantiate(struct device_node *node, int level)
static struct cache *cache_lookup_or_instantiate(struct device_node *node,
int level)
{
struct cache *cache;

@@ -385,7 +389,7 @@ static struct cache *__cpuinit cache_lookup_or_instantiate(struct device_node *n
return cache;
}

static void __cpuinit link_cache_lists(struct cache *smaller, struct cache *bigger)
static void link_cache_lists(struct cache *smaller, struct cache *bigger)
{
while (smaller->next_local) {
if (smaller->next_local == bigger)
@@ -396,13 +400,13 @@ static void __cpuinit link_cache_lists(struct cache *smaller, struct cache *bigg
smaller->next_local = bigger;
}

static void __cpuinit do_subsidiary_caches_debugcheck(struct cache *cache)
static void do_subsidiary_caches_debugcheck(struct cache *cache)
{
WARN_ON_ONCE(cache->level != 1);
WARN_ON_ONCE(strcmp(cache->ofnode->type, "cpu"));
}

static void __cpuinit do_subsidiary_caches(struct cache *cache)
static void do_subsidiary_caches(struct cache *cache)
{
struct device_node *subcache_node;
int level = cache->level;
@@ -423,7 +427,7 @@ static void __cpuinit do_subsidiary_caches(struct cache *cache)
}
}

static struct cache *__cpuinit cache_chain_instantiate(unsigned int cpu_id)
static struct cache *cache_chain_instantiate(unsigned int cpu_id)
{
struct device_node *cpu_node;
struct cache *cpu_cache = NULL;
@@ -448,7 +452,7 @@ out:
return cpu_cache;
}

static struct cache_dir *__cpuinit cacheinfo_create_cache_dir(unsigned int cpu_id)
static struct cache_dir *cacheinfo_create_cache_dir(unsigned int cpu_id)
{
struct cache_dir *cache_dir;
struct device *dev;
@@ -653,7 +657,7 @@ static struct kobj_type cache_index_type = {
.default_attrs = cache_index_default_attrs,
};

static void __cpuinit cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
{
const char *cache_name;
const char *cache_type;
@@ -696,7 +700,8 @@ static void __cpuinit cacheinfo_create_index_opt_attrs(struct cache_index_dir *d
kfree(buf);
}

static void __cpuinit cacheinfo_create_index_dir(struct cache *cache, int index, struct cache_dir *cache_dir)
static void cacheinfo_create_index_dir(struct cache *cache, int index,
struct cache_dir *cache_dir)
{
struct cache_index_dir *index_dir;
int rc;
@@ -722,7 +727,8 @@ err:
kfree(index_dir);
}

static void __cpuinit cacheinfo_sysfs_populate(unsigned int cpu_id, struct cache *cache_list)
static void cacheinfo_sysfs_populate(unsigned int cpu_id,
struct cache *cache_list)
{
struct cache_dir *cache_dir;
struct cache *cache;
@@ -740,7 +746,7 @@ static void __cpuinit cacheinfo_sysfs_populate(unsigned int cpu_id, struct cache
}
}

void __cpuinit cacheinfo_cpu_online(unsigned int cpu_id)
void cacheinfo_cpu_online(unsigned int cpu_id)
{
struct cache *cache;
@@ -103,11 +103,8 @@ EXPORT_SYMBOL(eeh_subsystem_enabled);
 */
int eeh_probe_mode;

/* Global EEH mutex */
DEFINE_MUTEX(eeh_mutex);

/* Lock to avoid races due to multiple reports of an error */
static DEFINE_RAW_SPINLOCK(confirm_error_lock);
DEFINE_RAW_SPINLOCK(confirm_error_lock);

/* Buffer for reporting pci register dumps. Its here in BSS, and
 * not dynamically alloced, so that it ends up in RMO where RTAS
@@ -235,16 +232,30 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
{
size_t loglen = 0;
struct eeh_dev *edev;
bool valid_cfg_log = true;

eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
eeh_ops->configure_bridge(pe);
eeh_pe_restore_bars(pe);
/*
 * When the PHB is fenced or dead, it's pointless to collect
 * the data from PCI config space because it should return
 * 0xFF's. For ER, we still retrieve the data from the PCI
 * config space.
 */
if (eeh_probe_mode_dev() &&
(pe->type & EEH_PE_PHB) &&
(pe->state & (EEH_PE_ISOLATED | EEH_PE_PHB_DEAD)))
valid_cfg_log = false;

pci_regs_buf[0] = 0;
eeh_pe_for_each_dev(pe, edev) {
loglen += eeh_gather_pci_data(edev, pci_regs_buf,
EEH_PCI_REGS_LOG_LEN);
}
if (valid_cfg_log) {
eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
eeh_ops->configure_bridge(pe);
eeh_pe_restore_bars(pe);

pci_regs_buf[0] = 0;
eeh_pe_for_each_dev(pe, edev) {
loglen += eeh_gather_pci_data(edev, pci_regs_buf + loglen,
EEH_PCI_REGS_LOG_LEN - loglen);
}
}

eeh_ops->get_log(pe, severity, pci_regs_buf, loglen);
}
@@ -260,15 +271,74 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
{
pte_t *ptep;
unsigned long pa;
int hugepage_shift;

ptep = find_linux_pte(init_mm.pgd, token);
/*
 * We won't find hugepages here, iomem
 */
ptep = find_linux_pte_or_hugepte(init_mm.pgd, token, &hugepage_shift);
if (!ptep)
return token;
WARN_ON(hugepage_shift);
pa = pte_pfn(*ptep) << PAGE_SHIFT;

return pa | (token & (PAGE_SIZE-1));
}

/*
 * On PowerNV platform, we might already have fenced PHB there.
 * For that case, it's meaningless to recover frozen PE. Intead,
 * We have to handle fenced PHB firstly.
 */
static int eeh_phb_check_failure(struct eeh_pe *pe)
{
struct eeh_pe *phb_pe;
unsigned long flags;
int ret;

if (!eeh_probe_mode_dev())
return -EPERM;

/* Find the PHB PE */
phb_pe = eeh_phb_pe_get(pe->phb);
if (!phb_pe) {
pr_warning("%s Can't find PE for PHB#%d\n",
__func__, pe->phb->global_number);
return -EEXIST;
}

/* If the PHB has been in problematic state */
eeh_serialize_lock(&flags);
if (phb_pe->state & (EEH_PE_ISOLATED | EEH_PE_PHB_DEAD)) {
ret = 0;
goto out;
}

/* Check PHB state */
ret = eeh_ops->get_state(phb_pe, NULL);
if ((ret < 0) ||
(ret == EEH_STATE_NOT_SUPPORT) ||
(ret & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) ==
(EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) {
ret = 0;
goto out;
}

/* Isolate the PHB and send event */
eeh_pe_state_mark(phb_pe, EEH_PE_ISOLATED);
eeh_serialize_unlock(flags);
eeh_send_failure_event(phb_pe);

pr_err("EEH: PHB#%x failure detected\n",
phb_pe->phb->global_number);
dump_stack();

return 1;
out:
eeh_serialize_unlock(flags);
return ret;
}

/**
 * eeh_dev_check_failure - Check if all 1's data is due to EEH slot freeze
 * @edev: eeh device
@@ -319,13 +389,21 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
return 0;
}

/*
 * On PowerNV platform, we might already have fenced PHB
 * there and we need take care of that firstly.
 */
ret = eeh_phb_check_failure(pe);
if (ret > 0)
return ret;

/* If we already have a pending isolation event for this
 * slot, we know it's bad already, we don't need to check.
 * Do this checking under a lock; as multiple PCI devices
 * in one slot might report errors simultaneously, and we
 * only want one error recovery routine running.
 */
raw_spin_lock_irqsave(&confirm_error_lock, flags);
eeh_serialize_lock(&flags);
rc = 1;
if (pe->state & EEH_PE_ISOLATED) {
pe->check_count++;
@@ -368,13 +446,13 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
}

eeh_stats.slot_resets++;


/* Avoid repeated reports of this failure, including problems
 * with other functions on this device, and functions under
 * bridges.
 */
eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
eeh_serialize_unlock(flags);

eeh_send_failure_event(pe);

@@ -382,11 +460,14 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
 * a stack trace will help the device-driver authors figure
 * out what happened.  So print that out.
 */
WARN(1, "EEH: failure detected\n");
pr_err("EEH: Frozen PE#%x detected on PHB#%x\n",
pe->addr, pe->phb->global_number);
dump_stack();

return 1;

dn_unlock:
raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
eeh_serialize_unlock(flags);
return rc;
}

@@ -525,7 +606,7 @@ static void eeh_reset_pe_once(struct eeh_pe *pe)
 * or a fundamental reset (3).
 * A fundamental reset required by any device under
 * Partitionable Endpoint trumps hot-reset.
 */
 */
eeh_pe_dev_traverse(pe, eeh_set_dev_freset, &freset);

if (freset)
@@ -538,8 +619,8 @@ static void eeh_reset_pe_once(struct eeh_pe *pe)
 */
#define PCI_BUS_RST_HOLD_TIME_MSEC 250
msleep(PCI_BUS_RST_HOLD_TIME_MSEC);

/* We might get hit with another EEH freeze as soon as the

/* We might get hit with another EEH freeze as soon as the
 * pci slot reset line is dropped. Make sure we don't miss
 * these, and clear the flag now.
 */
@@ -565,6 +646,7 @@ static void eeh_reset_pe_once(struct eeh_pe *pe)
 */
int eeh_reset_pe(struct eeh_pe *pe)
{
int flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
int i, rc;

/* Take three shots at resetting the bus */
@@ -572,7 +654,7 @@ int eeh_reset_pe(struct eeh_pe *pe)
eeh_reset_pe_once(pe);

rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
if (rc == (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE))
if ((rc & flags) == flags)
return 0;

if (rc < 0) {
@@ -604,7 +686,7 @@ void eeh_save_bars(struct eeh_dev *edev)
if (!edev)
return;
dn = eeh_dev_to_of_node(edev);


for (i = 0; i < 16; i++)
eeh_ops->read_config(dn, i * 4, 4, &edev->config_space[i]);
}
@@ -674,11 +756,21 @@ int __exit eeh_ops_unregister(const char *name)
 * Even if force-off is set, the EEH hardware is still enabled, so that
 * newer systems can boot.
 */
static int __init eeh_init(void)
int eeh_init(void)
{
struct pci_controller *hose, *tmp;
struct device_node *phb;
int ret;
static int cnt = 0;
int ret = 0;

/*
 * We have to delay the initialization on PowerNV after
 * the PCI hierarchy tree has been built because the PEs
 * are figured out based on PCI devices instead of device
 * tree nodes
 */
if (machine_is(powernv) && cnt++ <= 0)
return ret;

/* call platform initialization function */
if (!eeh_ops) {
@@ -691,7 +783,10 @@ static int __init eeh_init(void)
return ret;
}

raw_spin_lock_init(&confirm_error_lock);
/* Initialize EEH event */
ret = eeh_event_init();
if (ret)
return ret;

/* Enable EEH for all adapters */
if (eeh_probe_mode_devtree()) {
@@ -700,6 +795,25 @@ static int __init eeh_init(void)
phb = hose->dn;
traverse_pci_devices(phb, eeh_ops->of_probe, NULL);
}
} else if (eeh_probe_mode_dev()) {
list_for_each_entry_safe(hose, tmp,
&hose_list, list_node)
pci_walk_bus(hose->bus, eeh_ops->dev_probe, NULL);
} else {
pr_warning("%s: Invalid probe mode %d\n",
__func__, eeh_probe_mode);
return -EINVAL;
}

/*
 * Call platform post-initialization. Actually, It's good chance
 * to inform platform that EEH is ready to supply service if the
 * I/O cache stuff has been built up.
 */
if (eeh_ops->post_init) {
ret = eeh_ops->post_init();
if (ret)
return ret;
}

if (eeh_subsystem_enabled)
@@ -728,6 +842,14 @@ static void eeh_add_device_early(struct device_node *dn)
{
struct pci_controller *phb;

/*
 * If we're doing EEH probe based on PCI device, we
 * would delay the probe until late stage because
 * the PCI device isn't available this moment.
 */
if (!eeh_probe_mode_devtree())
return;

if (!of_node_to_eeh_dev(dn))
return;
phb = of_node_to_eeh_dev(dn)->phb;
@@ -736,7 +858,6 @@ static void eeh_add_device_early(struct device_node *dn)
if (NULL == phb || 0 == phb->buid)
return;

/* FIXME: hotplug support on POWERNV */
eeh_ops->of_probe(dn, NULL);
}

@@ -787,6 +908,13 @@ static void eeh_add_device_late(struct pci_dev *dev)
edev->pdev = dev;
dev->dev.archdata.edev = edev;

/*
 * We have to do the EEH probe here because the PCI device
 * hasn't been created yet in the early stage.
 */
if (eeh_probe_mode_dev())
eeh_ops->dev_probe(dev, NULL);

eeh_addr_cache_insert_dev(dev);
}

@@ -803,12 +931,12 @@ void eeh_add_device_tree_late(struct pci_bus *bus)
struct pci_dev *dev;

list_for_each_entry(dev, &bus->devices, bus_list) {
eeh_add_device_late(dev);
if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
struct pci_bus *subbus = dev->subordinate;
if (subbus)
eeh_add_device_tree_late(subbus);
}
eeh_add_device_late(dev);
if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
struct pci_bus *subbus = dev->subordinate;
if (subbus)
eeh_add_device_tree_late(subbus);
}
}
}
EXPORT_SYMBOL_GPL(eeh_add_device_tree_late);
@@ -194,7 +194,7 @@ static void __eeh_addr_cache_insert_dev(struct pci_dev *dev)
}

/* Skip any devices for which EEH is not enabled. */
if (!edev->pe) {
if (!eeh_probe_mode_dev() && !edev->pe) {
#ifdef DEBUG
pr_info("PCI: skip building address cache for=%s - %s\n",
pci_name(dev), dn->full_name);
@@ -285,7 +285,7 @@ void eeh_addr_cache_rmv_dev(struct pci_dev *dev)
 * Must be run late in boot process, after the pci controllers
 * have been scanned for devices (after all device resources are known).
 */
void __init eeh_addr_cache_build(void)
void eeh_addr_cache_build(void)
{
struct device_node *dn;
struct eeh_dev *edev;
@@ -316,4 +316,3 @@ void __init eeh_addr_cache_build(void)
eeh_addr_cache_print(&pci_io_addr_cache_root);
#endif
}
@ -154,9 +154,9 @@ static void eeh_enable_irq(struct pci_dev *dev)
|
||||
* eeh_report_error - Report pci error to each device driver
|
||||
* @data: eeh device
|
||||
* @userdata: return value
|
||||
*
|
||||
* Report an EEH error to each device driver, collect up and
|
||||
* merge the device driver responses. Cumulative response
|
||||
*
|
||||
* Report an EEH error to each device driver, collect up and
|
||||
* merge the device driver responses. Cumulative response
|
||||
* passed back in "userdata".
|
||||
*/
|
||||
static void *eeh_report_error(void *data, void *userdata)
|
||||
@ -349,10 +349,12 @@ static void *eeh_report_failure(void *data, void *userdata)
|
||||
*/
|
||||
static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
|
||||
{
|
||||
struct timeval tstamp;
|
||||
int cnt, rc;
|
||||
|
||||
/* pcibios will clear the counter; save the value */
|
||||
cnt = pe->freeze_count;
|
||||
tstamp = pe->tstamp;
|
||||
|
||||
/*
|
||||
* We don't remove the corresponding PE instances because
|
||||
@ -376,15 +378,17 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
|
||||
eeh_pe_restore_bars(pe);
|
||||
|
||||
/* Give the system 5 seconds to finish running the user-space
|
||||
* hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,
|
||||
* this is a hack, but if we don't do this, and try to bring
|
||||
* the device up before the scripts have taken it down,
|
||||
* hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,
|
||||
* this is a hack, but if we don't do this, and try to bring
|
||||
* the device up before the scripts have taken it down,
|
||||
* potentially weird things happen.
|
||||
*/
|
||||
if (bus) {
|
||||
ssleep(5);
|
||||
pcibios_add_pci_devices(bus);
|
||||
}
|
||||
|
||||
pe->tstamp = tstamp;
|
||||
pe->freeze_count = cnt;
|
||||
|
||||
return 0;
|
||||
@ -395,24 +399,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
|
||||
*/
|
||||
#define MAX_WAIT_FOR_RECOVERY 150
|
||||
|
||||
/**
|
||||
* eeh_handle_event - Reset a PCI device after hard lockup.
|
||||
* @pe: EEH PE
|
||||
*
|
||||
* While PHB detects address or data parity errors on particular PCI
|
||||
* slot, the associated PE will be frozen. Besides, DMA's occurring
|
||||
* to wild addresses (which usually happen due to bugs in device
|
||||
* drivers or in PCI adapter firmware) can cause EEH error. #SERR,
|
||||
* #PERR or other misc PCI-related errors also can trigger EEH errors.
|
||||
*
|
||||
* Recovery process consists of unplugging the device driver (which
|
||||
* generated hotplug events to userspace), then issuing a PCI #RST to
|
||||
* the device, then reconfiguring the PCI config space for all bridges
|
||||
* & devices under this slot, and then finally restarting the device
|
||||
* drivers (which cause a second set of hotplug events to go out to
|
||||
* userspace).
|
||||
*/
|
||||
void eeh_handle_event(struct eeh_pe *pe)
|
||||
static void eeh_handle_normal_event(struct eeh_pe *pe)
|
||||
{
|
||||
struct pci_bus *frozen_bus;
|
||||
int rc = 0;
|
||||
@ -425,6 +412,7 @@ void eeh_handle_event(struct eeh_pe *pe)
|
||||
return;
|
||||
}
|
||||
|
||||
eeh_pe_update_time_stamp(pe);
|
||||
pe->freeze_count++;
|
||||
if (pe->freeze_count > EEH_MAX_ALLOWED_FREEZES)
|
||||
goto excess_failures;
|
||||
@ -437,6 +425,7 @@ void eeh_handle_event(struct eeh_pe *pe)
|
||||
* status ... if any child can't handle the reset, then the entire
|
||||
* slot is dlpar removed and added.
|
||||
*/
|
||||
pr_info("EEH: Notify device drivers to shutdown\n");
|
||||
eeh_pe_dev_traverse(pe, eeh_report_error, &result);
|
||||
|
||||
/* Get the current PCI slot state. This can take a long time,
|
||||
@ -444,7 +433,7 @@ void eeh_handle_event(struct eeh_pe *pe)
|
||||
*/
|
||||
rc = eeh_ops->wait_state(pe, MAX_WAIT_FOR_RECOVERY*1000);
|
||||
if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
|
||||
printk(KERN_WARNING "EEH: Permanent failure\n");
|
||||
pr_warning("EEH: Permanent failure\n");
|
||||
goto hard_fail;
|
||||
}
|
||||
|
||||
@ -452,6 +441,7 @@ void eeh_handle_event(struct eeh_pe *pe)
|
||||
* don't post the error log until after all dev drivers
|
||||
* have been informed.
|
||||
*/
|
||||
pr_info("EEH: Collect temporary log\n");
|
||||
eeh_slot_error_detail(pe, EEH_LOG_TEMP);
|
||||
|
||||
/* If all device drivers were EEH-unaware, then shut
|
||||
@ -459,15 +449,18 @@ void eeh_handle_event(struct eeh_pe *pe)
|
||||
* go down willingly, without panicing the system.
|
||||
*/
|
||||
if (result == PCI_ERS_RESULT_NONE) {
|
||||
pr_info("EEH: Reset with hotplug activity\n");
|
||||
rc = eeh_reset_device(pe, frozen_bus);
|
||||
if (rc) {
|
||||
printk(KERN_WARNING "EEH: Unable to reset, rc=%d\n", rc);
|
||||
pr_warning("%s: Unable to reset, err=%d\n",
|
||||
__func__, rc);
|
||||
goto hard_fail;
|
||||
}
|
||||
}
|
||||
|
||||
/* If all devices reported they can proceed, then re-enable MMIO */
|
||||
if (result == PCI_ERS_RESULT_CAN_RECOVER) {
|
||||
pr_info("EEH: Enable I/O for affected devices\n");
|
||||
rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
|
||||
|
||||
if (rc < 0)
|
||||
@ -475,6 +468,7 @@ void eeh_handle_event(struct eeh_pe *pe)
|
||||
if (rc) {
|
||||
result = PCI_ERS_RESULT_NEED_RESET;
|
||||
} else {
|
||||
pr_info("EEH: Notify device drivers to resume I/O\n");
|
||||
result = PCI_ERS_RESULT_NONE;
|
||||
eeh_pe_dev_traverse(pe, eeh_report_mmio_enabled, &result);
|
||||
}
|
||||
@ -482,6 +476,7 @@ void eeh_handle_event(struct eeh_pe *pe)
|
||||
|
||||
/* If all devices reported they can proceed, then re-enable DMA */
|
||||
if (result == PCI_ERS_RESULT_CAN_RECOVER) {
|
||||
pr_info("EEH: Enabled DMA for affected devices\n");
|
||||
rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);
|
||||
|
||||
if (rc < 0)
|
||||
@ -494,17 +489,22 @@ void eeh_handle_event(struct eeh_pe *pe)
|
||||
|
||||
/* If any device has a hard failure, then shut off everything. */
|
||||
if (result == PCI_ERS_RESULT_DISCONNECT) {
|
||||
printk(KERN_WARNING "EEH: Device driver gave up\n");
|
||||
pr_warning("EEH: Device driver gave up\n");
|
||||
goto hard_fail;
|
||||
}
|
||||
|
||||
/* If any device called out for a reset, then reset the slot */
|
||||
if (result == PCI_ERS_RESULT_NEED_RESET) {
|
||||
pr_info("EEH: Reset without hotplug activity\n");
|
||||
rc = eeh_reset_device(pe, NULL);
|
||||
if (rc) {
|
||||
printk(KERN_WARNING "EEH: Cannot reset, rc=%d\n", rc);
|
||||
pr_warning("%s: Cannot reset, err=%d\n",
|
||||
__func__, rc);
|
||||
goto hard_fail;
|
||||
}
|
||||
|
||||
pr_info("EEH: Notify device drivers "
|
||||
"the completion of reset\n");
|
||||
result = PCI_ERS_RESULT_NONE;
		eeh_pe_dev_traverse(pe, eeh_report_reset, &result);
	}

@@ -512,15 +512,16 @@ void eeh_handle_event(struct eeh_pe *pe)

	/* All devices should claim they have recovered by now. */
	if ((result != PCI_ERS_RESULT_RECOVERED) &&
	    (result != PCI_ERS_RESULT_NONE)) {
		printk(KERN_WARNING "EEH: Not recovered\n");
		pr_warning("EEH: Not recovered\n");
		goto hard_fail;
	}

	/* Tell all device drivers that they can resume operations */
	pr_info("EEH: Notify device driver to resume\n");
	eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);

	return;

excess_failures:
	/*
	 * About 90% of all real-life EEH failures in the field
@@ -550,3 +551,111 @@ perm_error:
		pcibios_remove_pci_devices(frozen_bus);
}

static void eeh_handle_special_event(void)
{
	struct eeh_pe *pe, *phb_pe;
	struct pci_bus *bus;
	struct pci_controller *hose, *tmp;
	unsigned long flags;
	int rc = 0;

	/*
	 * The return value from next_error() has been classified as follows.
	 * It might be good to enumerate them. However, next_error() is only
	 * supported by the PowerNV platform for now, so it is fine to use
	 * the integer directly:
	 *
	 * 4 - Dead IOC    3 - Dead PHB
	 * 2 - Fenced PHB  1 - Frozen PE
	 * 0 - No error found
	 */
	rc = eeh_ops->next_error(&pe);
	if (rc <= 0)
		return;

	switch (rc) {
	case 4:
		/* Mark all PHBs in dead state */
		eeh_serialize_lock(&flags);
		list_for_each_entry_safe(hose, tmp,
				&hose_list, list_node) {
			phb_pe = eeh_phb_pe_get(hose);
			if (!phb_pe) continue;

			eeh_pe_state_mark(phb_pe,
				EEH_PE_ISOLATED | EEH_PE_PHB_DEAD);
		}
		eeh_serialize_unlock(flags);

		/* Purge all events */
		eeh_remove_event(NULL);
		break;
	case 3:
	case 2:
	case 1:
		/* Mark the PE in fenced state */
		eeh_serialize_lock(&flags);
		if (rc == 3)
			eeh_pe_state_mark(pe,
				EEH_PE_ISOLATED | EEH_PE_PHB_DEAD);
		else
			eeh_pe_state_mark(pe,
				EEH_PE_ISOLATED | EEH_PE_RECOVERING);
		eeh_serialize_unlock(flags);

		/* Purge all events of the PHB */
		eeh_remove_event(pe);
		break;
	default:
		pr_err("%s: Invalid value %d from next_error()\n",
		       __func__, rc);
		return;
	}

	/*
	 * A fenced PHB or a frozen PE is handled as a normal
	 * event. For a dead PHB or IOC we have to remove the
	 * affected PHBs.
	 */
	if (rc == 2 || rc == 1)
		eeh_handle_normal_event(pe);
	else {
		list_for_each_entry_safe(hose, tmp,
			&hose_list, list_node) {
			phb_pe = eeh_phb_pe_get(hose);
			if (!phb_pe || !(phb_pe->state & EEH_PE_PHB_DEAD))
				continue;

			bus = eeh_pe_bus_get(phb_pe);
			/* Notify all devices that they're about to go down. */
			eeh_pe_dev_traverse(pe, eeh_report_failure, NULL);
			pcibios_remove_pci_devices(bus);
		}
	}
}

/**
 * eeh_handle_event - Reset a PCI device after hard lockup.
 * @pe: EEH PE
 *
 * When the PHB detects address or data parity errors on a particular PCI
 * slot, the associated PE will be frozen. Besides, DMAs to wild addresses
 * (which usually happen due to bugs in device drivers or in PCI adapter
 * firmware) can cause EEH errors. #SERR, #PERR or other misc PCI-related
 * errors also can trigger EEH errors.
 *
 * The recovery process consists of unplugging the device driver (which
 * generates hotplug events to userspace), then issuing a PCI #RST to
 * the device, then reconfiguring the PCI config space for all bridges
 * & devices under this slot, and finally restarting the device
 * drivers (which causes a second set of hotplug events to go out to
 * userspace).
 */
void eeh_handle_event(struct eeh_pe *pe)
{
	if (pe)
		eeh_handle_normal_event(pe);
	else
		eeh_handle_special_event();
}
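For orientation (not part of the patch): after this hunk, eeh_handle_event() is only a dispatcher -- a valid PE means ordinary recovery, a NULL PE means "ask the platform what broke". A minimal, runnable userspace sketch of that shape; every name here is an illustrative stand-in, not the kernel API:

	/* sketch: two-path EEH event dispatch, stand-in types and names */
	#include <stdio.h>
	#include <stddef.h>

	struct eeh_pe_sketch { int addr; };

	static void handle_normal(struct eeh_pe_sketch *pe)
	{
		printf("recover PE#%x\n", pe->addr);	/* per-PE recovery */
	}

	static void handle_special(void)
	{
		/* poll the platform: dead IOC/PHB, fenced PHB, frozen PE */
		printf("poll platform for special errors\n");
	}

	static void handle_event(struct eeh_pe_sketch *pe)
	{
		if (pe)
			handle_normal(pe);
		else
			handle_special();
	}

	int main(void)
	{
		struct eeh_pe_sketch pe = { .addr = 0x1 };
		handle_event(&pe);	/* normal recovery path */
		handle_event(NULL);	/* special, platform-wide path */
		return 0;
	}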
@@ -18,11 +18,10 @@

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <asm/eeh_event.h>
#include <asm/ppc-pci.h>
@@ -35,14 +34,9 @@
 * work-queue, where a worker thread can drive recovery.
 */

/* EEH event workqueue setup. */
static DEFINE_SPINLOCK(eeh_eventlist_lock);
static struct semaphore eeh_eventlist_sem;
LIST_HEAD(eeh_eventlist);
static void eeh_thread_launcher(struct work_struct *);
DECLARE_WORK(eeh_event_wq, eeh_thread_launcher);

/* Serialize reset sequences for a given pci device */
DEFINE_MUTEX(eeh_event_mutex);

/**
 * eeh_event_handler - Dispatch EEH events.
@@ -60,55 +54,63 @@ static int eeh_event_handler(void *dummy)
	struct eeh_event *event;
	struct eeh_pe *pe;

	spin_lock_irqsave(&eeh_eventlist_lock, flags);
	event = NULL;
	while (!kthread_should_stop()) {
		if (down_interruptible(&eeh_eventlist_sem))
			break;

		/* Unqueue the event, get ready to process. */
		if (!list_empty(&eeh_eventlist)) {
			event = list_entry(eeh_eventlist.next, struct eeh_event, list);
			list_del(&event->list);
		}
		spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
		/* Fetch EEH event from the queue */
		spin_lock_irqsave(&eeh_eventlist_lock, flags);
		event = NULL;
		if (!list_empty(&eeh_eventlist)) {
			event = list_entry(eeh_eventlist.next,
					   struct eeh_event, list);
			list_del(&event->list);
		}
		spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
		if (!event)
			continue;

		if (event == NULL)
			return 0;

		/* Serialize processing of EEH events */
		mutex_lock(&eeh_event_mutex);
		pe = event->pe;
		eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
		pr_info("EEH: Detected PCI bus error on PHB#%d-PE#%x\n",
			pe->phb->global_number, pe->addr);

		set_current_state(TASK_INTERRUPTIBLE);	/* Don't add to load average */
		eeh_handle_event(pe);
		eeh_pe_state_clear(pe, EEH_PE_RECOVERING);

		kfree(event);
		mutex_unlock(&eeh_event_mutex);

		/* If there are no new errors after an hour, clear the counter. */
		if (pe && pe->freeze_count > 0) {
			msleep_interruptible(3600*1000);
			if (pe->freeze_count > 0)
				pe->freeze_count--;
		/* We might have an event without a bound PE */
		pe = event->pe;
		if (pe) {
			eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
			pr_info("EEH: Detected PCI bus error on PHB#%d-PE#%x\n",
				pe->phb->global_number, pe->addr);
			eeh_handle_event(pe);
			eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
		} else {
			eeh_handle_event(NULL);
		}

		kfree(event);
	}

	return 0;
}

/**
 * eeh_thread_launcher - Start kernel thread to handle EEH events
 * @dummy - unused
 * eeh_event_init - Start kernel thread to handle EEH events
 *
 * This routine is called to start the kernel thread for processing
 * EEH events.
 */
static void eeh_thread_launcher(struct work_struct *dummy)
int eeh_event_init(void)
{
	if (IS_ERR(kthread_run(eeh_event_handler, NULL, "eehd")))
		printk(KERN_ERR "Failed to start EEH daemon\n");
	struct task_struct *t;
	int ret = 0;

	/* Initialize semaphore */
	sema_init(&eeh_eventlist_sem, 0);

	t = kthread_run(eeh_event_handler, NULL, "eehd");
	if (IS_ERR(t)) {
		ret = PTR_ERR(t);
		pr_err("%s: Failed to start EEH daemon (%d)\n",
		       __func__, ret);
		return ret;
	}

	return 0;
}

/**
@@ -136,7 +138,45 @@ int eeh_send_failure_event(struct eeh_pe *pe)
	list_add(&event->list, &eeh_eventlist);
	spin_unlock_irqrestore(&eeh_eventlist_lock, flags);

	schedule_work(&eeh_event_wq);
	/* For the EEH daemon to kick in */
	up(&eeh_eventlist_sem);

	return 0;
}

/**
 * eeh_remove_event - Remove EEH event from the queue
 * @pe: Event binding to the PE
 *
 * On the PowerNV platform, we might get subsequent events that are
 * really part of an earlier one. In that case the subsequent events
 * are entirely duplicated and unnecessary, so they should be removed.
 */
void eeh_remove_event(struct eeh_pe *pe)
{
	unsigned long flags;
	struct eeh_event *event, *tmp;

	spin_lock_irqsave(&eeh_eventlist_lock, flags);
	list_for_each_entry_safe(event, tmp, &eeh_eventlist, list) {
		/*
		 * If no valid PE is passed in, that means we already
		 * have an event corresponding to a dead IOC and all
		 * events should be purged.
		 */
		if (!pe) {
			list_del(&event->list);
			kfree(event);
		} else if (pe->type & EEH_PE_PHB) {
			if (event->pe && event->pe->phb == pe->phb) {
				list_del(&event->list);
				kfree(event);
			}
		} else if (event->pe == pe) {
			list_del(&event->list);
			kfree(event);
		}
	}
	spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
}
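A note on the shape of this change (not from the patch itself): the workqueue relaunch is replaced by a long-lived daemon that sleeps on a counting semaphore which producers up(). A minimal standalone POSIX sketch of the same producer/consumer pattern, using plain pthread/semaphore stand-ins rather than the kernel primitives:

	/* sketch: semaphore-driven event daemon; compile with -pthread */
	#include <pthread.h>
	#include <semaphore.h>
	#include <stdio.h>

	static sem_t event_sem;
	static int pending;		/* stand-in for eeh_eventlist */
	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

	static void *event_daemon(void *unused)
	{
		for (int i = 0; i < 3; i++) {
			sem_wait(&event_sem);	/* sleep until an event arrives */
			pthread_mutex_lock(&list_lock);
			pending--;		/* "dequeue" one event */
			pthread_mutex_unlock(&list_lock);
			printf("handled one event\n");
		}
		return NULL;
	}

	int main(void)
	{
		pthread_t t;
		sem_init(&event_sem, 0, 0);
		pthread_create(&t, NULL, event_daemon, NULL);
		for (int i = 0; i < 3; i++) {	/* producer side */
			pthread_mutex_lock(&list_lock);
			pending++;		/* queue an event */
			pthread_mutex_unlock(&list_lock);
			sem_post(&event_sem);	/* the "up(&eeh_eventlist_sem)" */
		}
		pthread_join(t, NULL);
		return 0;
	}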
@@ -22,6 +22,7 @@
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/init.h>
@@ -78,9 +79,7 @@ int eeh_phb_pe_create(struct pci_controller *phb)
	}

	/* Put it into the list */
	eeh_lock();
	list_add_tail(&pe->child, &eeh_phb_pe);
	eeh_unlock();

	pr_debug("EEH: Add PE for PHB#%d\n", phb->global_number);

@@ -95,7 +94,7 @@ int eeh_phb_pe_create(struct pci_controller *phb)
 * hierarchy tree is composed of PHB PEs. The function is used
 * to retrieve the corresponding PHB PE according to the given PHB.
 */
static struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb)
struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb)
{
	struct eeh_pe *pe;

@@ -185,21 +184,15 @@ void *eeh_pe_dev_traverse(struct eeh_pe *root,
		return NULL;
	}

	eeh_lock();

	/* Traverse root PE */
	for (pe = root; pe; pe = eeh_pe_next(pe, root)) {
		eeh_pe_for_each_dev(pe, edev) {
			ret = fn(edev, flag);
			if (ret) {
				eeh_unlock();
			if (ret)
				return ret;
			}
		}
	}

	eeh_unlock();

	return NULL;
}

@@ -228,7 +221,7 @@ static void *__eeh_pe_get(void *data, void *flag)
		return pe;

	/* Try BDF address */
	if (edev->pe_config_addr &&
	if (edev->config_addr &&
	    (edev->config_addr == pe->config_addr))
		return pe;

@@ -246,7 +239,7 @@ static void *__eeh_pe_get(void *data, void *flag)
 * which is composed of PCI bus/device/function number, or unified
 * PE address.
 */
static struct eeh_pe *eeh_pe_get(struct eeh_dev *edev)
struct eeh_pe *eeh_pe_get(struct eeh_dev *edev)
{
	struct eeh_pe *root = eeh_phb_pe_get(edev->phb);
	struct eeh_pe *pe;
@@ -305,8 +298,6 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
{
	struct eeh_pe *pe, *parent;

	eeh_lock();

	/*
	 * Search for whether the PE already exists according
	 * to the PE address. If it already exists, the
@@ -316,7 +307,6 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
	pe = eeh_pe_get(edev);
	if (pe && !(pe->type & EEH_PE_INVALID)) {
		if (!edev->pe_config_addr) {
			eeh_unlock();
			pr_err("%s: PE with addr 0x%x already exists\n",
				__func__, edev->config_addr);
			return -EEXIST;
@@ -328,7 +318,6 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)

		/* Put the edev to PE */
		list_add_tail(&edev->list, &pe->edevs);
		eeh_unlock();
		pr_debug("EEH: Add %s to Bus PE#%x\n",
			edev->dn->full_name, pe->addr);

@@ -347,7 +336,6 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
			parent->type &= ~EEH_PE_INVALID;
			parent = parent->parent;
		}
		eeh_unlock();
		pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n",
			edev->dn->full_name, pe->addr, pe->parent->addr);

@@ -357,13 +345,23 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
	/* Create a new EEH PE */
	pe = eeh_pe_alloc(edev->phb, EEH_PE_DEVICE);
	if (!pe) {
		eeh_unlock();
		pr_err("%s: out of memory!\n", __func__);
		return -ENOMEM;
	}
	pe->addr = edev->pe_config_addr;
	pe->config_addr = edev->config_addr;

	/*
	 * While doing PE reset, we probably hot-reset the
	 * upstream bridge. However, the PCI devices, including
	 * the associated EEH devices, might be removed while the
	 * EEH core is doing recovery, so it isn't safe to retrieve
	 * the bridge through a downstream EEH device. We have to
	 * trace the parent PCI bus, then the upstream bridge.
	 */
	if (eeh_probe_mode_dev())
		pe->bus = eeh_dev_to_pci_dev(edev)->bus;

	/*
	 * Put the new EEH PE into hierarchy tree. If the parent
	 * can't be found, the newly created PE will be attached
@@ -374,7 +372,6 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
	if (!parent) {
		parent = eeh_phb_pe_get(edev->phb);
		if (!parent) {
			eeh_unlock();
			pr_err("%s: No PHB PE is found (PHB Domain=%d)\n",
				__func__, edev->phb->global_number);
			edev->pe = NULL;
@@ -391,7 +388,6 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
	list_add_tail(&pe->child, &parent->child_list);
	list_add_tail(&edev->list, &pe->edevs);
	edev->pe = pe;
	eeh_unlock();
	pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n",
		edev->dn->full_name, pe->addr, pe->parent->addr);

@@ -419,8 +415,6 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev, int purge_pe)
		return -EEXIST;
	}

	eeh_lock();

	/* Remove the EEH device */
	pe = edev->pe;
	edev->pe = NULL;
@@ -465,11 +459,36 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev, int purge_pe)
		pe = parent;
	}

	eeh_unlock();

	return 0;
}

/**
 * eeh_pe_update_time_stamp - Update PE's frozen time stamp
 * @pe: EEH PE
 *
 * We keep a time stamp for each PE to trace the time it got
 * frozen in the last hour. The function should be called to update
 * the time stamp on the first error of a specific PE. On the other
 * hand, we needn't account for errors that happened more than an
 * hour ago.
 */
void eeh_pe_update_time_stamp(struct eeh_pe *pe)
{
	struct timeval tstamp;

	if (!pe) return;

	if (pe->freeze_count <= 0) {
		pe->freeze_count = 0;
		do_gettimeofday(&pe->tstamp);
	} else {
		do_gettimeofday(&tstamp);
		if (tstamp.tv_sec - pe->tstamp.tv_sec > 3600) {
			pe->tstamp = tstamp;
			pe->freeze_count = 0;
		}
	}
}

/**
 * __eeh_pe_state_mark - Mark the state for the PE
 * @data: EEH PE
@@ -512,9 +531,7 @@ static void *__eeh_pe_state_mark(void *data, void *flag)
 */
void eeh_pe_state_mark(struct eeh_pe *pe, int state)
{
	eeh_lock();
	eeh_pe_traverse(pe, __eeh_pe_state_mark, &state);
	eeh_unlock();
}

/**
@@ -548,35 +565,135 @@ static void *__eeh_pe_state_clear(void *data, void *flag)
 */
void eeh_pe_state_clear(struct eeh_pe *pe, int state)
{
	eeh_lock();
	eeh_pe_traverse(pe, __eeh_pe_state_clear, &state);
	eeh_unlock();
}

/**
 * eeh_restore_one_device_bars - Restore the Base Address Registers for one device
 * @data: EEH device
 * @flag: Unused
/*
 * Some PCI bridges (e.g. PLX bridges) have primary/secondary
 * buses assigned explicitly by firmware, and we probably have
 * lost that after reset. So we have to delay the check until
 * the PCI-CFG registers have been restored for the parent
 * bridge.
 *
 * Loads the PCI configuration space base address registers,
 * the expansion ROM base address, the latency timer, and etc.
 * from the saved values in the device node.
 * Don't use the normal PCI-CFG accessors, which have probably been
 * blocked on the normal path during this stage. We need to use the
 * eeh operations instead, which are always permitted.
 */
static void *eeh_restore_one_device_bars(void *data, void *flag)
static void eeh_bridge_check_link(struct pci_dev *pdev,
				  struct device_node *dn)
{
	int cap;
	uint32_t val;
	int timeout = 0;

	/*
	 * We only check root ports and downstream ports of
	 * PCIe switches
	 */
	if (!pci_is_pcie(pdev) ||
	    (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT &&
	     pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM))
		return;

	pr_debug("%s: Check PCIe link for %s ...\n",
		 __func__, pci_name(pdev));

	/* Check slot status */
	cap = pdev->pcie_cap;
	eeh_ops->read_config(dn, cap + PCI_EXP_SLTSTA, 2, &val);
	if (!(val & PCI_EXP_SLTSTA_PDS)) {
		pr_debug("  No card in the slot (0x%04x)!\n", val);
		return;
	}

	/* Check power status if we have the capability */
	eeh_ops->read_config(dn, cap + PCI_EXP_SLTCAP, 2, &val);
	if (val & PCI_EXP_SLTCAP_PCP) {
		eeh_ops->read_config(dn, cap + PCI_EXP_SLTCTL, 2, &val);
		if (val & PCI_EXP_SLTCTL_PCC) {
			pr_debug("  In power-off state, power it on ...\n");
			val &= ~(PCI_EXP_SLTCTL_PCC | PCI_EXP_SLTCTL_PIC);
			val |= (0x0100 & PCI_EXP_SLTCTL_PIC);
			eeh_ops->write_config(dn, cap + PCI_EXP_SLTCTL, 2, val);
			msleep(2 * 1000);
		}
	}

	/* Enable link */
	eeh_ops->read_config(dn, cap + PCI_EXP_LNKCTL, 2, &val);
	val &= ~PCI_EXP_LNKCTL_LD;
	eeh_ops->write_config(dn, cap + PCI_EXP_LNKCTL, 2, val);

	/* Check link */
	eeh_ops->read_config(dn, cap + PCI_EXP_LNKCAP, 4, &val);
	if (!(val & PCI_EXP_LNKCAP_DLLLARC)) {
		pr_debug("  No link reporting capability (0x%08x)\n", val);
		msleep(1000);
		return;
	}

	/* Wait until the link is up, or give up after 5s */
	timeout = 0;
	while (timeout < 5000) {
		msleep(20);
		timeout += 20;

		eeh_ops->read_config(dn, cap + PCI_EXP_LNKSTA, 2, &val);
		if (val & PCI_EXP_LNKSTA_DLLLA)
			break;
	}

	if (val & PCI_EXP_LNKSTA_DLLLA)
		pr_debug("  Link up (%s)\n",
			 (val & PCI_EXP_LNKSTA_CLS_2_5GB) ? "2.5GB" : "5GB");
	else
		pr_debug("  Link not ready (0x%04x)\n", val);
}

#define BYTE_SWAP(OFF)	(8*((OFF)/4)+3-(OFF))
#define SAVED_BYTE(OFF)	(((u8 *)(edev->config_space))[BYTE_SWAP(OFF)])

static void eeh_restore_bridge_bars(struct pci_dev *pdev,
				    struct eeh_dev *edev,
				    struct device_node *dn)
{
	int i;

	/*
	 * Device BARs: 0x10 - 0x18
	 * Bus numbers and windows: 0x18 - 0x30
	 */
	for (i = 4; i < 13; i++)
		eeh_ops->write_config(dn, i*4, 4, edev->config_space[i]);
	/* Rom: 0x38 */
	eeh_ops->write_config(dn, 14*4, 4, edev->config_space[14]);

	/* Cache line & Latency timer: 0xC 0xD */
	eeh_ops->write_config(dn, PCI_CACHE_LINE_SIZE, 1,
		SAVED_BYTE(PCI_CACHE_LINE_SIZE));
	eeh_ops->write_config(dn, PCI_LATENCY_TIMER, 1,
		SAVED_BYTE(PCI_LATENCY_TIMER));
	/* Max latency, min grant, interrupt pin and line: 0x3C */
	eeh_ops->write_config(dn, 15*4, 4, edev->config_space[15]);

	/* PCI Command: 0x4 */
	eeh_ops->write_config(dn, PCI_COMMAND, 4, edev->config_space[1]);

	/* Check the PCIe link is ready */
	eeh_bridge_check_link(pdev, dn);
}

static void eeh_restore_device_bars(struct eeh_dev *edev,
				    struct device_node *dn)
{
	int i;
	u32 cmd;
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct device_node *dn = eeh_dev_to_of_node(edev);

	for (i = 4; i < 10; i++)
		eeh_ops->write_config(dn, i*4, 4, edev->config_space[i]);
	/* 12 == Expansion ROM Address */
	eeh_ops->write_config(dn, 12*4, 4, edev->config_space[12]);

#define BYTE_SWAP(OFF)	(8*((OFF)/4)+3-(OFF))
#define SAVED_BYTE(OFF)	(((u8 *)(edev->config_space))[BYTE_SWAP(OFF)])

	eeh_ops->write_config(dn, PCI_CACHE_LINE_SIZE, 1,
		SAVED_BYTE(PCI_CACHE_LINE_SIZE));
	eeh_ops->write_config(dn, PCI_LATENCY_TIMER, 1,
@@ -599,6 +716,34 @@ static void *eeh_restore_one_device_bars(void *data, void *flag)
	else
		cmd &= ~PCI_COMMAND_SERR;
	eeh_ops->write_config(dn, PCI_COMMAND, 4, cmd);
}

/**
 * eeh_restore_one_device_bars - Restore the Base Address Registers for one device
 * @data: EEH device
 * @flag: Unused
 *
 * Loads the PCI configuration space base address registers,
 * the expansion ROM base address, the latency timer, and etc.
 * from the saved values in the device node.
 */
static void *eeh_restore_one_device_bars(void *data, void *flag)
{
	struct pci_dev *pdev = NULL;
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct device_node *dn = eeh_dev_to_of_node(edev);

	/* Trace the PCI bridge */
	if (eeh_probe_mode_dev()) {
		pdev = eeh_dev_to_pci_dev(edev);
		if (pdev->hdr_type != PCI_HEADER_TYPE_BRIDGE)
			pdev = NULL;
	}

	if (pdev)
		eeh_restore_bridge_bars(pdev, edev, dn);
	else
		eeh_restore_device_bars(edev, dn);

	return NULL;
}
@@ -635,19 +780,21 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
	struct eeh_dev *edev;
	struct pci_dev *pdev;

	eeh_lock();

	if (pe->type & EEH_PE_PHB) {
		bus = pe->phb->bus;
	} else if (pe->type & EEH_PE_BUS ||
		   pe->type & EEH_PE_DEVICE) {
		if (pe->bus) {
			bus = pe->bus;
			goto out;
		}

		edev = list_first_entry(&pe->edevs, struct eeh_dev, list);
		pdev = eeh_dev_to_pci_dev(edev);
		if (pdev)
			bus = pdev->bus;
	}

	eeh_unlock();

out:
	return bus;
}
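The BYTE_SWAP()/SAVED_BYTE() macros above pick a single byte out of the config space saved as an array of words with reversed byte order. A standalone check of the arithmetic (not from the patch; offsets 0..7 map to bytes 3,2,1,0,7,6,5,4 -- i.e. byte order is reversed within each 4-byte word):

	/* sketch: verify the BYTE_SWAP() index arithmetic */
	#include <stdio.h>

	#define BYTE_SWAP(off)	(8 * ((off) / 4) + 3 - (off))

	int main(void)
	{
		for (int off = 0; off < 8; off++)
			printf("cfg byte %d -> saved byte %d\n",
			       off, BYTE_SWAP(off));
		return 0;
	}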
@@ -72,4 +72,3 @@ void eeh_sysfs_remove_device(struct pci_dev *pdev)
	device_remove_file(&pdev->dev, &dev_attr_eeh_config_addr);
	device_remove_file(&pdev->dev, &dev_attr_eeh_pe_config_addr);
}
@@ -629,21 +629,43 @@ _GLOBAL(ret_from_except_lite)

	CURRENT_THREAD_INFO(r9, r1)
	ld	r3,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3E
	ld	r10,PACACURRENT(r13)
#endif /* CONFIG_PPC_BOOK3E */
	ld	r4,TI_FLAGS(r9)
	andi.	r3,r3,MSR_PR
	beq	resume_kernel
#ifdef CONFIG_PPC_BOOK3E
	lwz	r3,(THREAD+THREAD_DBCR0)(r10)
#endif /* CONFIG_PPC_BOOK3E */

	/* Check current_thread_info()->flags */
	andi.	r0,r4,_TIF_USER_WORK_MASK
#ifdef CONFIG_PPC_BOOK3E
	bne	1f
	/*
	 * Check to see if the dbcr0 register is set up to debug.
	 * Use the internal debug mode bit to do this.
	 */
	andis.	r0,r3,DBCR0_IDM@h
	beq	restore

	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	1f
	mfmsr	r0
	rlwinm	r0,r0,0,~MSR_DE	/* Clear MSR.DE */
	mtmsr	r0
	mtspr	SPRN_DBCR0,r3
	li	r10, -1
	mtspr	SPRN_DBSR,r10
	b	restore
#else
	beq	restore
#endif
1:	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	2f
	bl	.restore_interrupts
	SCHEDULE_USER
	b	.ret_from_except_lite

1:	bl	.save_nvgprs
2:	bl	.save_nvgprs
	bl	.restore_interrupts
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_notify_resume
@@ -341,10 +341,17 @@ vsx_unavailable_pSeries_1:
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	vsx_unavailable_pSeries

facility_unavailable_trampoline:
	. = 0xf60
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	tm_unavailable_pSeries
	b	facility_unavailable_pSeries

hv_facility_unavailable_trampoline:
	. = 0xf80
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	facility_unavailable_hv

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
@@ -522,8 +529,10 @@ denorm_done:
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
	STD_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
	STD_EXCEPTION_PSERIES_OOL(0xf60, tm_unavailable)
	STD_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf60)
	STD_EXCEPTION_HV_OOL(0xf82, facility_unavailable)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xf82)

/*
 * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
@@ -793,14 +802,10 @@ system_call_relon_pSeries:
	STD_RELON_EXCEPTION_PSERIES(0x4d00, 0xd00, single_step)

	. = 0x4e00
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	h_data_storage_relon_hv
	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */

	. = 0x4e20
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	h_instr_storage_relon_hv
	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */

	. = 0x4e40
	SET_SCRATCH0(r13)
@@ -808,9 +813,7 @@ system_call_relon_pSeries:
	b	emulation_assist_relon_hv

	. = 0x4e60
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	hmi_exception_relon_hv
	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */

	. = 0x4e80
	SET_SCRATCH0(r13)
@@ -835,11 +838,17 @@ vsx_unavailable_relon_pSeries_1:
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	vsx_unavailable_relon_pSeries

tm_unavailable_relon_pSeries_1:
facility_unavailable_relon_trampoline:
	. = 0x4f60
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	tm_unavailable_relon_pSeries
	b	facility_unavailable_relon_pSeries

hv_facility_unavailable_relon_trampoline:
	. = 0x4f80
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	facility_unavailable_relon_hv

	STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
#ifdef CONFIG_PPC_DENORMALISATION
@@ -1165,36 +1174,21 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
	bl	.vsx_unavailable_exception
	b	.ret_from_except

	.align	7
	.globl	tm_unavailable_common
tm_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf60, PACA_EXGEN)
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.tm_unavailable_exception
	b	.ret_from_except
	STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception)

	.align	7
	.globl	__end_handlers
__end_handlers:

/* Equivalents to the above handlers for relocation-on interrupt vectors */
	STD_RELON_EXCEPTION_HV_OOL(0xe00, h_data_storage)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe00)
	STD_RELON_EXCEPTION_HV_OOL(0xe20, h_instr_storage)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe20)
	STD_RELON_EXCEPTION_HV_OOL(0xe40, emulation_assist)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe40)
	STD_RELON_EXCEPTION_HV_OOL(0xe60, hmi_exception)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe60)
	MASKABLE_RELON_EXCEPTION_HV_OOL(0xe80, h_doorbell)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe80)

	STD_RELON_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
	STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
	STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
	STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, tm_unavailable)
	STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
	STD_RELON_EXCEPTION_HV_OOL(0xf80, facility_unavailable)

#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
@@ -176,7 +176,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
		length_max = 512; /* 64 doublewords */
		/* DAWR region can't cross a 512-byte boundary */
		if ((bp->attr.bp_addr >> 10) !=
		    ((bp->attr.bp_addr + bp->attr.bp_len) >> 10))
		    ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 10))
			return -EINVAL;
	}
	if (info->len >
@@ -250,6 +250,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args)
	 * we still need to single-step the instruction, but we don't
	 * generate an event.
	 */
	info->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;
	if (!((bp->attr.bp_addr <= dar) &&
	      (dar - bp->attr.bp_addr < bp->attr.bp_len)))
		info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
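Why the "- 1" in this fix matters: the last byte covered by the watched region [addr, addr + len) is addr + len - 1, not addr + len, so a region ending exactly on a block boundary was wrongly rejected. A standalone check (not from the patch; it uses the same >> 10 block arithmetic as the hunk):

	/* sketch: the old vs. new DAWR boundary test */
	#include <stdio.h>

	static int crosses(unsigned long addr, unsigned long len, int fixed)
	{
		unsigned long end = fixed ? addr + len - 1 : addr + len;
		return (addr >> 10) != (end >> 10);	/* different block? */
	}

	int main(void)
	{
		/* region 0x3f8..0x3ff ends exactly at a block boundary */
		unsigned long addr = 0x3f8, len = 8;
		printf("old check: %s\n", crosses(addr, len, 0) ? "reject" : "accept");
		printf("new check: %s\n", crosses(addr, len, 1) ? "reject" : "accept");
		return 0;
	}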
@@ -85,7 +85,7 @@ int powersave_nap;

/*
 * Register the sysctl to set/clear powersave_nap.
 */
static ctl_table powersave_nap_ctl_table[]={
static struct ctl_table powersave_nap_ctl_table[] = {
	{
		.procname	= "powersave-nap",
		.data		= &powersave_nap,
@@ -95,7 +95,7 @@ static ctl_table powersave_nap_ctl_table[]={
	},
	{}
};
static ctl_table powersave_nap_sysctl_root[] = {
static struct ctl_table powersave_nap_sysctl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
@@ -55,6 +55,7 @@ static struct iowa_bus *iowa_pci_find(unsigned long vaddr, unsigned long paddr)

struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
{
	unsigned hugepage_shift;
	struct iowa_bus *bus;
	int token;

@@ -70,11 +71,17 @@ struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
	if (vaddr < PHB_IO_BASE || vaddr >= PHB_IO_END)
		return NULL;

	ptep = find_linux_pte(init_mm.pgd, vaddr);
	ptep = find_linux_pte_or_hugepte(init_mm.pgd, vaddr,
					 &hugepage_shift);
	if (ptep == NULL)
		paddr = 0;
	else
	else {
		/*
		 * We don't have hugepages backing iomem
		 */
		WARN_ON(hugepage_shift);
		paddr = pte_pfn(*ptep) << PAGE_SHIFT;
	}
	bus = iowa_pci_find(vaddr, paddr);

	if (bus == NULL)
@@ -36,6 +36,8 @@
#include <linux/hash.h>
#include <linux/fault-inject.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
@@ -44,6 +46,7 @@
#include <asm/kdump.h>
#include <asm/fadump.h>
#include <asm/vio.h>
#include <asm/tce.h>

#define DBG(...)

@@ -724,6 +727,13 @@ void iommu_free_table(struct iommu_table *tbl, const char *node_name)
	if (tbl->it_offset == 0)
		clear_bit(0, tbl->it_map);

#ifdef CONFIG_IOMMU_API
	if (tbl->it_group) {
		iommu_group_put(tbl->it_group);
		BUG_ON(tbl->it_group);
	}
#endif

	/* verify that table contains no entries */
	if (!bitmap_empty(tbl->it_map, tbl->it_size))
		pr_warn("%s: Unexpected TCEs for %s\n", __func__, node_name);
@@ -860,3 +870,316 @@ void iommu_free_coherent(struct iommu_table *tbl, size_t size,
		free_pages((unsigned long)vaddr, get_order(size));
	}
}

#ifdef CONFIG_IOMMU_API
/*
 * SPAPR TCE API
 */
static void group_release(void *iommu_data)
{
	struct iommu_table *tbl = iommu_data;
	tbl->it_group = NULL;
}

void iommu_register_group(struct iommu_table *tbl,
		int pci_domain_number, unsigned long pe_num)
{
	struct iommu_group *grp;
	char *name;

	grp = iommu_group_alloc();
	if (IS_ERR(grp)) {
		pr_warn("powerpc iommu api: cannot create new group, err=%ld\n",
				PTR_ERR(grp));
		return;
	}
	tbl->it_group = grp;
	iommu_group_set_iommudata(grp, tbl, group_release);
	name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
			pci_domain_number, pe_num);
	if (!name)
		return;
	iommu_group_set_name(grp, name);
	kfree(name);
}

enum dma_data_direction iommu_tce_direction(unsigned long tce)
{
	if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
		return DMA_BIDIRECTIONAL;
	else if (tce & TCE_PCI_READ)
		return DMA_TO_DEVICE;
	else if (tce & TCE_PCI_WRITE)
		return DMA_FROM_DEVICE;
	else
		return DMA_NONE;
}
EXPORT_SYMBOL_GPL(iommu_tce_direction);

void iommu_flush_tce(struct iommu_table *tbl)
{
	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();
}
EXPORT_SYMBOL_GPL(iommu_flush_tce);

int iommu_tce_clear_param_check(struct iommu_table *tbl,
		unsigned long ioba, unsigned long tce_value,
		unsigned long npages)
{
	/* ppc_md.tce_free() does not support any value but 0 */
	if (tce_value)
		return -EINVAL;

	if (ioba & ~IOMMU_PAGE_MASK)
		return -EINVAL;

	ioba >>= IOMMU_PAGE_SHIFT;
	if (ioba < tbl->it_offset)
		return -EINVAL;

	if ((ioba + npages) > (tbl->it_offset + tbl->it_size))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_clear_param_check);

int iommu_tce_put_param_check(struct iommu_table *tbl,
		unsigned long ioba, unsigned long tce)
{
	if (!(tce & (TCE_PCI_WRITE | TCE_PCI_READ)))
		return -EINVAL;

	if (tce & ~(IOMMU_PAGE_MASK | TCE_PCI_WRITE | TCE_PCI_READ))
		return -EINVAL;

	if (ioba & ~IOMMU_PAGE_MASK)
		return -EINVAL;

	ioba >>= IOMMU_PAGE_SHIFT;
	if (ioba < tbl->it_offset)
		return -EINVAL;

	if ((ioba + 1) > (tbl->it_offset + tbl->it_size))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_put_param_check);

unsigned long iommu_clear_tce(struct iommu_table *tbl, unsigned long entry)
{
	unsigned long oldtce;
	struct iommu_pool *pool = get_pool(tbl, entry);

	spin_lock(&(pool->lock));

	oldtce = ppc_md.tce_get(tbl, entry);
	if (oldtce & (TCE_PCI_WRITE | TCE_PCI_READ))
		ppc_md.tce_free(tbl, entry, 1);
	else
		oldtce = 0;

	spin_unlock(&(pool->lock));

	return oldtce;
}
EXPORT_SYMBOL_GPL(iommu_clear_tce);

int iommu_clear_tces_and_put_pages(struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	unsigned long oldtce;
	struct page *page;

	for ( ; pages; --pages, ++entry) {
		oldtce = iommu_clear_tce(tbl, entry);
		if (!oldtce)
			continue;

		page = pfn_to_page(oldtce >> PAGE_SHIFT);
		WARN_ON(!page);
		if (page) {
			if (oldtce & TCE_PCI_WRITE)
				SetPageDirty(page);
			put_page(page);
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_clear_tces_and_put_pages);

/*
 * hwaddr is a kernel virtual address here (0xc... bazillion),
 * tce_build converts it to a physical address.
 */
int iommu_tce_build(struct iommu_table *tbl, unsigned long entry,
		unsigned long hwaddr, enum dma_data_direction direction)
{
	int ret = -EBUSY;
	unsigned long oldtce;
	struct iommu_pool *pool = get_pool(tbl, entry);

	spin_lock(&(pool->lock));

	oldtce = ppc_md.tce_get(tbl, entry);
	/* Add new entry if it is not busy */
	if (!(oldtce & (TCE_PCI_WRITE | TCE_PCI_READ)))
		ret = ppc_md.tce_build(tbl, entry, 1, hwaddr, direction, NULL);

	spin_unlock(&(pool->lock));

	/* if (unlikely(ret))
		pr_err("iommu_tce: %s failed on hwaddr=%lx ioba=%lx kva=%lx ret=%d\n",
			__func__, hwaddr, entry << IOMMU_PAGE_SHIFT,
			hwaddr, ret); */

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_tce_build);

int iommu_put_tce_user_mode(struct iommu_table *tbl, unsigned long entry,
		unsigned long tce)
{
	int ret;
	struct page *page = NULL;
	unsigned long hwaddr, offset = tce & IOMMU_PAGE_MASK & ~PAGE_MASK;
	enum dma_data_direction direction = iommu_tce_direction(tce);

	ret = get_user_pages_fast(tce & PAGE_MASK, 1,
			direction != DMA_TO_DEVICE, &page);
	if (unlikely(ret != 1)) {
		/* pr_err("iommu_tce: get_user_pages_fast failed tce=%lx ioba=%lx ret=%d\n",
			tce, entry << IOMMU_PAGE_SHIFT, ret); */
		return -EFAULT;
	}
	hwaddr = (unsigned long) page_address(page) + offset;

	ret = iommu_tce_build(tbl, entry, hwaddr, direction);
	if (ret)
		put_page(page);

	if (ret < 0)
		pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%d\n",
			__func__, entry << IOMMU_PAGE_SHIFT, tce, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_put_tce_user_mode);

int iommu_take_ownership(struct iommu_table *tbl)
{
	unsigned long sz = (tbl->it_size + 7) >> 3;

	if (tbl->it_offset == 0)
		clear_bit(0, tbl->it_map);

	if (!bitmap_empty(tbl->it_map, tbl->it_size)) {
		pr_err("iommu_tce: it_map is not empty");
		return -EBUSY;
	}

	memset(tbl->it_map, 0xff, sz);
	iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size);

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_take_ownership);

void iommu_release_ownership(struct iommu_table *tbl)
{
	unsigned long sz = (tbl->it_size + 7) >> 3;

	iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size);
	memset(tbl->it_map, 0, sz);

	/* Restore bit#0 set by iommu_init_table() */
	if (tbl->it_offset == 0)
		set_bit(0, tbl->it_map);
}
EXPORT_SYMBOL_GPL(iommu_release_ownership);

static int iommu_add_device(struct device *dev)
{
	struct iommu_table *tbl;
	int ret = 0;

	if (WARN_ON(dev->iommu_group)) {
		pr_warn("iommu_tce: device %s is already in iommu group %d, skipping\n",
				dev_name(dev),
				iommu_group_id(dev->iommu_group));
		return -EBUSY;
	}

	tbl = get_iommu_table_base(dev);
	if (!tbl || !tbl->it_group) {
		pr_debug("iommu_tce: skipping device %s with no tbl\n",
				dev_name(dev));
		return 0;
	}

	pr_debug("iommu_tce: adding %s to iommu group %d\n",
			dev_name(dev), iommu_group_id(tbl->it_group));

	ret = iommu_group_add_device(tbl->it_group, dev);
	if (ret < 0)
		pr_err("iommu_tce: %s has not been added, ret=%d\n",
				dev_name(dev), ret);

	return ret;
}

static void iommu_del_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static int iommu_bus_notifier(struct notifier_block *nb,
		unsigned long action, void *data)
{
	struct device *dev = data;

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		return iommu_add_device(dev);
	case BUS_NOTIFY_DEL_DEVICE:
		iommu_del_device(dev);
		return 0;
	default:
		return 0;
	}
}

static struct notifier_block tce_iommu_bus_nb = {
	.notifier_call = iommu_bus_notifier,
};

static int __init tce_iommu_init(void)
{
	struct pci_dev *pdev = NULL;

	BUILD_BUG_ON(PAGE_SIZE < IOMMU_PAGE_SIZE);

	for_each_pci_dev(pdev)
		iommu_add_device(&pdev->dev);

	bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
	return 0;
}

subsys_initcall_sync(tce_iommu_init);

#else

void iommu_register_group(struct iommu_table *tbl,
		int pci_domain_number, unsigned long pe_num)
{
}

#endif /* CONFIG_IOMMU_API */
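The TCE read/write permission bits map onto DMA directions exactly as in iommu_tce_direction() above. A standalone demonstration of that mapping (not from the patch; the flag values below are illustrative stand-ins -- the real TCE_PCI_* constants live in asm/tce.h):

	/* sketch: TCE permission bits -> DMA direction */
	#include <stdio.h>

	#define TCE_PCI_WRITE	0x2UL	/* stand-in value */
	#define TCE_PCI_READ	0x1UL	/* stand-in value */

	enum dir { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE, DMA_NONE };

	static enum dir tce_direction(unsigned long tce)
	{
		if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
			return DMA_BIDIRECTIONAL;
		else if (tce & TCE_PCI_READ)
			return DMA_TO_DEVICE;
		else if (tce & TCE_PCI_WRITE)
			return DMA_FROM_DEVICE;
		return DMA_NONE;
	}

	int main(void)
	{
		printf("%d %d %d %d\n",
		       tce_direction(TCE_PCI_READ | TCE_PCI_WRITE),	/* 0 */
		       tce_direction(TCE_PCI_READ),			/* 1 */
		       tce_direction(TCE_PCI_WRITE),			/* 2 */
		       tce_direction(0));				/* 3 */
		return 0;
	}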
@@ -116,8 +116,6 @@ static inline notrace int decrementer_check_overflow(void)
	u64 now = get_tb_or_rtc();
	u64 *next_tb = &__get_cpu_var(decrementers_next_tb);

	if (now >= *next_tb)
		set_dec(1);
	return now >= *next_tb;
}

@@ -36,12 +36,6 @@
#include <asm/sstep.h>
#include <asm/uaccess.h>

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
#define MSR_SINGLESTEP	(MSR_DE)
#else
#define MSR_SINGLESTEP	(MSR_SE)
#endif

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

@@ -104,19 +98,7 @@ void __kprobes arch_remove_kprobe(struct kprobe *p)

static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	/* We turn off async exceptions to ensure that the single step will
	 * be for the instruction we have the kprobe on; if we don't, it's
	 * possible we'd get the single step reported for an exception handler
	 * like Decrementer or External Interrupt */
	regs->msr &= ~MSR_EE;
	regs->msr |= MSR_SINGLESTEP;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	regs->msr &= ~MSR_CE;
	mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
#ifdef CONFIG_PPC_47x
	isync();
#endif
#endif
	enable_single_step(regs);

	/*
	 * On powerpc we should single step on the original
@@ -84,22 +84,30 @@ static ssize_t dev_nvram_read(struct file *file, char __user *buf,
	char *tmp = NULL;
	ssize_t size;

	ret = -ENODEV;
	if (!ppc_md.nvram_size)
	if (!ppc_md.nvram_size) {
		ret = -ENODEV;
		goto out;
	}

	ret = 0;
	size = ppc_md.nvram_size();
	if (*ppos >= size || size < 0)
	if (size < 0) {
		ret = size;
		goto out;
	}

	if (*ppos >= size) {
		ret = 0;
		goto out;
	}

	count = min_t(size_t, count, size - *ppos);
	count = min(count, PAGE_SIZE);

	ret = -ENOMEM;
	tmp = kmalloc(count, GFP_KERNEL);
	if (!tmp)
	if (!tmp) {
		ret = -ENOMEM;
		goto out;
	}

	ret = ppc_md.nvram_read(tmp, count, ppos);
	if (ret <= 0)
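The reworked read path above clamps the request twice: first to what remains of the device past *ppos, then to one page so the kmalloc() stays bounded. A standalone sketch of that clamping logic (not from the patch):

	/* sketch: the double clamp in the nvram read path */
	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	static unsigned long clamp_read(unsigned long count, long size, long ppos)
	{
		if (count > (unsigned long)(size - ppos))
			count = size - ppos;	/* don't run off the device */
		if (count > PAGE_SIZE)
			count = PAGE_SIZE;	/* keep the bounce buffer small */
		return count;
	}

	int main(void)
	{
		printf("%lu\n", clamp_read(8192, 10000, 9000));	/* 1000 */
		printf("%lu\n", clamp_read(8192, 10000, 0));	/* 4096 */
		return 0;
	}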
arch/powerpc/kernel/pci-hotplug.c (new file, 111 lines)
@@ -0,0 +1,111 @@
/*
 * Derived from "arch/powerpc/platforms/pseries/pci_dlpar.c"
 *
 * Copyright (C) 2003 Linda Xie <lxie@us.ibm.com>
 * Copyright (C) 2005 International Business Machines
 *
 * Updates, 2005, John Rose <johnrose@austin.ibm.com>
 * Updates, 2005, Linas Vepstas <linas@austin.ibm.com>
 * Updates, 2013, Gavin Shan <shangw@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/pci.h>
#include <linux/export.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
#include <asm/firmware.h>
#include <asm/eeh.h>

/**
 * __pcibios_remove_pci_devices - remove all devices under this bus
 * @bus: the indicated PCI bus
 * @purge_pe: destroy the PE on removal of PCI devices
 *
 * Remove all of the PCI devices under this bus both from the
 * linux pci device tree, and from the powerpc EEH address cache.
 * By default, the corresponding PE will be destroyed during the
 * normal PCI hotplug path. For PCI hotplug during EEH recovery,
 * the corresponding PE won't be destroyed and deallocated.
 */
void __pcibios_remove_pci_devices(struct pci_bus *bus, int purge_pe)
{
	struct pci_dev *dev, *tmp;
	struct pci_bus *child_bus;

	/* First go down child busses */
	list_for_each_entry(child_bus, &bus->children, node)
		__pcibios_remove_pci_devices(child_bus, purge_pe);

	pr_debug("PCI: Removing devices on bus %04x:%02x\n",
		 pci_domain_nr(bus), bus->number);
	list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) {
		pr_debug("   * Removing %s...\n", pci_name(dev));
		eeh_remove_bus_device(dev, purge_pe);
		pci_stop_and_remove_bus_device(dev);
	}
}

/**
 * pcibios_remove_pci_devices - remove all devices under this bus
 * @bus: the indicated PCI bus
 *
 * Remove all of the PCI devices under this bus both from the
 * linux pci device tree, and from the powerpc EEH address cache.
 */
void pcibios_remove_pci_devices(struct pci_bus *bus)
{
	__pcibios_remove_pci_devices(bus, 1);
}
EXPORT_SYMBOL_GPL(pcibios_remove_pci_devices);

/**
 * pcibios_add_pci_devices - adds new pci devices to bus
 * @bus: the indicated PCI bus
 *
 * This routine will find and fixup new pci devices under
 * the indicated bus. This routine presumes that there
 * might already be some devices under this bridge, so
 * it carefully tries to add only new devices. (And that
 * is how this routine differs from other, similar pcibios
 * routines.)
 */
void pcibios_add_pci_devices(struct pci_bus *bus)
{
	int slotno, num, mode, pass, max;
	struct pci_dev *dev;
	struct device_node *dn = pci_bus_to_OF_node(bus);

	eeh_add_device_tree_early(dn);

	mode = PCI_PROBE_NORMAL;
	if (ppc_md.pci_probe_mode)
		mode = ppc_md.pci_probe_mode(bus);

	if (mode == PCI_PROBE_DEVTREE) {
		/* use ofdt-based probe */
		of_rescan_bus(dn, bus);
	} else if (mode == PCI_PROBE_NORMAL) {
		/* use legacy probe */
		slotno = PCI_SLOT(PCI_DN(dn->child)->devfn);
		num = pci_scan_slot(bus, PCI_DEVFN(slotno, 0));
		if (!num)
			return;
		pcibios_setup_bus_devices(bus);
		max = bus->busn_res.start;
		for (pass = 0; pass < 2; pass++) {
			list_for_each_entry(dev, &bus->devices, bus_list) {
				if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
				    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
					max = pci_scan_bridge(bus, dev,
							      max, pass);
			}
		}
	}
	pcibios_finish_adding_to_bus(bus);
}
EXPORT_SYMBOL_GPL(pcibios_add_pci_devices);
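One detail worth noticing in __pcibios_remove_pci_devices() above: it recurses into child buses before touching devices on the current bus, so leaf buses are torn down first. A standalone stand-in with the same post-order walk (illustrative only, single child instead of a list):

	/* sketch: children-first (post-order) bus teardown */
	#include <stdio.h>
	#include <stddef.h>

	struct bus_sketch { const char *name; struct bus_sketch *child; };

	static void remove_devices(struct bus_sketch *bus)
	{
		if (bus->child)
			remove_devices(bus->child);	/* first go down child buses */
		printf("removing devices on %s\n", bus->name);
	}

	int main(void)
	{
		struct bus_sketch leaf = { "bus 02", NULL };
		struct bus_sketch root = { "bus 00", &leaf };
		remove_devices(&root);	/* prints bus 02, then bus 00 */
		return 0;
	}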
@@ -916,7 +916,11 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
	flush_altivec_to_thread(src);
	flush_vsx_to_thread(src);
	flush_spe_to_thread(src);

	*dst = *src;

	clear_task_ebb(dst);

	return 0;
}

@@ -559,6 +559,35 @@ void __init early_init_dt_setup_initrd_arch(unsigned long start,
}
#endif

static void __init early_reserve_mem_dt(void)
{
	unsigned long i, len, dt_root;
	const __be32 *prop;

	dt_root = of_get_flat_dt_root();

	prop = of_get_flat_dt_prop(dt_root, "reserved-ranges", &len);

	if (!prop)
		return;

	DBG("Found new-style reserved-ranges\n");

	/* Each reserved range is an (address,size) pair, 2 cells each,
	 * totalling 4 cells per range. */
	for (i = 0; i < len / (sizeof(*prop) * 4); i++) {
		u64 base, size;

		base = of_read_number(prop + (i * 4) + 0, 2);
		size = of_read_number(prop + (i * 4) + 2, 2);

		if (size) {
			DBG("reserving: %llx -> %llx\n", base, size);
			memblock_reserve(base, size);
		}
	}
}

static void __init early_reserve_mem(void)
{
	u64 base, size;
@@ -574,12 +603,16 @@ static void __init early_reserve_mem(void)
	self_size = initial_boot_params->totalsize;
	memblock_reserve(self_base, self_size);

	/* Look for the new "reserved-regions" property in the DT */
	early_reserve_mem_dt();

#ifdef CONFIG_BLK_DEV_INITRD
	/* then reserve the initrd, if any */
	if (initrd_start && (initrd_end > initrd_start))
	/* Then reserve the initrd, if any */
	if (initrd_start && (initrd_end > initrd_start)) {
		memblock_reserve(_ALIGN_DOWN(__pa(initrd_start), PAGE_SIZE),
			_ALIGN_UP(initrd_end, PAGE_SIZE) -
			_ALIGN_DOWN(initrd_start, PAGE_SIZE));
	}
#endif /* CONFIG_BLK_DEV_INITRD */

#ifdef CONFIG_PPC32
@@ -591,6 +624,8 @@ static void __init early_reserve_mem(void)
		u32 base_32, size_32;
		u32 *reserve_map_32 = (u32 *)reserve_map;

		DBG("Found old 32-bit reserve map\n");

		while (1) {
			base_32 = *(reserve_map_32++);
			size_32 = *(reserve_map_32++);
@@ -605,6 +640,9 @@ static void __init early_reserve_mem(void)
		return;
	}
#endif
	DBG("Processing reserve map\n");

	/* Handle the reserve map in the fdt blob if it exists */
	while (1) {
		base = *(reserve_map++);
		size = *(reserve_map++);
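Each "reserved-ranges" entry above is an (address, size) pair of 64-bit big-endian numbers, i.e. four 32-bit cells per range. A standalone decode of one flattened property, mirroring the of_read_number() cell arithmetic (not from the patch; cells are assumed already converted to host byte order to keep the sketch simple):

	/* sketch: decoding one (address, size) reserved range from cells */
	#include <stdio.h>
	#include <stdint.h>

	static uint64_t read_number(const uint32_t *cells, int n)
	{
		uint64_t r = 0;
		while (n--)
			r = (r << 32) | *cells++;	/* fold cells together */
		return r;
	}

	int main(void)
	{
		/* one range: base 0x1_0000_0000, size 0x10000 */
		uint32_t prop[4] = { 0x1, 0x0, 0x0, 0x10000 };
		printf("base=%#llx size=%#llx\n",
		       (unsigned long long)read_number(prop, 2),
		       (unsigned long long)read_number(prop + 2, 2));
		return 0;
	}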
@@ -1449,7 +1449,9 @@ static long ppc_set_hwdebug(struct task_struct *child,
	 */
	if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE) {
		len = bp_info->addr2 - bp_info->addr;
	} else if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
	} else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
		len = 1;
	else {
		ptrace_put_breakpoints(child);
		return -EINVAL;
	}
@@ -166,7 +166,7 @@ ha16:
	/* R_PPC_ADDR16_LO */
lo16:
	cmpwi	r4, R_PPC_ADDR16_LO
	bne	nxtrela
	bne	unknown_type
	lwz	r4, 0(r9)	/* r_offset */
	lwz	r0, 8(r9)	/* r_addend */
	add	r0, r0, r3
@@ -191,6 +191,7 @@ nxtrela:
	dcbst	r4,r7
	sync			/* Ensure the data is flushed before icbi */
	icbi	r4,r7
unknown_type:
	cmpwi	r8, 0		/* relasz = 0 ? */
	ble	done
	add	r9, r9, r6	/* move to next entry in the .rela table */
@@ -1172,7 +1172,7 @@ int __init early_init_dt_scan_rtas(unsigned long node,
static arch_spinlock_t timebase_lock;
static u64 timebase = 0;

void __cpuinit rtas_give_timebase(void)
void rtas_give_timebase(void)
{
	unsigned long flags;

@@ -1189,7 +1189,7 @@ void __cpuinit rtas_give_timebase(void)
	local_irq_restore(flags);
}

void __cpuinit rtas_take_timebase(void)
void rtas_take_timebase(void)
{
	while (!timebase)
		barrier();
@@ -76,7 +76,7 @@
#endif

int boot_cpuid = 0;
int __initdata spinning_secondaries;
int spinning_secondaries;
u64 ppc64_pft_size;

/* Pick defaults since we might want to patch instructions
@@ -407,7 +407,8 @@ inline unsigned long copy_transact_fpr_from_user(struct task_struct *task,
 * altivec/spe instructions at some point.
 */
static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
			  int sigret, int ctx_has_vsx_region)
			  struct mcontext __user *tm_frame, int sigret,
			  int ctx_has_vsx_region)
{
	unsigned long msr = regs->msr;

@@ -475,6 +476,12 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,

	if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
		return 1;
	/* We need to write 0 to the MSR top 32 bits in the tm frame so
	 * that we can check it on restore to see if TM is active.
	 */
	if (tm_frame && __put_user(0, &tm_frame->mc_gregs[PT_MSR]))
		return 1;

	if (sigret) {
		/* Set up the sigreturn trampoline: li r0,sigret; sc */
		if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
@@ -747,7 +754,7 @@ static long restore_tm_user_regs(struct pt_regs *regs,
				 struct mcontext __user *tm_sr)
{
	long err;
	unsigned long msr;
	unsigned long msr, msr_hi;
#ifdef CONFIG_VSX
	int i;
#endif
@@ -852,8 +859,11 @@ static long restore_tm_user_regs(struct pt_regs *regs,
	tm_enable();
	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&current->thread, msr);
	/* The task has moved into TM state S, so ensure MSR reflects this */
	regs->msr = (regs->msr & ~MSR_TS_MASK) | MSR_TS_S;
	/* Get the top half of the MSR */
	if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
		return 1;
	/* Pull in the MSR TM bits from the user context */
	regs->msr = (regs->msr & ~MSR_TS_MASK) | ((msr_hi<<32) & MSR_TS_MASK);

	/* This loads the speculative FP/VEC state, if used */
	if (msr & MSR_FP) {
@@ -952,6 +962,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
{
	struct rt_sigframe __user *rt_sf;
	struct mcontext __user *frame;
	struct mcontext __user *tm_frame = NULL;
	void __user *addr;
	unsigned long newsp = 0;
	int sigret;
@@ -985,23 +996,24 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_frame = &rt_sf->uc_transact.uc_mcontext;
	if (MSR_TM_ACTIVE(regs->msr)) {
		if (save_tm_user_regs(regs, &rt_sf->uc.uc_mcontext,
				      &rt_sf->uc_transact.uc_mcontext, sigret))
		if (save_tm_user_regs(regs, frame, tm_frame, sigret))
			goto badframe;
	}
	else
#endif
	if (save_user_regs(regs, frame, sigret, 1))
	{
		if (save_user_regs(regs, frame, tm_frame, sigret, 1))
			goto badframe;
	}
	regs->link = tramp;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (MSR_TM_ACTIVE(regs->msr)) {
		if (__put_user((unsigned long)&rt_sf->uc_transact,
			       &rt_sf->uc.uc_link)
		    || __put_user(to_user_ptr(&rt_sf->uc_transact.uc_mcontext),
				  &rt_sf->uc_transact.uc_regs))
		    || __put_user((unsigned long)tm_frame, &rt_sf->uc_transact.uc_regs))
			goto badframe;
	}
	else
@@ -1170,7 +1182,7 @@ long sys_swapcontext(struct ucontext __user *old_ctx,
		mctx = (struct mcontext __user *)
			((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
		if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
		    || save_user_regs(regs, mctx, 0, ctx_has_vsx_region)
		    || save_user_regs(regs, mctx, NULL, 0, ctx_has_vsx_region)
		    || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
		    || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
			return -EFAULT;
@@ -1233,7 +1245,7 @@ long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
		if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
			goto bad;

		if (MSR_TM_SUSPENDED(msr_hi<<32)) {
		if (MSR_TM_ACTIVE(msr_hi<<32)) {
			/* We only recheckpoint on return if we're
			 * in a transaction.
			 */
@@ -1392,6 +1404,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
{
	struct sigcontext __user *sc;
	struct sigframe __user *frame;
	struct mcontext __user *tm_mctx = NULL;
	unsigned long newsp = 0;
	int sigret;
	unsigned long tramp;
@@ -1425,6 +1438,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_mctx = &frame->mctx_transact;
	if (MSR_TM_ACTIVE(regs->msr)) {
		if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
				      sigret))
@@ -1432,8 +1446,10 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
	}
	else
#endif
	if (save_user_regs(regs, &frame->mctx, sigret, 1))
	{
		if (save_user_regs(regs, &frame->mctx, tm_mctx, sigret, 1))
			goto badframe;
	}

	regs->link = tramp;

@@ -1481,16 +1497,22 @@ badframe:
long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
		   struct pt_regs *regs)
{
	struct sigframe __user *sf;
	struct sigcontext __user *sc;
	struct sigcontext sigctx;
	struct mcontext __user *sr;
	void __user *addr;
	sigset_t set;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct mcontext __user *mcp, *tm_mcp;
	unsigned long msr_hi;
#endif

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	sc = (struct sigcontext __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
	sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
	sc = &sf->sctx;
	addr = sc;
	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
		goto badframe;
@@ -1507,11 +1529,25 @@ long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
#endif
	set_current_blocked(&set);

	sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
	addr = sr;
	if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
	    || restore_user_regs(regs, sr, 1))
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	mcp = (struct mcontext __user *)&sf->mctx;
	tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
	if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
		goto badframe;
	if (MSR_TM_ACTIVE(msr_hi<<32)) {
		if (!cpu_has_feature(CPU_FTR_TM))
			goto badframe;
		if (restore_tm_user_regs(regs, mcp, tm_mcp))
			goto badframe;
	} else
#endif
	{
		sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
		addr = sr;
		if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
		    || restore_user_regs(regs, sr, 1))
			goto badframe;
	}

	set_thread_flag(TIF_RESTOREALL);
	return 0;
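The 32-bit signal path above keeps only the top half of the 64-bit MSR in the transactional frame, so the TS (transaction state) bits come back via a shift-and-mask, as in the ((msr_hi<<32) & MSR_TS_MASK) expression. A standalone check of that arithmetic (not from the patch; the mask value below is an illustrative stand-in, not the real asm/reg.h constant):

	/* sketch: restoring TS bits from the MSR top word */
	#include <stdio.h>
	#include <stdint.h>

	#define MSR_TS_MASK	(3ULL << 33)	/* stand-in for the TS field */

	int main(void)
	{
		uint64_t regs_msr = 0;
		/* TS bit 33 as seen from the top 32-bit word */
		uint32_t msr_hi = 1u << (33 - 32);

		regs_msr = (regs_msr & ~MSR_TS_MASK) |
			   (((uint64_t)msr_hi << 32) & MSR_TS_MASK);
		printf("msr=%#llx\n", (unsigned long long)regs_msr);
		return 0;
	}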
arch/powerpc/kernel/signal_64.c
@@ -410,6 +410,10 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,

	/* get MSR separately, transfer the LE bit if doing signal return */
	err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
+	/* pull in MSR TM from user context */
+	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
+
+	/* pull in MSR LE from user context */
	regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

	/* The following non-GPR non-FPR non-VR state is also checkpointed: */
@@ -505,8 +509,6 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
	tm_enable();
	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&current->thread, msr);
-	/* The task has moved into TM state S, so ensure MSR reflects this: */
-	regs->msr = (regs->msr & ~MSR_TS_MASK) | __MASK(33);

	/* This loads the speculative FP/VEC state, if used */
	if (msr & MSR_FP) {
@@ -654,7 +656,7 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
		goto badframe;
-	if (MSR_TM_SUSPENDED(msr)) {
+	if (MSR_TM_ACTIVE(msr)) {
		/* We recheckpoint on return. */
		struct ucontext __user *uc_transact;
		if (__get_user(uc_transact, &uc->uc_link))

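The sys_rt_sigreturn() change above is the crux of the fix: recheckpointing must happen whenever the transaction is active (transactional or suspended), not only when it is suspended. A minimal userspace sketch of the MSR[TS] tests involved, assuming the bit positions implied by the __MASK(33) line in this diff rather than quoting the kernel headers:

    #include <stdio.h>
    #include <stdint.h>

    #define MSR_TS_S    (1ULL << 33)            /* transaction suspended */
    #define MSR_TS_T    (1ULL << 34)            /* transaction active */
    #define MSR_TS_MASK (MSR_TS_S | MSR_TS_T)

    #define MSR_TM_ACTIVE(x)    (((x) & MSR_TS_MASK) != 0)
    #define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S)

    int main(void)
    {
        /* 32-bit signal frames store only the top half of the 64-bit
         * MSR, hence the msr_hi << 32 pattern in signal_32.c above. */
        uint64_t msr_hi = MSR_TS_T >> 32;

        printf("active=%d suspended=%d\n",
               MSR_TM_ACTIVE(msr_hi << 32),
               MSR_TM_SUSPENDED(msr_hi << 32));  /* active=1 suspended=0 */
        return 0;
    }

With the old MSR_TM_SUSPENDED() test, the fully transactional state (TS = T) would have skipped the recheckpoint path entirely.
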
arch/powerpc/kernel/smp.c
@@ -480,7 +480,7 @@ static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
	secondary_ti = current_set[cpu] = ti;
 }

-int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
	int rc, c;

@@ -610,7 +610,7 @@ static struct device_node *cpu_to_l2cache(int cpu)
 }

 /* Activate a secondary processor. */
-__cpuinit void start_secondary(void *unused)
+void start_secondary(void *unused)
 {
	unsigned int cpu = smp_processor_id();
	struct device_node *l2_cache;
@@ -637,12 +637,10 @@ __cpuinit void start_secondary(void *unused)

	vdso_getcpu_init();
 #endif
-	notify_cpu_starting(cpu);
-	set_cpu_online(cpu, true);
	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
-		if (cpu_is_offline(base + i))
+		if (cpu_is_offline(base + i) && (cpu != base + i))
			continue;
		cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));
@@ -667,6 +665,10 @@ __cpuinit void start_secondary(void *unused)
	}
	of_node_put(l2_cache);

+	smp_wmb();
+	notify_cpu_starting(cpu);
+	set_cpu_online(cpu, true);
+
	local_irq_enable();

	cpu_startup_entry(CPUHP_ONLINE);

arch/powerpc/kernel/sysfs.c
@@ -341,7 +341,7 @@ static struct device_attribute pa6t_attrs[] = {
 #endif /* HAS_PPC_PMC_PA6T */
 #endif /* HAS_PPC_PMC_CLASSIC */

-static void __cpuinit register_cpu_online(unsigned int cpu)
+static void register_cpu_online(unsigned int cpu)
 {
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct device *s = &c->dev;
@@ -502,7 +502,7 @@ ssize_t arch_cpu_release(const char *buf, size_t count)

 #endif /* CONFIG_HOTPLUG_CPU */

-static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
+static int sysfs_cpu_notify(struct notifier_block *self,
			  unsigned long action, void *hcpu)
 {
	unsigned int cpu = (unsigned int)(long)hcpu;
@@ -522,7 +522,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
	return NOTIFY_OK;
 }

-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
+static struct notifier_block sysfs_cpu_nb = {
	.notifier_call = sysfs_cpu_notify,
 };

arch/powerpc/kernel/time.c
@@ -631,7 +631,6 @@ static int __init get_freq(char *name, int cells, unsigned long *val)
	return found;
 }

-/* should become __cpuinit when secondary_cpu_time_init also is */
 void start_cpu_decrementer(void)
 {
 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)

arch/powerpc/kernel/tm.S
@@ -112,9 +112,18 @@ _GLOBAL(tm_reclaim)
	std	r3, STACK_PARAM(0)(r1)
	SAVE_NVGPRS(r1)

	/* We need to setup MSR for VSX register save instructions.  Here we
	 * also clear the MSR RI since when we do the treclaim, we won't have a
	 * valid kernel pointer for a while.  We clear RI here as it avoids
	 * adding another mtmsr closer to the treclaim.  This makes the region
	 * marked as non-recoverable wider than it needs to be but it saves on
	 * inserting another mtmsrd later.
	 */
	mfmsr	r14
	mr	r15, r14
	ori	r15, r15, MSR_FP
+	li	r16, MSR_RI
+	andc	r15, r15, r16
	oris	r15, r15, MSR_VEC@h
 #ifdef CONFIG_VSX
	BEGIN_FTR_SECTION
@@ -349,9 +358,10 @@ restore_gprs:
	mtcr	r5
	mtxer	r6

-	/* MSR and flags: We don't change CRs, and we don't need to alter
-	 * MSR.
+	/* Clear the MSR RI since we are about to change R1.  EE is already off
	 */
+	li	r4, 0
+	mtmsrd	r4, 1

	REST_4GPRS(0, r7)			/* GPR0-3 */
	REST_GPR(4, r7)				/* GPR4-6 */
@@ -377,6 +387,10 @@ restore_gprs:
	GET_PACA(r13)
	GET_SCRATCH0(r1)

+	/* R1 is restored, so we are recoverable again.  EE is still off */
+	li	r4, MSR_RI
+	mtmsrd	r4, 1
+
	REST_NVGPRS(r1)

	addi	r1, r1, TM_FRAME_SIZE

arch/powerpc/kernel/traps.c
@@ -866,6 +866,10 @@ static int emulate_string_inst(struct pt_regs *regs, u32 instword)
	u8 val;
	u32 shift = 8 * (3 - (pos & 0x3));

+	/* if process is 32-bit, clear upper 32 bits of EA */
+	if ((regs->msr & MSR_64BIT) == 0)
+		EA &= 0xFFFFFFFF;
+
	switch ((instword & PPC_INST_STRING_MASK)) {
	case PPC_INST_LSWX:
	case PPC_INST_LSWI:
@@ -1125,7 +1129,17 @@ void __kprobes program_check_exception(struct pt_regs *regs)
	 * ESR_DST (!?) or 0.  In the process of chasing this with the
	 * hardware people - not sure if it can happen on any illegal
	 * instruction or only on FP instructions, whether there is a
-	 * pattern to occurrences etc. -dgibson 31/Mar/2003 */
+	 * pattern to occurrences etc. -dgibson 31/Mar/2003
+	 */

+	/*
+	 * If we support a HW FPU, we need to ensure the FP state
+	 * is flushed into the thread_struct before attempting
+	 * emulation
+	 */
+#ifdef CONFIG_PPC_FPU
+	flush_fp_to_thread(current);
+#endif
	switch (do_mathemu(regs)) {
	case 0:
		emulate_single_step(regs);
@@ -1282,25 +1296,50 @@ void vsx_unavailable_exception(struct pt_regs *regs)
	die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
 }

-void tm_unavailable_exception(struct pt_regs *regs)
+void facility_unavailable_exception(struct pt_regs *regs)
 {
+	static char *facility_strings[] = {
+		"FPU",
+		"VMX/VSX",
+		"DSCR",
+		"PMU SPRs",
+		"BHRB",
+		"TM",
+		"AT",
+		"EBB",
+		"TAR",
+	};
+	char *facility, *prefix;
+	u64 value;
+
+	if (regs->trap == 0xf60) {
+		value = mfspr(SPRN_FSCR);
+		prefix = "";
+	} else {
+		value = mfspr(SPRN_HFSCR);
+		prefix = "Hypervisor ";
+	}
+
+	value = value >> 56;
+
	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

-	/* Currently we never expect a TMU exception.  Catch
-	 * this and kill the process!
-	 */
-	printk(KERN_EMERG "Unexpected TM unavailable exception at %lx "
-	       "(msr %lx)\n",
-	       regs->nip, regs->msr);
+	if (value < ARRAY_SIZE(facility_strings))
+		facility = facility_strings[value];
+	else
+		facility = "unknown";
+
+	pr_err("%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n",
+		prefix, facility, regs->nip, regs->msr);

	if (user_mode(regs)) {
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

-	die("Unexpected TM unavailable exception", regs, SIGABRT);
+	die("Unexpected facility unavailable exception", regs, SIGABRT);
 }

 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
@@ -1396,8 +1435,7 @@ void performance_monitor_exception(struct pt_regs *regs)
 void SoftwareEmulation(struct pt_regs *regs)
 {
	extern int do_mathemu(struct pt_regs *);
-	extern int Soft_emulate_8xx(struct pt_regs *);
-#if defined(CONFIG_MATH_EMULATION) || defined(CONFIG_8XX_MINIMAL_FPEMU)
+#if defined(CONFIG_MATH_EMULATION)
	int errcode;
 #endif

@@ -1430,23 +1468,6 @@ void SoftwareEmulation(struct pt_regs *regs)
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}
-
-#elif defined(CONFIG_8XX_MINIMAL_FPEMU)
-	errcode = Soft_emulate_8xx(regs);
-	if (errcode >= 0)
-		PPC_WARN_EMULATED(8xx, regs);
-
-	switch (errcode) {
-	case 0:
-		emulate_single_step(regs);
-		return;
-	case 1:
-		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
-		return;
-	case -EFAULT:
-		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
-		return;
-	}
 #else
	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
 #endif
@@ -1796,8 +1817,6 @@ struct ppc_emulated ppc_emulated = {
	WARN_EMULATED_SETUP(unaligned),
 #ifdef CONFIG_MATH_EMULATION
	WARN_EMULATED_SETUP(math),
-#elif defined(CONFIG_8XX_MINIMAL_FPEMU)
-	WARN_EMULATED_SETUP(8xx),
 #endif
 #ifdef CONFIG_VSX
	WARN_EMULATED_SETUP(vsx),

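For readers unfamiliar with the new facility_unavailable_exception(), the decode it performs is simple: the interrupt cause lives in the top byte of the FSCR (or HFSCR) image, and that byte indexes the string table. A hedged standalone sketch with an assumed register value (illustrative only, not kernel code):

    #include <stdio.h>

    static const char *facility_strings[] = {
        "FPU", "VMX/VSX", "DSCR", "PMU SPRs", "BHRB", "TM", "AT", "EBB", "TAR",
    };

    int main(void)
    {
        unsigned long long value = 5ULL << 56; /* assumed (H)FSCR image, cause 5 */
        const char *facility = "unknown";

        value = value >> 56;                   /* same shift as the diff above */
        if (value < sizeof(facility_strings) / sizeof(facility_strings[0]))
            facility = facility_strings[value];

        printf("Facility '%s' unavailable\n", facility);  /* prints 'TM' */
        return 0;
    }
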
arch/powerpc/kernel/udbg.c
@@ -50,7 +50,7 @@ void __init udbg_early_init(void)
	udbg_init_debug_beat();
 #elif defined(CONFIG_PPC_EARLY_DEBUG_PAS_REALMODE)
	udbg_init_pas_realmode();
-#elif defined(CONFIG_BOOTX_TEXT)
+#elif defined(CONFIG_PPC_EARLY_DEBUG_BOOTX)
	udbg_init_btext();
 #elif defined(CONFIG_PPC_EARLY_DEBUG_44x)
	/* PPC44x debug */

arch/powerpc/kernel/vdso.c
@@ -711,7 +711,7 @@ static void __init vdso_setup_syscall_map(void)
 }

 #ifdef CONFIG_PPC64
-int __cpuinit vdso_getcpu_init(void)
+int vdso_getcpu_init(void)
 {
	unsigned long cpu, node, val;

arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -34,7 +34,7 @@
 void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
 {
	ppc_md.hpte_invalidate(pte->slot, pte->host_vpn,
-			       MMU_PAGE_4K, MMU_SEGSIZE_256M,
+			       MMU_PAGE_4K, MMU_PAGE_4K, MMU_SEGSIZE_256M,
			       false);
 }

arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -675,6 +675,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
	}
	/* if the guest wants write access, see if that is OK */
	if (!writing && hpte_is_writable(r)) {
+		unsigned int hugepage_shift;
		pte_t *ptep, pte;

		/*
@@ -683,9 +684,10 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
		 */
		rcu_read_lock_sched();
		ptep = find_linux_pte_or_hugepte(current->mm->pgd,
-						 hva, NULL);
-		if (ptep && pte_present(*ptep)) {
-			pte = kvmppc_read_update_linux_pte(ptep, 1);
+						 hva, &hugepage_shift);
+		if (ptep) {
+			pte = kvmppc_read_update_linux_pte(ptep, 1,
+							   hugepage_shift);
			if (pte_write(pte))
				write_ok = 1;
		}

arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -27,7 +27,7 @@ static void *real_vmalloc_addr(void *x)
	unsigned long addr = (unsigned long) x;
	pte_t *p;

-	p = find_linux_pte(swapper_pg_dir, addr);
+	p = find_linux_pte_or_hugepte(swapper_pg_dir, addr, NULL);
	if (!p || !pte_present(*p))
		return NULL;
	/* assume we don't have huge pages in vmalloc space... */
@@ -139,20 +139,18 @@ static pte_t lookup_linux_pte(pgd_t *pgdir, unsigned long hva,
 {
	pte_t *ptep;
	unsigned long ps = *pte_sizep;
-	unsigned int shift;
+	unsigned int hugepage_shift;

-	ptep = find_linux_pte_or_hugepte(pgdir, hva, &shift);
+	ptep = find_linux_pte_or_hugepte(pgdir, hva, &hugepage_shift);
	if (!ptep)
		return __pte(0);
-	if (shift)
-		*pte_sizep = 1ul << shift;
+	if (hugepage_shift)
+		*pte_sizep = 1ul << hugepage_shift;
	else
		*pte_sizep = PAGE_SIZE;
	if (ps > *pte_sizep)
		return __pte(0);
	if (!pte_present(*ptep))
		return __pte(0);
-	return kvmppc_read_update_linux_pte(ptep, writing);
+	return kvmppc_read_update_linux_pte(ptep, writing, hugepage_shift);
 }

 static inline void unlock_hpte(unsigned long *hpte, unsigned long hpte_v)

arch/powerpc/lib/sstep.c
@@ -580,7 +580,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
		if (instr & 1)
			regs->link = regs->nip;
		if (branch_taken(instr, regs))
-			regs->nip = imm;
+			regs->nip = truncate_if_32bit(regs->msr, imm);
		return 1;
 #ifdef CONFIG_PPC64
	case 17:	/* sc */

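The one-line sstep.c fix routes the computed branch target through truncate_if_32bit(), so a single-stepped branch in a 32-bit task wraps at 4GB just as the hardware would. A rough userspace rendering of the helper on an LP64 host (the MSR bit position here is an assumption for illustration):

    #include <stdio.h>

    #define MSR_64BIT (1UL << 63)  /* assumed position of the 64-bit-mode bit */

    static unsigned long truncate_if_32bit(unsigned long msr, unsigned long val)
    {
        if ((msr & MSR_64BIT) == 0)    /* 32-bit task: mask to 4GB */
            val &= 0xffffffffUL;
        return val;
    }

    int main(void)
    {
        unsigned long imm = 0x100001000UL;  /* branch target past 4GB */

        printf("%#lx\n", truncate_if_32bit(0, imm));          /* 0x1000 */
        printf("%#lx\n", truncate_if_32bit(MSR_64BIT, imm));  /* 0x100001000 */
        return 0;
    }
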
arch/powerpc/math-emu/Makefile
@@ -4,7 +4,8 @@ obj-$(CONFIG_MATH_EMULATION) += fabs.o fadd.o fadds.o fcmpo.o fcmpu.o \
					fmadd.o fmadds.o fmsub.o fmsubs.o \
					fmul.o fmuls.o fnabs.o fneg.o \
					fnmadd.o fnmadds.o fnmsub.o fnmsubs.o \
-					fres.o frsp.o frsqrte.o fsel.o lfs.o \
+					fres.o fre.o frsp.o fsel.o lfs.o \
+					frsqrte.o frsqrtes.o \
					fsqrt.o fsqrts.o fsub.o fsubs.o \
					mcrfs.o mffs.o mtfsb0.o mtfsb1.o \
					mtfsf.o mtfsfi.o stfiwx.o stfs.o \

arch/powerpc/math-emu/fre.c  (new file, 11 lines)
@@ -0,0 +1,11 @@
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+
+int fre(void *frD, void *frB)
+{
+#ifdef DEBUG
+	printk("%s: %p %p\n", __func__, frD, frB);
+#endif
+	return -ENOSYS;
+}
arch/powerpc/math-emu/frsqrtes.c  (new file, 11 lines)
@@ -0,0 +1,11 @@
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+
+int frsqrtes(void *frD, void *frB)
+{
+#ifdef DEBUG
+	printk("%s: %p %p\n", __func__, frD, frB);
+#endif
+	return 0;
+}
arch/powerpc/math-emu/math.c
@@ -58,8 +58,10 @@ FLOATFUNC(fnabs);
 FLOATFUNC(fneg);

 /* Optional */
+FLOATFUNC(fre);
 FLOATFUNC(fres);
 FLOATFUNC(frsqrte);
+FLOATFUNC(frsqrtes);
 FLOATFUNC(fsel);
 FLOATFUNC(fsqrt);
 FLOATFUNC(fsqrts);
@@ -97,6 +99,7 @@ FLOATFUNC(fsqrts);
 #define FSQRTS		0x016	/* 22 */
 #define FRES		0x018	/* 24 */
 #define FMULS		0x019	/* 25 */
+#define FRSQRTES	0x01a	/* 26 */
 #define FMSUBS		0x01c	/* 28 */
 #define FMADDS		0x01d	/* 29 */
 #define FNMSUBS		0x01e	/* 30 */
@@ -109,6 +112,7 @@ FLOATFUNC(fsqrts);
 #define FADD		0x015	/* 21 */
 #define FSQRT		0x016	/* 22 */
 #define FSEL		0x017	/* 23 */
+#define FRE		0x018	/* 24 */
 #define FMUL		0x019	/* 25 */
 #define FRSQRTE		0x01a	/* 26 */
 #define FMSUB		0x01c	/* 28 */
@@ -299,9 +303,10 @@ do_mathemu(struct pt_regs *regs)
	case FDIVS:	func = fdivs;	type = AB;	break;
	case FSUBS:	func = fsubs;	type = AB;	break;
	case FADDS:	func = fadds;	type = AB;	break;
-	case FSQRTS:	func = fsqrts;	type = AB;	break;
-	case FRES:	func = fres;	type = AB;	break;
+	case FSQRTS:	func = fsqrts;	type = XB;	break;
+	case FRES:	func = fres;	type = XB;	break;
	case FMULS:	func = fmuls;	type = AC;	break;
+	case FRSQRTES:	func = frsqrtes; type = XB;	break;
	case FMSUBS:	func = fmsubs;	type = ABC;	break;
	case FMADDS:	func = fmadds;	type = ABC;	break;
	case FNMSUBS:	func = fnmsubs;	type = ABC;	break;
@@ -317,10 +322,11 @@ do_mathemu(struct pt_regs *regs)
	case FDIV:	func = fdiv;	type = AB;	break;
	case FSUB:	func = fsub;	type = AB;	break;
	case FADD:	func = fadd;	type = AB;	break;
-	case FSQRT:	func = fsqrt;	type = AB;	break;
+	case FSQRT:	func = fsqrt;	type = XB;	break;
+	case FRE:	func = fre;	type = XB;	break;
	case FSEL:	func = fsel;	type = ABC;	break;
	case FMUL:	func = fmul;	type = AC;	break;
-	case FRSQRTE:	func = frsqrte;	type = AB;	break;
+	case FRSQRTE:	func = frsqrte;	type = XB;	break;
	case FMSUB:	func = fmsub;	type = ABC;	break;
	case FMADD:	func = fmadd;	type = ABC;	break;
	case FNMSUB:	func = fnmsub;	type = ABC;	break;

|
@ -41,7 +41,7 @@ int icache_44x_need_flush;
|
||||
|
||||
unsigned long tlb_47x_boltmap[1024/8];
|
||||
|
||||
static void __cpuinit ppc44x_update_tlb_hwater(void)
|
||||
static void ppc44x_update_tlb_hwater(void)
|
||||
{
|
||||
extern unsigned int tlb_44x_patch_hwater_D[];
|
||||
extern unsigned int tlb_44x_patch_hwater_I[];
|
||||
@ -134,7 +134,7 @@ static void __init ppc47x_update_boltmap(void)
|
||||
/*
|
||||
* "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 47x type MMU
|
||||
*/
|
||||
static void __cpuinit ppc47x_pin_tlb(unsigned int virt, unsigned int phys)
|
||||
static void ppc47x_pin_tlb(unsigned int virt, unsigned int phys)
|
||||
{
|
||||
unsigned int rA;
|
||||
int bolted;
|
||||
@ -229,7 +229,7 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
void __cpuinit mmu_init_secondary(int cpu)
|
||||
void mmu_init_secondary(int cpu)
|
||||
{
|
||||
unsigned long addr;
|
||||
unsigned long memstart = memstart_addr & ~(PPC_PIN_SIZE - 1);
|
||||
|
arch/powerpc/mm/Makefile
@@ -6,17 +6,16 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror

 ccflags-$(CONFIG_PPC64)	:= $(NO_MINIMAL_TOC)

-obj-y				:= fault.o mem.o pgtable.o gup.o \
+obj-y				:= fault.o mem.o pgtable.o gup.o mmap.o \
				   init_$(CONFIG_WORD_SIZE).o \
				   pgtable_$(CONFIG_WORD_SIZE).o
 obj-$(CONFIG_PPC_MMU_NOHASH)	+= mmu_context_nohash.o tlb_nohash.o \
				   tlb_nohash_low.o
 obj-$(CONFIG_PPC_BOOK3E)	+= tlb_low_$(CONFIG_WORD_SIZE)e.o
-obj-$(CONFIG_PPC64)		+= mmap_64.o
 hash64-$(CONFIG_PPC_NATIVE)	:= hash_native_64.o
 obj-$(CONFIG_PPC_STD_MMU_64)	+= hash_utils_64.o \
				   slb_low.o slb.o stab.o \
-				   mmap_64.o $(hash64-y)
+				   $(hash64-y)
 obj-$(CONFIG_PPC_STD_MMU_32)	+= ppc_mmu_32.o
 obj-$(CONFIG_PPC_STD_MMU)	+= hash_low_$(CONFIG_WORD_SIZE).o \
				   tlb_hash$(CONFIG_WORD_SIZE).o \
@@ -28,11 +27,12 @@ obj-$(CONFIG_44x)		+= 44x_mmu.o
 obj-$(CONFIG_PPC_FSL_BOOK3E)	+= fsl_booke_mmu.o
 obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
 obj-$(CONFIG_PPC_MM_SLICES)	+= slice.o
-ifeq ($(CONFIG_HUGETLB_PAGE),y)
 obj-y				+= hugetlbpage.o
+ifeq ($(CONFIG_HUGETLB_PAGE),y)
 obj-$(CONFIG_PPC_STD_MMU_64)	+= hugetlbpage-hash64.o
 obj-$(CONFIG_PPC_BOOK3E_MMU)	+= hugetlbpage-book3e.o
 endif
+obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += hugepage-hash64.o
 obj-$(CONFIG_PPC_SUBPAGE_PROT)	+= subpage-prot.o
 obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o
 obj-$(CONFIG_HIGHMEM)		+= highmem.o

arch/powerpc/mm/gup.c
@@ -34,7 +34,7 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,

	ptep = pte_offset_kernel(&pmd, addr);
	do {
-		pte_t pte = *ptep;
+		pte_t pte = ACCESS_ONCE(*ptep);
		struct page *page;

		if ((pte_val(pte) & mask) != result)
@@ -63,12 +63,18 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,

	pmdp = pmd_offset(&pud, addr);
	do {
-		pmd_t pmd = *pmdp;
+		pmd_t pmd = ACCESS_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);
-		if (pmd_none(pmd))
+		/*
+		 * If we find a splitting transparent hugepage we
+		 * return zero. That will result in taking the slow
+		 * path which will call wait_split_huge_page()
+		 * if the pmd is still in splitting state
+		 */
+		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
			return 0;
-		if (pmd_huge(pmd)) {
+		if (pmd_huge(pmd) || pmd_large(pmd)) {
			if (!gup_hugepte((pte_t *)pmdp, PMD_SIZE, addr, next,
					 write, pages, nr))
				return 0;
@@ -91,7 +97,7 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,

	pudp = pud_offset(&pgd, addr);
	do {
-		pud_t pud = *pudp;
+		pud_t pud = ACCESS_ONCE(*pudp);

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
@@ -154,7 +160,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,

	pgdp = pgd_offset(mm, addr);
	do {
-		pgd_t pgd = *pgdp;
+		pgd_t pgd = ACCESS_ONCE(*pgdp);

		pr_devel(" %016lx: normal pgd %p\n", addr,
			 (void *)pgd_val(pgd));

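The common thread in the gup.c hunks is the ACCESS_ONCE() snapshot: each page-table entry is read exactly once into a local, and every later check runs on that local copy, so a concurrent THP split or collapse cannot change the entry between the test and the use. A standalone sketch of the pattern, with simplified types that are assumptions rather than the kernel's:

    #include <stdio.h>

    #define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

    static unsigned long pmd_entry;    /* stands in for *pmdp */

    static int lockless_walk(void)
    {
        /* one read; all later decisions use this snapshot */
        unsigned long pmd = ACCESS_ONCE(pmd_entry);

        if (pmd == 0)      /* pmd_none() stand-in */
            return 0;      /* punt to the slow path */
        return 1;
    }

    int main(void)
    {
        pmd_entry = 0x8000;
        printf("%d\n", lockless_walk());   /* 1 */
        return 0;
    }
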
arch/powerpc/mm/hash_low_64.S
@@ -289,9 +289,10 @@ htab_modify_pte:

	/* Call ppc_md.hpte_updatepp */
	mr	r5,r29			/* vpn */
-	li	r6,MMU_PAGE_4K		/* page size */
-	ld	r7,STK_PARAM(R9)(r1)	/* segment size */
-	ld	r8,STK_PARAM(R8)(r1)	/* get "local" param */
+	li	r6,MMU_PAGE_4K		/* base page size */
+	li	r7,MMU_PAGE_4K		/* actual page size */
+	ld	r8,STK_PARAM(R9)(r1)	/* segment size */
+	ld	r9,STK_PARAM(R8)(r1)	/* get "local" param */
 _GLOBAL(htab_call_hpte_updatepp)
	bl	.			/* Patched by htab_finish_init() */

@@ -649,9 +650,10 @@ htab_modify_pte:

	/* Call ppc_md.hpte_updatepp */
	mr	r5,r29			/* vpn */
-	li	r6,MMU_PAGE_4K		/* page size */
-	ld	r7,STK_PARAM(R9)(r1)	/* segment size */
-	ld	r8,STK_PARAM(R8)(r1)	/* get "local" param */
+	li	r6,MMU_PAGE_4K		/* base page size */
+	li	r7,MMU_PAGE_4K		/* actual page size */
+	ld	r8,STK_PARAM(R9)(r1)	/* segment size */
+	ld	r9,STK_PARAM(R8)(r1)	/* get "local" param */
 _GLOBAL(htab_call_hpte_updatepp)
	bl	.			/* patched by htab_finish_init() */

@@ -937,9 +939,10 @@ ht64_modify_pte:

	/* Call ppc_md.hpte_updatepp */
	mr	r5,r29			/* vpn */
-	li	r6,MMU_PAGE_64K
-	ld	r7,STK_PARAM(R9)(r1)	/* segment size */
-	ld	r8,STK_PARAM(R8)(r1)	/* get "local" param */
+	li	r6,MMU_PAGE_64K		/* base page size */
+	li	r7,MMU_PAGE_64K		/* actual page size */
+	ld	r8,STK_PARAM(R9)(r1)	/* segment size */
+	ld	r9,STK_PARAM(R8)(r1)	/* get "local" param */
 _GLOBAL(ht64_call_hpte_updatepp)
	bl	.			/* patched by htab_finish_init() */

arch/powerpc/mm/hash_native_64.c
@@ -273,61 +273,15 @@ static long native_hpte_remove(unsigned long hpte_group)
	return i;
 }

-static inline int __hpte_actual_psize(unsigned int lp, int psize)
-{
-	int i, shift;
-	unsigned int mask;
-
-	/* start from 1 ignoring MMU_PAGE_4K */
-	for (i = 1; i < MMU_PAGE_COUNT; i++) {
-
-		/* invalid penc */
-		if (mmu_psize_defs[psize].penc[i] == -1)
-			continue;
-		/*
-		 * encoding bits per actual page size
-		 *        PTE LP     actual page size
-		 *    rrrr rrrz	>=8KB
-		 *    rrrr rrzz	>=16KB
-		 *    rrrr rzzz	>=32KB
-		 *    rrrr zzzz	>=64KB
-		 * .......
-		 */
-		shift = mmu_psize_defs[i].shift - LP_SHIFT;
-		if (shift > LP_BITS)
-			shift = LP_BITS;
-		mask = (1 << shift) - 1;
-		if ((lp & mask) == mmu_psize_defs[psize].penc[i])
-			return i;
-	}
-	return -1;
-}
-
-static inline int hpte_actual_psize(struct hash_pte *hptep, int psize)
-{
-	/* Look at the 8 bit LP value */
-	unsigned int lp = (hptep->r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
-
-	if (!(hptep->v & HPTE_V_VALID))
-		return -1;
-
-	/* First check if it is large page */
-	if (!(hptep->v & HPTE_V_LARGE))
-		return MMU_PAGE_4K;
-
-	return __hpte_actual_psize(lp, psize);
-}
-
 static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
-				 unsigned long vpn, int psize, int ssize,
-				 int local)
+				 unsigned long vpn, int bpsize,
+				 int apsize, int ssize, int local)
 {
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v, want_v;
	int ret = 0;
-	int actual_psize;

-	want_v = hpte_encode_avpn(vpn, psize, ssize);
+	want_v = hpte_encode_avpn(vpn, bpsize, ssize);

	DBG_LOW("    update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
		vpn, want_v & HPTE_V_AVPN, slot, newpp);
@@ -335,7 +289,6 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
	native_lock_hpte(hptep);

	hpte_v = hptep->v;
-	actual_psize = hpte_actual_psize(hptep, psize);
	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
@@ -343,12 +296,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
	 * (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
-	if (actual_psize < 0) {
-		actual_psize = psize;
-		ret = -1;
-		goto err_out;
-	}
-	if (!HPTE_V_COMPARE(hpte_v, want_v)) {
+	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
		DBG_LOW(" -> miss\n");
		ret = -1;
	} else {
@@ -357,11 +305,10 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
		hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
			(newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
	}
-err_out:
	native_unlock_hpte(hptep);

	/* Ensure it is out of the tlb too. */
-	tlbie(vpn, psize, actual_psize, ssize, local);
+	tlbie(vpn, bpsize, apsize, ssize, local);

	return ret;
 }
@@ -402,7 +349,6 @@ static long native_hpte_find(unsigned long vpn, int psize, int ssize)
 static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
				       int psize, int ssize)
 {
-	int actual_psize;
	unsigned long vpn;
	unsigned long vsid;
	long slot;
@@ -415,36 +361,33 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_address + slot;
-	actual_psize = hpte_actual_psize(hptep, psize);
-	if (actual_psize < 0)
-		actual_psize = psize;

	/* Update the HPTE */
	hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
		(newpp & (HPTE_R_PP | HPTE_R_N));

-	/* Ensure it is out of the tlb too. */
-	tlbie(vpn, psize, actual_psize, ssize, 0);
+	/*
+	 * Ensure it is out of the tlb too. Bolted entries base and
+	 * actual page size will be same.
+	 */
+	tlbie(vpn, psize, psize, ssize, 0);
 }

 static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
-				   int psize, int ssize, int local)
+				   int bpsize, int apsize, int ssize, int local)
 {
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;
-	int actual_psize;

	local_irq_save(flags);

	DBG_LOW("    invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

-	want_v = hpte_encode_avpn(vpn, psize, ssize);
+	want_v = hpte_encode_avpn(vpn, bpsize, ssize);
	native_lock_hpte(hptep);
	hpte_v = hptep->v;

-	actual_psize = hpte_actual_psize(hptep, psize);
	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
@@ -452,23 +395,120 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
	 * (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
-	if (actual_psize < 0) {
-		actual_psize = psize;
-		native_unlock_hpte(hptep);
-		goto err_out;
-	}
-	if (!HPTE_V_COMPARE(hpte_v, want_v))
+	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
		native_unlock_hpte(hptep);
	else
		/* Invalidate the hpte. NOTE: this also unlocks it */
		hptep->v = 0;

-err_out:
	/* Invalidate the TLB */
-	tlbie(vpn, psize, actual_psize, ssize, local);
+	tlbie(vpn, bpsize, apsize, ssize, local);

	local_irq_restore(flags);
 }

+static void native_hugepage_invalidate(struct mm_struct *mm,
+				       unsigned char *hpte_slot_array,
+				       unsigned long addr, int psize)
+{
+	int ssize = 0, i;
+	int lock_tlbie;
+	struct hash_pte *hptep;
+	int actual_psize = MMU_PAGE_16M;
+	unsigned int max_hpte_count, valid;
+	unsigned long flags, s_addr = addr;
+	unsigned long hpte_v, want_v, shift;
+	unsigned long hidx, vpn = 0, vsid, hash, slot;
+
+	shift = mmu_psize_defs[psize].shift;
+	max_hpte_count = 1U << (PMD_SHIFT - shift);
+
+	local_irq_save(flags);
+	for (i = 0; i < max_hpte_count; i++) {
+		valid = hpte_valid(hpte_slot_array, i);
+		if (!valid)
+			continue;
+		hidx = hpte_hash_index(hpte_slot_array, i);
+
+		/* get the vpn */
+		addr = s_addr + (i * (1ul << shift));
+		if (!is_kernel_addr(addr)) {
+			ssize = user_segment_size(addr);
+			vsid = get_vsid(mm->context.id, addr, ssize);
+			WARN_ON(vsid == 0);
+		} else {
+			vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
+			ssize = mmu_kernel_ssize;
+		}
+
+		vpn = hpt_vpn(addr, vsid, ssize);
+		hash = hpt_hash(vpn, shift, ssize);
+		if (hidx & _PTEIDX_SECONDARY)
+			hash = ~hash;
+
+		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+		slot += hidx & _PTEIDX_GROUP_IX;
+
+		hptep = htab_address + slot;
+		want_v = hpte_encode_avpn(vpn, psize, ssize);
+		native_lock_hpte(hptep);
+		hpte_v = hptep->v;
+
+		/* Even if we miss, we need to invalidate the TLB */
+		if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
+			native_unlock_hpte(hptep);
+		else
+			/* Invalidate the hpte. NOTE: this also unlocks it */
+			hptep->v = 0;
+	}
+	/*
+	 * Since this is a hugepage, we just need a single tlbie.
+	 * use the last vpn.
+	 */
+	lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
+	if (lock_tlbie)
+		raw_spin_lock(&native_tlbie_lock);
+
+	asm volatile("ptesync":::"memory");
+	__tlbie(vpn, psize, actual_psize, ssize);
+	asm volatile("eieio; tlbsync; ptesync":::"memory");
+
+	if (lock_tlbie)
+		raw_spin_unlock(&native_tlbie_lock);
+
+	local_irq_restore(flags);
+}
+
+static inline int __hpte_actual_psize(unsigned int lp, int psize)
+{
+	int i, shift;
+	unsigned int mask;
+
+	/* start from 1 ignoring MMU_PAGE_4K */
+	for (i = 1; i < MMU_PAGE_COUNT; i++) {
+
+		/* invalid penc */
+		if (mmu_psize_defs[psize].penc[i] == -1)
+			continue;
+		/*
+		 * encoding bits per actual page size
+		 *        PTE LP     actual page size
+		 *    rrrr rrrz	>=8KB
+		 *    rrrr rrzz	>=16KB
+		 *    rrrr rzzz	>=32KB
+		 *    rrrr zzzz	>=64KB
+		 * .......
+		 */
+		shift = mmu_psize_defs[i].shift - LP_SHIFT;
+		if (shift > LP_BITS)
+			shift = LP_BITS;
+		mask = (1 << shift) - 1;
+		if ((lp & mask) == mmu_psize_defs[psize].penc[i])
+			return i;
+	}
+	return -1;
+}
+
 static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
			int *psize, int *apsize, int *ssize, unsigned long *vpn)
 {
@@ -672,4 +712,5 @@ void __init hpte_init_native(void)
	ppc_md.hpte_remove	= native_hpte_remove;
	ppc_md.hpte_clear_all	= native_hpte_clear;
	ppc_md.flush_hash_range = native_flush_hash_range;
+	ppc_md.hugepage_invalidate = native_hugepage_invalidate;
 }

arch/powerpc/mm/hash_utils_64.c
@@ -807,7 +807,7 @@ void __init early_init_mmu(void)
 }

 #ifdef CONFIG_SMP
-void __cpuinit early_init_mmu_secondary(void)
+void early_init_mmu_secondary(void)
 {
	/* Initialize hash table for that CPU */
	if (!firmware_has_feature(FW_FEATURE_LPAR))
@@ -1050,13 +1050,26 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
		goto bail;
	}

-#ifdef CONFIG_HUGETLB_PAGE
	if (hugeshift) {
-		rc = __hash_page_huge(ea, access, vsid, ptep, trap, local,
-					ssize, hugeshift, psize);
+		if (pmd_trans_huge(*(pmd_t *)ptep))
+			rc = __hash_page_thp(ea, access, vsid, (pmd_t *)ptep,
+					     trap, local, ssize, psize);
+#ifdef CONFIG_HUGETLB_PAGE
+		else
+			rc = __hash_page_huge(ea, access, vsid, ptep, trap,
+					      local, ssize, hugeshift, psize);
+#else
+		else {
+			/*
+			 * if we have hugeshift, and it is not transhuge with
+			 * hugetlb disabled, something is really wrong.
+			 */
+			rc = 1;
+			WARN_ON(1);
+		}
+#endif
		goto bail;
	}
-#endif /* CONFIG_HUGETLB_PAGE */

 #ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
@@ -1145,6 +1158,7 @@ EXPORT_SYMBOL_GPL(hash_page);
 void hash_preload(struct mm_struct *mm, unsigned long ea,
		  unsigned long access, unsigned long trap)
 {
+	int hugepage_shift;
	unsigned long vsid;
	pgd_t *pgdir;
	pte_t *ptep;
@@ -1166,10 +1180,27 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return;
-	ptep = find_linux_pte(pgdir, ea);
-	if (!ptep)
-		return;

+	/* Get VSID */
+	ssize = user_segment_size(ea);
+	vsid = get_vsid(mm->context.id, ea, ssize);
+	if (!vsid)
+		return;
+	/*
+	 * Hash doesn't like irqs. Walking linux page table with irq disabled
+	 * saves us from holding multiple locks.
+	 */
+	local_irq_save(flags);
+
+	/*
+	 * THP pages use update_mmu_cache_pmd. We don't do
+	 * hash preload there. Hence can ignore THP here
+	 */
+	ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugepage_shift);
+	if (!ptep)
+		goto out_exit;
+
+	WARN_ON(hugepage_shift);
 #ifdef CONFIG_PPC_64K_PAGES
	/* If either _PAGE_4K_PFN or _PAGE_NO_CACHE is set (and we are on
	 * a 64K kernel), then we don't preload, hash_page() will take
@@ -1178,18 +1209,9 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
	 * page size demotion here
	 */
	if (pte_val(*ptep) & (_PAGE_4K_PFN | _PAGE_NO_CACHE))
-		return;
+		goto out_exit;
 #endif /* CONFIG_PPC_64K_PAGES */

-	/* Get VSID */
-	ssize = user_segment_size(ea);
-	vsid = get_vsid(mm->context.id, ea, ssize);
-	if (!vsid)
-		return;
-
-	/* Hash doesn't like irqs */
-	local_irq_save(flags);
-
	/* Is that local to this CPU ? */
	if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		local = 1;
@@ -1211,7 +1233,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
			   mm->context.user_psize,
			   mm->context.user_psize,
			   pte_val(*ptep));
-
+out_exit:
	local_irq_restore(flags);
 }

@@ -1232,7 +1254,11 @@ void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;
		DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx);
-		ppc_md.hpte_invalidate(slot, vpn, psize, ssize, local);
+		/*
+		 * We use same base page size and actual psize, because we don't
+		 * use these functions for hugepage
+		 */
+		ppc_md.hpte_invalidate(slot, vpn, psize, psize, ssize, local);
	} pte_iterate_hashed_end();

 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
@@ -1365,7 +1391,8 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
		hash = ~hash;
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot += hidx & _PTEIDX_GROUP_IX;
-	ppc_md.hpte_invalidate(slot, vpn, mmu_linear_psize, mmu_kernel_ssize, 0);
+	ppc_md.hpte_invalidate(slot, vpn, mmu_linear_psize, mmu_linear_psize,
+			       mmu_kernel_ssize, 0);
 }

 void kernel_map_pages(struct page *page, int numpages, int enable)

arch/powerpc/mm/hugepage-hash64.c  (new file, 175 lines)
@@ -0,0 +1,175 @@
+/*
+ * Copyright IBM Corporation, 2013
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2.1 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ */
+
+/*
+ * PPC64 THP Support for hash based MMUs
+ */
+#include <linux/mm.h>
+#include <asm/machdep.h>
+
+int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+		    pmd_t *pmdp, unsigned long trap, int local, int ssize,
+		    unsigned int psize)
+{
+	unsigned int index, valid;
+	unsigned char *hpte_slot_array;
+	unsigned long rflags, pa, hidx;
+	unsigned long old_pmd, new_pmd;
+	int ret, lpsize = MMU_PAGE_16M;
+	unsigned long vpn, hash, shift, slot;
+
+	/*
+	 * atomically mark the linux large page PMD busy and dirty
+	 */
+	do {
+		old_pmd = pmd_val(*pmdp);
+		/* If PMD busy, retry the access */
+		if (unlikely(old_pmd & _PAGE_BUSY))
+			return 0;
+		/* If PMD is trans splitting retry the access */
+		if (unlikely(old_pmd & _PAGE_SPLITTING))
+			return 0;
+		/* If PMD permissions don't match, take page fault */
+		if (unlikely(access & ~old_pmd))
+			return 1;
+		/*
+		 * Try to lock the PTE, add ACCESSED and DIRTY if it was
+		 * a write access
+		 */
+		new_pmd = old_pmd | _PAGE_BUSY | _PAGE_ACCESSED;
+		if (access & _PAGE_RW)
+			new_pmd |= _PAGE_DIRTY;
+	} while (old_pmd != __cmpxchg_u64((unsigned long *)pmdp,
+					  old_pmd, new_pmd));
+	/*
+	 * PP bits. _PAGE_USER is already PP bit 0x2, so we only
+	 * need to add in 0x1 if it's a read-only user page
+	 */
+	rflags = new_pmd & _PAGE_USER;
+	if ((new_pmd & _PAGE_USER) && !((new_pmd & _PAGE_RW) &&
+					(new_pmd & _PAGE_DIRTY)))
+		rflags |= 0x1;
+	/*
+	 * _PAGE_EXEC -> HW_NO_EXEC since it's inverted
+	 */
+	rflags |= ((new_pmd & _PAGE_EXEC) ? 0 : HPTE_R_N);
+
+#if 0
+	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
+
+		/*
+		 * No CPU has hugepages but lacks no execute, so we
+		 * don't need to worry about that case
+		 */
+		rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
+	}
+#endif
+	/*
+	 * Find the slot index details for this ea, using base page size.
+	 */
+	shift = mmu_psize_defs[psize].shift;
+	index = (ea & ~HPAGE_PMD_MASK) >> shift;
+	BUG_ON(index >= 4096);
+
+	vpn = hpt_vpn(ea, vsid, ssize);
+	hash = hpt_hash(vpn, shift, ssize);
+	hpte_slot_array = get_hpte_slot_array(pmdp);
+
+	valid = hpte_valid(hpte_slot_array, index);
+	if (valid) {
+		/* update the hpte bits */
+		hidx = hpte_hash_index(hpte_slot_array, index);
+		if (hidx & _PTEIDX_SECONDARY)
+			hash = ~hash;
+		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+		slot += hidx & _PTEIDX_GROUP_IX;
+
+		ret = ppc_md.hpte_updatepp(slot, rflags, vpn,
+					   psize, lpsize, ssize, local);
+		/*
+		 * We failed to update, try to insert a new entry.
+		 */
+		if (ret == -1) {
+			/*
+			 * large pte is marked busy, so we can be sure
+			 * nobody is looking at hpte_slot_array. hence we can
+			 * safely update this here.
+			 */
+			valid = 0;
+			new_pmd &= ~_PAGE_HPTEFLAGS;
+			hpte_slot_array[index] = 0;
+		} else
+			/* clear the busy bits and set the hash pte bits */
+			new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
+	}
+
+	if (!valid) {
+		unsigned long hpte_group;
+
+		/* insert new entry */
+		pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
+repeat:
+		hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+
+		/* clear the busy bits and set the hash pte bits */
+		new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
+
+		/* Add in WIMG bits */
+		rflags |= (new_pmd & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
+				      _PAGE_COHERENT | _PAGE_GUARDED));
+
+		/* Insert into the hash table, primary slot */
+		slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
+					  psize, lpsize, ssize);
+		/*
+		 * Primary is full, try the secondary
+		 */
+		if (unlikely(slot == -1)) {
+			hpte_group = ((~hash & htab_hash_mask) *
+				      HPTES_PER_GROUP) & ~0x7UL;
+			slot = ppc_md.hpte_insert(hpte_group, vpn, pa,
+						  rflags, HPTE_V_SECONDARY,
+						  psize, lpsize, ssize);
+			if (slot == -1) {
+				if (mftb() & 0x1)
+					hpte_group = ((hash & htab_hash_mask) *
+						      HPTES_PER_GROUP) & ~0x7UL;
+
+				ppc_md.hpte_remove(hpte_group);
+				goto repeat;
+			}
+		}
+		/*
+		 * Hypervisor failure. Restore old pmd and return -1
+		 * similar to __hash_page_*
+		 */
+		if (unlikely(slot == -2)) {
+			*pmdp = __pmd(old_pmd);
+			hash_failure_debug(ea, access, vsid, trap, ssize,
+					   psize, lpsize, old_pmd);
+			return -1;
+		}
+		/*
+		 * large pte is marked busy, so we can be sure
+		 * nobody is looking at hpte_slot_array. hence we can
+		 * safely update this here.
+		 */
+		mark_hpte_slot_valid(hpte_slot_array, index, slot);
+	}
+	/*
+	 * No need to use ldarx/stdcx here
+	 */
+	*pmdp = __pmd(new_pmd & ~_PAGE_BUSY);
+	return 0;
+}
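The arithmetic behind the BUG_ON(index >= 4096) in the new file is worth spelling out: a 16MB transparent hugepage hashed with 4K base pages needs one slot-array entry per 4K piece, 16M/4K = 4096 in all. A small standalone sketch with assumed illustrative values:

    #include <stdio.h>

    int main(void)
    {
        unsigned long hpage_mask = ~((1UL << 24) - 1); /* 16M, HPAGE_PMD_MASK */
        unsigned int shift = 12;                       /* 4K base page */
        unsigned long ea = 0x10123000UL;               /* assumed address */

        unsigned int index = (ea & ~hpage_mask) >> shift;
        printf("index %u of %lu\n", index, 1UL << (24 - shift)); /* 291 of 4096 */
        return 0;
    }
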
arch/powerpc/mm/hugetlbpage-hash64.c
@@ -81,7 +81,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		slot += (old_pte & _PAGE_F_GIX) >> 12;

		if (ppc_md.hpte_updatepp(slot, rflags, vpn, mmu_psize,
-					 ssize, local) == -1)
+					 mmu_psize, ssize, local) == -1)
			old_pte &= ~_PAGE_HPTEFLAGS;
	}

arch/powerpc/mm/hugetlbpage.c
@@ -21,6 +21,9 @@
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
 #include <asm/setup.h>
+#include <asm/hugetlb.h>
+
+#ifdef CONFIG_HUGETLB_PAGE

 #define PAGE_SHIFT_64K	16
 #define PAGE_SHIFT_16M	24
@@ -100,68 +103,9 @@ int pgd_huge(pgd_t pgd)
 }
 #endif

-/*
- * We have 4 cases for pgds and pmds:
- * (1) invalid (all zeroes)
- * (2) pointer to next table, as normal; bottom 6 bits == 0
- * (3) leaf pte for huge page, bottom two bits != 00
- * (4) hugepd pointer, bottom two bits == 00, next 4 bits indicate size of table
- */
-pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
-{
-	pgd_t *pg;
-	pud_t *pu;
-	pmd_t *pm;
-	pte_t *ret_pte;
-	hugepd_t *hpdp = NULL;
-	unsigned pdshift = PGDIR_SHIFT;
-
-	if (shift)
-		*shift = 0;
-
-	pg = pgdir + pgd_index(ea);
-
-	if (pgd_huge(*pg)) {
-		ret_pte = (pte_t *) pg;
-		goto out;
-	} else if (is_hugepd(pg))
-		hpdp = (hugepd_t *)pg;
-	else if (!pgd_none(*pg)) {
-		pdshift = PUD_SHIFT;
-		pu = pud_offset(pg, ea);
-
-		if (pud_huge(*pu)) {
-			ret_pte = (pte_t *) pu;
-			goto out;
-		} else if (is_hugepd(pu))
-			hpdp = (hugepd_t *)pu;
-		else if (!pud_none(*pu)) {
-			pdshift = PMD_SHIFT;
-			pm = pmd_offset(pu, ea);
-
-			if (pmd_huge(*pm)) {
-				ret_pte = (pte_t *) pm;
-				goto out;
-			} else if (is_hugepd(pm))
-				hpdp = (hugepd_t *)pm;
-			else if (!pmd_none(*pm))
-				return pte_offset_kernel(pm, ea);
-		}
-	}
-	if (!hpdp)
-		return NULL;
-
-	ret_pte = hugepte_offset(hpdp, ea, pdshift);
-	pdshift = hugepd_shift(*hpdp);
-out:
-	if (shift)
-		*shift = pdshift;
-	return ret_pte;
-}
-EXPORT_SYMBOL_GPL(find_linux_pte_or_hugepte);
-
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
	/* Only called for hugetlbfs pages, hence can ignore THP */
	return find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
 }

@@ -736,11 +680,14 @@ follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
	struct page *page;
	unsigned shift;
	unsigned long mask;

+	/*
+	 * Transparent hugepages are handled by generic code. We can skip them
+	 * here.
+	 */
	ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);

	/* Verify it is a huge page else bail. */
-	if (!ptep || !shift)
+	if (!ptep || !shift || pmd_trans_huge(*(pmd_t *)ptep))
		return ERR_PTR(-EINVAL);

	mask = (1UL << shift) - 1;
@@ -759,69 +706,6 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
	return NULL;
 }

-int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
-		unsigned long end, int write, struct page **pages, int *nr)
-{
-	unsigned long mask;
-	unsigned long pte_end;
-	struct page *head, *page, *tail;
-	pte_t pte;
-	int refs;
-
-	pte_end = (addr + sz) & ~(sz-1);
-	if (pte_end < end)
-		end = pte_end;
-
-	pte = *ptep;
-	mask = _PAGE_PRESENT | _PAGE_USER;
-	if (write)
-		mask |= _PAGE_RW;
-
-	if ((pte_val(pte) & mask) != mask)
-		return 0;
-
-	/* hugepages are never "special" */
-	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
-
-	refs = 0;
-	head = pte_page(pte);
-
-	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
-	tail = page;
-	do {
-		VM_BUG_ON(compound_head(page) != head);
-		pages[*nr] = page;
-		(*nr)++;
-		page++;
-		refs++;
-	} while (addr += PAGE_SIZE, addr != end);
-
-	if (!page_cache_add_speculative(head, refs)) {
-		*nr -= refs;
-		return 0;
-	}
-
-	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
-		/* Could be optimized better */
-		*nr -= refs;
-		while (refs--)
-			put_page(head);
-		return 0;
-	}
-
-	/*
-	 * Any tail page need their mapcount reference taken before we
-	 * return.
-	 */
-	while (refs--) {
-		if (PageTail(tail))
-			get_huge_page_tail(tail);
-		tail++;
-	}
-
-	return 1;
-}
-
 static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
				      unsigned long sz)
 {
@@ -1038,3 +922,168 @@ void flush_dcache_icache_hugepage(struct page *page)
		}
	}
 }
+
+#endif /* CONFIG_HUGETLB_PAGE */
+
+/*
+ * We have 4 cases for pgds and pmds:
+ * (1) invalid (all zeroes)
+ * (2) pointer to next table, as normal; bottom 6 bits == 0
+ * (3) leaf pte for huge page, bottom two bits != 00
+ * (4) hugepd pointer, bottom two bits == 00, next 4 bits indicate size of table
+ *
+ * So long as we atomically load page table pointers we are safe against teardown,
+ * we can follow the address down to the page and take a ref on it.
+ */
+pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
+{
+	pgd_t pgd, *pgdp;
+	pud_t pud, *pudp;
+	pmd_t pmd, *pmdp;
+	pte_t *ret_pte;
+	hugepd_t *hpdp = NULL;
+	unsigned pdshift = PGDIR_SHIFT;
+
+	if (shift)
+		*shift = 0;
+
+	pgdp = pgdir + pgd_index(ea);
+	pgd  = ACCESS_ONCE(*pgdp);
+	/*
+	 * Always operate on the local stack value. This makes sure the
+	 * value doesn't get updated by a parallel THP split/collapse,
+	 * page fault or a page unmap. The return pte_t * is still not
+	 * stable. So should be checked there for above conditions.
+	 */
+	if (pgd_none(pgd))
+		return NULL;
+	else if (pgd_huge(pgd)) {
+		ret_pte = (pte_t *) pgdp;
+		goto out;
+	} else if (is_hugepd(&pgd))
+		hpdp = (hugepd_t *)&pgd;
+	else {
+		/*
+		 * Even if we end up with an unmap, the pgtable will not
+		 * be freed, because we do an rcu free and here we are
+		 * irq disabled
+		 */
+		pdshift = PUD_SHIFT;
+		pudp = pud_offset(&pgd, ea);
+		pud  = ACCESS_ONCE(*pudp);
+
+		if (pud_none(pud))
+			return NULL;
+		else if (pud_huge(pud)) {
+			ret_pte = (pte_t *) pudp;
+			goto out;
+		} else if (is_hugepd(&pud))
+			hpdp = (hugepd_t *)&pud;
+		else {
+			pdshift = PMD_SHIFT;
+			pmdp = pmd_offset(&pud, ea);
+			pmd  = ACCESS_ONCE(*pmdp);
+			/*
+			 * A hugepage collapse is captured by pmd_none, because
+			 * it marks the pmd none and does a hpte invalidate.
+			 *
+			 * A hugepage split is captured by pmd_trans_splitting
+			 * because we mark the pmd trans splitting and do a
+			 * hpte invalidate
+			 *
+			 */
+			if (pmd_none(pmd) || pmd_trans_splitting(pmd))
+				return NULL;
+
+			if (pmd_huge(pmd) || pmd_large(pmd)) {
+				ret_pte = (pte_t *) pmdp;
+				goto out;
+			} else if (is_hugepd(&pmd))
+				hpdp = (hugepd_t *)&pmd;
+			else
+				return pte_offset_kernel(&pmd, ea);
+		}
+	}
+	if (!hpdp)
+		return NULL;
+
+	ret_pte = hugepte_offset(hpdp, ea, pdshift);
+	pdshift = hugepd_shift(*hpdp);
+out:
+	if (shift)
+		*shift = pdshift;
+	return ret_pte;
+}
+EXPORT_SYMBOL_GPL(find_linux_pte_or_hugepte);
+
+int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
+		unsigned long end, int write, struct page **pages, int *nr)
+{
+	unsigned long mask;
+	unsigned long pte_end;
+	struct page *head, *page, *tail;
+	pte_t pte;
+	int refs;
+
+	pte_end = (addr + sz) & ~(sz-1);
+	if (pte_end < end)
+		end = pte_end;
+
+	pte = ACCESS_ONCE(*ptep);
+	mask = _PAGE_PRESENT | _PAGE_USER;
+	if (write)
+		mask |= _PAGE_RW;
+
+	if ((pte_val(pte) & mask) != mask)
+		return 0;
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	/*
+	 * check for splitting here
+	 */
+	if (pmd_trans_splitting(pte_pmd(pte)))
+		return 0;
+#endif
+
+	/* hugepages are never "special" */
+	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+
+	refs = 0;
+	head = pte_page(pte);
+
+	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
+	tail = page;
+	do {
+		VM_BUG_ON(compound_head(page) != head);
+		pages[*nr] = page;
+		(*nr)++;
+		page++;
+		refs++;
+	} while (addr += PAGE_SIZE, addr != end);
+
+	if (!page_cache_add_speculative(head, refs)) {
+		*nr -= refs;
+		return 0;
+	}
+
+	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
+		/* Could be optimized better */
+		*nr -= refs;
+		while (refs--)
+			put_page(head);
+		return 0;
+	}
+
+	/*
+	 * Any tail page need their mapcount reference taken before we
+	 * return.
+	 */
+	while (refs--) {
+		if (PageTail(tail))
+			get_huge_page_tail(tail);
+		tail++;
+	}
+
+	return 1;
+}

arch/powerpc/mm/init_64.c
@@ -88,7 +88,11 @@ static void pgd_ctor(void *addr)

 static void pmd_ctor(void *addr)
 {
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	memset(addr, 0, PMD_TABLE_SIZE * 2);
+#else
	memset(addr, 0, PMD_TABLE_SIZE);
+#endif
 }

 struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];
@@ -137,10 +141,9 @@ void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
 void pgtable_cache_init(void)
 {
	pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
-	pgtable_cache_add(PMD_INDEX_SIZE, pmd_ctor);
-	if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_INDEX_SIZE))
+	pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
+	if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_CACHE_INDEX))
		panic("Couldn't allocate pgtable caches");
-
	/* In all current configs, when the PUD index exists it's the
	 * same size as either the pgd or pmd index.  Verify that the
	 * initialization above has also created a PUD cache.  This

arch/powerpc/mm/mem.c
@@ -461,6 +461,10 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
 {
 #ifdef CONFIG_PPC_STD_MMU
+	/*
+	 * We don't need to worry about _PAGE_PRESENT here because we are
+	 * called with either mm->page_table_lock held or ptl lock held
+	 */
	unsigned long access = 0, trap;

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */

arch/powerpc/mm/mmu_context_nohash.c
@@ -112,8 +112,10 @@ static unsigned int steal_context_smp(unsigned int id)
	 */
	for_each_cpu(cpu, mm_cpumask(mm)) {
		for (i = cpu_first_thread_sibling(cpu);
-		     i <= cpu_last_thread_sibling(cpu); i++)
-			__set_bit(id, stale_map[i]);
+		     i <= cpu_last_thread_sibling(cpu); i++) {
+			if (stale_map[i])
+				__set_bit(id, stale_map[i]);
+		}
		cpu = i - 1;
	}
	return id;
@@ -272,7 +274,8 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
		/* XXX This clear should ultimately be part of local_flush_tlb_mm */
		for (i = cpu_first_thread_sibling(cpu);
		     i <= cpu_last_thread_sibling(cpu); i++) {
-			__clear_bit(id, stale_map[i]);
+			if (stale_map[i])
+				__clear_bit(id, stale_map[i]);
		}
	}

@@ -329,8 +332,8 @@ void destroy_context(struct mm_struct *mm)

 #ifdef CONFIG_SMP

-static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
-					    unsigned long action, void *hcpu)
+static int mmu_context_cpu_notify(struct notifier_block *self,
+				  unsigned long action, void *hcpu)
 {
	unsigned int cpu = (unsigned int)(long)hcpu;

@@ -363,7 +366,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
	return NOTIFY_OK;
 }

-static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
+static struct notifier_block mmu_context_cpu_nb = {
	.notifier_call = mmu_context_cpu_notify,
 };