
Merge branch 'slab/urgent' into slab/next

Pekka Enberg 2011-09-19 17:46:07 +03:00
commit d20bbfab01
880 changed files with 40706 additions and 13347 deletions

CREDITS

@ -504,7 +504,7 @@ N: Dominik Brodowski
E: linux@brodo.de
W: http://www.brodo.de/
P: 1024D/725B37C6 190F 3E77 9C89 3B6D BECD 46EE 67C3 0308 725B 37C6
D: parts of CPUFreq code, ACPI bugfixes
D: parts of CPUFreq code, ACPI bugfixes, PCMCIA rewrite, cpufrequtils
S: Tuebingen, Germany
N: Andries Brouwer
@ -857,6 +857,10 @@ S: One Dell Way
S: Round Rock, TX 78682
S: USA
N: Mattia Dongili
E: malattia@gmail.com
D: cpufrequtils (precursor to cpupowerutils)
N: Ben Dooks
E: ben-linux@fluff.org
E: ben@simtec.co.uk
@ -1883,6 +1887,11 @@ S: Kruislaan 419
S: 1098 VA Amsterdam
S: The Netherlands
N: Goran Koruga
E: korugag@siol.net
D: cpufrequtils (precursor to cpupowerutils)
S: Slovenia
N: Jiri Kosina
E: jikos@jikos.cz
E: jkosina@suse.cz
@ -2916,6 +2925,12 @@ S: Schlossbergring 9
S: 79098 Freiburg
S: Germany
N: Thomas Renninger
E: trenn@suse.de
D: cpupowerutils
S: SUSE Linux GmbH
S: Germany
N: Joerg Reuter
E: jreuter@yaina.de
W: http://yaina.de/jreuter/


@ -39,3 +39,9 @@ Description: Generic interface to platform dependent persistent storage.
multiple) files based on the record size of the underlying
persistent storage until at least this amount is reached.
Default is 10 Kbytes.
Pstore only supports one backend at a time. If multiple
backends are available, the preferred backend may be
set by passing the pstore.backend= argument to the kernel at
boot time.


@ -4,3 +4,20 @@ KernelVersion: 2.6.37
Contact: "Ike Panhc <ike.pan@canonical.com>"
Description:
Control the power of camera module. 1 means on, 0 means off.
What: /sys/devices/platform/ideapad/cfg
Date: Jun 2011
KernelVersion: 3.1
Contact: "Ike Panhc <ike.pan@canonical.com>"
Description:
Ideapad capability bits.
Bits 8-10: 1 - Intel graphics only
           2 - ATI graphics only
           3 - Nvidia graphics only
           4 - Intel and ATI graphics
           5 - Intel and Nvidia graphics
Bit 16: Bluetooth present (1 if present)
Bit 17: 3G present (1 if present)
Bit 18: Wifi present (1 if present)
Bit 19: Camera present (1 if present)
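As a quick illustration of the capability-bit layout above, here is a minimal, hypothetical user-space sketch (not part of this commit). The sysfs path is taken from the entry above; the exact output format of the attribute is an assumption, so the value is parsed with strtoul(..., 0) to accept either decimal or 0x-prefixed hex.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        char buf[32];
        unsigned long cfg;
        FILE *f = fopen("/sys/devices/platform/ideapad/cfg", "r");

        if (!f)
                return 1;
        if (!fgets(buf, sizeof(buf), f)) {
                fclose(f);
                return 1;
        }
        fclose(f);
        cfg = strtoul(buf, NULL, 0);    /* decimal or 0x-prefixed hex */

        printf("graphics id: %lu\n", (cfg >> 8) & 0x7);  /* bits 8-10 */
        printf("bluetooth:   %lu\n", (cfg >> 16) & 1);   /* bit 16 */
        printf("3G:          %lu\n", (cfg >> 17) & 1);   /* bit 17 */
        printf("wifi:        %lu\n", (cfg >> 18) & 1);   /* bit 18 */
        printf("camera:      %lu\n", (cfg >> 19) & 1);   /* bit 19 */
        return 0;
}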


@ -80,22 +80,13 @@ available tools.
The limit on the length of lines is 80 columns and this is a strongly
preferred limit.
Statements longer than 80 columns will be broken into sensible chunks.
Descendants are always substantially shorter than the parent and are placed
substantially to the right. The same applies to function headers with a long
argument list. Long strings are as well broken into shorter strings. The
only exception to this is where exceeding 80 columns significantly increases
readability and does not hide information.
Statements longer than 80 columns will be broken into sensible chunks, unless
exceeding 80 columns significantly increases readability and does not hide
information. Descendants are always substantially shorter than the parent and
are placed substantially to the right. The same applies to function headers
with a long argument list. However, never break user-visible strings such as
printk messages, because that breaks the ability to grep for them.
void fun(int a, int b, int c)
{
if (condition)
printk(KERN_WARNING "Warning this is a long printk with "
"3 parameters a: %u b: %u "
"c: %u \n", a, b, c);
else
next_statement;
}
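To make the new guidance concrete, a short sketch (not part of the original chapter text, reusing the placeholder names from the example above): the statement is still broken into sensible chunks, but the user-visible printk string is kept on a single line so it remains greppable.

void fun(int a, int b, int c)
{
        if (condition)
                /* the message stays whole even though it passes 80 columns */
                printk(KERN_WARNING "Warning this is a long printk with 3 parameters a: %u b: %u c: %u\n",
                       a, b, c);
        else
                next_statement;
}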
Chapter 3: Placing Braces and Spaces


@ -48,12 +48,19 @@ directory apei/einj. The following files are provided.
- param1
This file is used to set the first error parameter value. Effect of
parameter depends on error_type specified. For memory error, this is
physical memory address.
physical memory address. Only available if param_extension module
parameter is specified.
- param2
This file is used to set the second error parameter value. Effect of
parameter depends on error_type specified. For memory error, this is
physical memory address mask.
physical memory address mask. Only available if param_extension
module parameter is specified.
Parameter injection support is a BIOS-version-specific extension; that
is, it only works with some BIOS versions. If you want to use it, please
make sure your BIOS version has the proper support and specify
"param_extension=y" as a module parameter.
For more information about EINJ, please refer to ACPI specification
version 4.0, section 17.5.


@ -4,7 +4,8 @@ dm-crypt
Device-Mapper's "crypt" target provides transparent encryption of block devices
using the kernel crypto API.
Parameters: <cipher> <key> <iv_offset> <device path> <offset>
Parameters: <cipher> <key> <iv_offset> <device path> \
<offset> [<#opt_params> <opt_params>]
<cipher>
Encryption cipher and an optional IV generation mode.
@ -37,6 +38,24 @@ Parameters: <cipher> <key> <iv_offset> <device path> <offset>
<offset>
Starting sector within the device where the encrypted data begins.
<#opt_params>
Number of optional parameters. If there are no optional parameters,
the optional parameters section can be skipped or #opt_params can be zero.
Otherwise #opt_params is the number of following arguments.
Example of optional parameters section:
1 allow_discards
allow_discards
Block discard requests (a.k.a. TRIM) are passed through the crypt device.
The default is to ignore discard requests.
WARNING: Assess the specific security risks carefully before enabling this
option. For example, allowing discards on encrypted devices may lead to
the leak of information about the ciphertext device (filesystem type,
used space etc.) if the discarded blocks can be located easily on the
device later.
Example scripts
===============
LUKS (Linux Unified Key Setup) is now the preferred way to set up disk


@ -1,17 +1,53 @@
dm-flakey
=========
This target is the same as the linear target except that it returns I/O
errors periodically. It's been found useful in simulating failing
devices for testing purposes.
This target is the same as the linear target except that it exhibits
unreliable behaviour periodically. It's been found useful in simulating
failing devices for testing purposes.
Starting from the time the table is loaded, the device is available for
<up interval> seconds, then returns errors for <down interval> seconds,
and then this cycle repeats.
<up interval> seconds, then exhibits unreliable behaviour for <down
interval> seconds, and then this cycle repeats.
Parameters: <dev path> <offset> <up interval> <down interval>
Also, consider using this in combination with the dm-delay target too,
which can delay reads and writes and/or send them to different
underlying devices.
Table parameters
----------------
<dev path> <offset> <up interval> <down interval> \
[<num_features> [<feature arguments>]]
Mandatory parameters:
<dev path>: Full pathname to the underlying block-device, or a
"major:minor" device-number.
<offset>: Starting sector within the device.
<up interval>: Number of seconds device is available.
<down interval>: Number of seconds device returns errors.
Optional feature parameters:
If no feature parameters are present, during the periods of
unreliability, all I/O returns errors.
drop_writes:
All write I/O is silently ignored.
Read I/O is handled correctly.
corrupt_bio_byte <Nth_byte> <direction> <value> <flags>:
During <down interval>, replace <Nth_byte> of the data of
each matching bio with <value>.
<Nth_byte>: The offset of the byte to replace.
Counting starts at 1, to replace the first byte.
<direction>: Either 'r' to corrupt reads or 'w' to corrupt writes.
'w' is incompatible with drop_writes.
<value>: The value (from 0-255) to write.
<flags>: Perform the replacement only if bio->bi_rw has all the
selected flags set.
Examples:
corrupt_bio_byte 32 r 1 0
- replaces the 32nd byte of READ bios with the value 1
corrupt_bio_byte 224 w 0 32
- replaces the 224th byte of REQ_META (=32) bios with the value 0


@ -1,70 +1,108 @@
Device-mapper RAID (dm-raid) is a bridge from DM to MD. It
provides a way to use device-mapper interfaces to access the MD RAID
drivers.
dm-raid
-------
As with all device-mapper targets, the nominal public interfaces are the
constructor (CTR) tables and the status outputs (both STATUSTYPE_INFO
and STATUSTYPE_TABLE). The CTR table looks like the following:
The device-mapper RAID (dm-raid) target provides a bridge from DM to MD.
It allows the MD RAID drivers to be accessed using a device-mapper
interface.
1: <s> <l> raid \
2: <raid_type> <#raid_params> <raid_params> \
3: <#raid_devs> <meta_dev1> <dev1> .. <meta_devN> <devN>
The target is named "raid" and it accepts the following parameters:
Line 1 contains the standard first three arguments to any device-mapper
target - the start, length, and target type fields. The target type in
this case is "raid".
<raid_type> <#raid_params> <raid_params> \
<#raid_devs> <metadata_dev0> <dev0> [.. <metadata_devN> <devN>]
Line 2 contains the arguments that define the particular raid
type/personality/level, the required arguments for that raid type, and
any optional arguments. Possible raid types include: raid4, raid5_la,
raid5_ls, raid5_rs, raid6_zr, raid6_nr, and raid6_nc. (raid1 is
planned for the future.) The list of required and optional parameters
is the same for all the current raid types. The required parameters are
positional, while the optional parameters are given as key/value pairs.
The possible parameters are as follows:
<chunk_size> Chunk size in sectors.
[[no]sync] Force/Prevent RAID initialization
[rebuild <idx>] Rebuild the drive indicated by the index
[daemon_sleep <ms>] Time between bitmap daemon work to clear bits
[min_recovery_rate <kB/sec/disk>] Throttle RAID initialization
[max_recovery_rate <kB/sec/disk>] Throttle RAID initialization
[max_write_behind <sectors>] See '-write-behind=' (man mdadm)
[stripe_cache <sectors>] Stripe cache size for higher RAIDs
<raid_type>:
raid1 RAID1 mirroring
raid4 RAID4 dedicated parity disk
raid5_la RAID5 left asymmetric
- rotating parity 0 with data continuation
raid5_ra RAID5 right asymmetric
- rotating parity N with data continuation
raid5_ls RAID5 left symmetric
- rotating parity 0 with data restart
raid5_rs RAID5 right symmetric
- rotating parity N with data restart
raid6_zr RAID6 zero restart
- rotating parity zero (left-to-right) with data restart
raid6_nr RAID6 N restart
- rotating parity N (right-to-left) with data restart
raid6_nc RAID6 N continue
- rotating parity N (right-to-left) with data continuation
Line 3 contains the list of devices that compose the array in
metadata/data device pairs. If the metadata is stored separately, a '-'
is given for the metadata device position. If a drive has failed or is
missing at creation time, a '-' can be given for both the metadata and
data drives for a given position.
Reference: Chapter 4 of
http://www.snia.org/sites/default/files/SNIA_DDF_Technical_Position_v2.0.pdf
NB. Currently all metadata devices must be specified as '-'.
<#raid_params>: The number of parameters that follow.
Examples:
# RAID4 - 4 data drives, 1 parity
<raid_params> consists of
Mandatory parameters:
<chunk_size>: Chunk size in sectors. This parameter is often known as
"stripe size". It is the only mandatory parameter and
is placed first.
followed by optional parameters (in any order):
[sync|nosync] Force or prevent RAID initialization.
[rebuild <idx>] Rebuild drive number idx (first drive is 0).
[daemon_sleep <ms>]
Interval between runs of the bitmap daemon that
clear bits. A longer interval means less bitmap I/O but
resyncing after a failure is likely to take longer.
[min_recovery_rate <kB/sec/disk>] Throttle RAID initialization
[max_recovery_rate <kB/sec/disk>] Throttle RAID initialization
[write_mostly <idx>] Drive index is write-mostly
[max_write_behind <sectors>] See '-write-behind=' (man mdadm)
[stripe_cache <sectors>] Stripe cache size (higher RAIDs only)
[region_size <sectors>]
The region_size multiplied by the number of regions is the
logical size of the array. The bitmap records the device
synchronisation state for each region.
<#raid_devs>: The number of devices composing the array.
Each device consists of two entries. The first is the device
containing the metadata (if any); the second is the one containing the
data.
If a drive has failed or is missing at creation time, a '-' can be
given for both the metadata and data drives for a given position.
Example tables
--------------
# RAID4 - 4 data drives, 1 parity (no metadata devices)
# No metadata devices specified to hold superblock/bitmap info
# Chunk size of 1MiB
# (Lines separated for easy reading)
0 1960893648 raid \
raid4 1 2048 \
5 - 8:17 - 8:33 - 8:49 - 8:65 - 8:81
# RAID4 - 4 data drives, 1 parity (no metadata devices)
# RAID4 - 4 data drives, 1 parity (with metadata devices)
# Chunk size of 1MiB, force RAID initialization,
# min recovery rate at 20 kiB/sec/disk
0 1960893648 raid \
raid4 4 2048 min_recovery_rate 20 sync\
5 - 8:17 - 8:33 - 8:49 - 8:65 - 8:81
raid4 4 2048 sync min_recovery_rate 20 \
5 8:17 8:18 8:33 8:34 8:49 8:50 8:65 8:66 8:81 8:82
Performing a 'dmsetup table' should display the CTR table used to
construct the mapping (with possible reordering of optional
parameters).
'dmsetup table' displays the table used to construct the mapping.
The optional parameters are always printed in the order listed
above with "sync" or "nosync" always output ahead of the other
arguments, regardless of the order used when originally loading the table.
Arguments that can be repeated are ordered by value.
Performing a 'dmsetup status' will yield information on the state and
health of the array. The output is as follows:
'dmsetup status' yields information on the state and health of the
array.
The output is as follows:
1: <s> <l> raid \
2: <raid_type> <#devices> <1 health char for each dev> <resync_ratio>
Line 1 is standard DM output. Line 2 is best shown by example:
Line 1 is the standard output produced by device-mapper.
Line 2 is produced by the raid target, and best explained by example:
0 1960893648 raid raid4 5 AAAAA 2/490221568
Here we can see the RAID type is raid4, there are 5 devices - all of
which are 'A'live, and the array is 2/490221568 complete with recovery.
Faulty or missing devices are marked 'D'. Devices that are out-of-sync
are marked 'a'.


@ -10,7 +10,7 @@ Optional properties:
Each button (key) is represented as a sub-node of "gpio-keys":
Subnode properties:
- gpios: OF devcie-tree gpio specificatin.
- gpios: OF device-tree gpio specification.
- label: Descriptive name of the key.
- linux,code: Keycode to emit.


@ -0,0 +1,11 @@
* Freescale MMA8450 3-Axis Accelerometer
Required properties:
- compatible : "fsl,mma8450".
Example:
accelerometer: mma8450@1c {
compatible = "fsl,mma8450";
reg = <0x1c>;
};


@ -10,87 +10,181 @@ NOTE: For DMA Engine usage in async_tx please see:
Below is a guide to device driver writers on how to use the Slave-DMA API of the
DMA Engine. This is applicable to slave DMA usage only.
The slave DMA usage consists of following steps
The slave DMA usage consists of following steps:
1. Allocate a DMA slave channel
2. Set slave and controller specific parameters
3. Get a descriptor for transaction
4. Submit the transaction and wait for callback notification
4. Submit the transaction
5. Issue pending requests and wait for callback notification
1. Allocate a DMA slave channel
Channel allocation is slightly different in the slave DMA context, client
drivers typically need a channel from a particular DMA controller only and even
in some cases a specific channel is desired. To request a channel
dma_request_channel() API is used.
Interface:
struct dma_chan *dma_request_channel(dma_cap_mask_t mask,
dma_filter_fn filter_fn,
void *filter_param);
where dma_filter_fn is defined as:
typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
Channel allocation is slightly different in the slave DMA context,
client drivers typically need a channel from a particular DMA
controller only and even in some cases a specific channel is desired.
To request a channel dma_request_channel() API is used.
When the optional 'filter_fn' parameter is set to NULL dma_request_channel
simply returns the first channel that satisfies the capability mask. Otherwise,
when the mask parameter is insufficient for specifying the necessary channel,
the filter_fn routine can be used to disposition the available channels in the
system. The filter_fn routine is called once for each free channel in the
system. Upon seeing a suitable channel filter_fn returns DMA_ACK which flags
that channel to be the return value from dma_request_channel. A channel
allocated via this interface is exclusive to the caller, until
dma_release_channel() is called.
Interface:
struct dma_chan *dma_request_channel(dma_cap_mask_t mask,
dma_filter_fn filter_fn,
void *filter_param);
where dma_filter_fn is defined as:
typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
The 'filter_fn' parameter is optional, but highly recommended for
slave and cyclic channels as they typically need to obtain a specific
DMA channel.
When the optional 'filter_fn' parameter is NULL, dma_request_channel()
simply returns the first channel that satisfies the capability mask.
Otherwise, the 'filter_fn' routine will be called once for each free
channel which has a capability in 'mask'. 'filter_fn' is expected to
return 'true' when the desired DMA channel is found.
A channel allocated via this interface is exclusive to the caller,
until dma_release_channel() is called.
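A minimal sketch of this step (illustrative only, not from the document): requesting a DMA_SLAVE channel with a filter callback. The filter criterion and the helper names are invented; dma_cap_zero(), dma_cap_set() and dma_request_channel() are the interfaces described above.

#include <linux/dmaengine.h>

static bool my_filter(struct dma_chan *chan, void *param)
{
        /* Hypothetical criterion: only accept channels that belong to
         * the DMA controller device the caller passed in. */
        return chan->device->dev == param;
}

static struct dma_chan *my_get_channel(struct device *dmac_dev)
{
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        /* Returns NULL if no matching free channel exists; the channel
         * is exclusive to us until dma_release_channel() is called. */
        return dma_request_channel(mask, my_filter, dmac_dev);
}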
2. Set slave and controller specific parameters
Next step is always to pass some specific information to the DMA driver. Most of
the generic information which a slave DMA can use is in struct dma_slave_config.
It allows the clients to specify DMA direction, DMA addresses, bus widths, DMA
burst lengths etc. If some DMA controllers have more parameters to be sent then
they should try to embed struct dma_slave_config in their controller specific
structure. That gives flexibility to client to pass more parameters, if
required.
Interface:
int dmaengine_slave_config(struct dma_chan *chan,
struct dma_slave_config *config)
Next step is always to pass some specific information to the DMA
driver. Most of the generic information which a slave DMA can use
is in struct dma_slave_config. This allows the clients to specify
DMA direction, DMA addresses, bus widths, DMA burst lengths etc
for the peripheral.
If some DMA controllers have more parameters to be sent then they
should try to embed struct dma_slave_config in their controller
specific structure. That gives flexibility to client to pass more
parameters, if required.
Interface:
int dmaengine_slave_config(struct dma_chan *chan,
struct dma_slave_config *config)
Please see the dma_slave_config structure definition in dmaengine.h
for a detailed explanation of the struct members. Please note
that the 'direction' member will be going away as it duplicates the
direction given in the prepare call.
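A hedged sketch of this step (illustrative, not from the document): configuring a channel for memory-to-peripheral transfers into a 32-bit FIFO register. The FIFO address and burst length are invented for the example; dmaengine_slave_config() and struct dma_slave_config are the interfaces named above.

#include <linux/dmaengine.h>

static int my_config_channel(struct dma_chan *chan, dma_addr_t fifo_addr)
{
        struct dma_slave_config cfg = {
                /* memory -> device; as noted above, the 'direction'
                 * member is expected to go away eventually */
                .direction      = DMA_TO_DEVICE,
                .dst_addr       = fifo_addr,            /* peripheral FIFO */
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .dst_maxburst   = 4,                    /* example burst */
        };

        return dmaengine_slave_config(chan, &cfg);
}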
3. Get a descriptor for transaction
For slave usage the various modes of slave transfers supported by the
DMA-engine are:
slave_sg - DMA a list of scatter gather buffers from/to a peripheral
dma_cyclic - Perform a cyclic DMA operation from/to a peripheral till the
For slave usage the various modes of slave transfers supported by the
DMA-engine are:
slave_sg - DMA a list of scatter gather buffers from/to a peripheral
dma_cyclic - Perform a cyclic DMA operation from/to a peripheral till the
operation is explicitly stopped.
The non NULL return of this transfer API represents a "descriptor" for the given
transaction.
Interface:
struct dma_async_tx_descriptor *(*chan->device->device_prep_dma_sg)(
struct dma_chan *chan,
struct scatterlist *dst_sg, unsigned int dst_nents,
struct scatterlist *src_sg, unsigned int src_nents,
A non-NULL return of this transfer API represents a "descriptor" for
the given transaction.
Interface:
struct dma_async_tx_descriptor *(*chan->device->device_prep_slave_sg)(
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_data_direction direction,
unsigned long flags);
struct dma_async_tx_descriptor *(*chan->device->device_prep_dma_cyclic)(
struct dma_async_tx_descriptor *(*chan->device->device_prep_dma_cyclic)(
struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_data_direction direction);
4. Submit the transaction and wait for callback notification
To schedule the transaction to be scheduled by dma device, the "descriptor"
returned in above (3) needs to be submitted.
To tell the dma driver that a transaction is ready to be serviced, the
descriptor->submit() callback needs to be invoked. This chains the descriptor to
the pending queue.
The transactions in the pending queue can be activated by calling the
issue_pending API. If channel is idle then the first transaction in queue is
started and subsequent ones queued up.
On completion of the DMA operation the next in queue is submitted and a tasklet
triggered. The tasklet would then call the client driver completion callback
routine for notification, if set.
Interface:
void dma_async_issue_pending(struct dma_chan *chan);
The peripheral driver is expected to have mapped the scatterlist for
the DMA operation prior to calling device_prep_slave_sg, and must
keep the scatterlist mapped until the DMA operation has completed.
The scatterlist must be mapped using the DMA struct device. So,
normal setup should look like this:
==============================================================================
nr_sg = dma_map_sg(chan->device->dev, sgl, sg_len, direction);
if (nr_sg == 0)
/* error */
Additional usage notes for dma driver writers
1/ Although DMA engine specifies that completion callback routines cannot submit
any new operations, but typically for slave DMA subsequent transaction may not
be available for submit prior to callback routine being called. This requirement
is not a requirement for DMA-slave devices. But they should take care to drop
the spin-lock they might be holding before calling the callback routine
desc = chan->device->device_prep_slave_sg(chan, sgl, nr_sg,
direction, flags);
Once a descriptor has been obtained, the callback information can be
added and the descriptor must then be submitted. Some DMA engine
drivers may hold a spinlock between a successful preparation and
submission so it is important that these two operations are closely
paired.
Note:
Although the async_tx API specifies that completion callback
routines cannot submit any new operations, this is not the
case for slave/cyclic DMA.
For slave DMA, the subsequent transaction may not be available
for submission prior to callback function being invoked, so
slave DMA callbacks are permitted to prepare and submit a new
transaction.
For cyclic DMA, a callback function may wish to terminate the
DMA via dmaengine_terminate_all().
Therefore, it is important that DMA engine drivers drop any
locks before calling the callback function which may cause a
deadlock.
Note that callbacks will always be invoked from the DMA
engine's tasklet, never from interrupt context.
4. Submit the transaction
Once the descriptor has been prepared and the callback information
added, it must be placed on the DMA engine driver's pending queue.
Interface:
dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
This returns a cookie that can be used to check the progress of DMA engine
activity via other DMA engine calls not covered in this document.
dmaengine_submit() will not start the DMA operation, it merely adds
it to the pending queue. For this, see step 5, dma_async_issue_pending.
5. Issue pending DMA requests and wait for callback notification
The transactions in the pending queue can be activated by calling the
issue_pending API. If channel is idle then the first transaction in
queue is started and subsequent ones queued up.
On completion of each DMA operation, the next in queue is started and
a tasklet triggered. The tasklet will then call the client driver
completion callback routine for notification, if set.
Interface:
void dma_async_issue_pending(struct dma_chan *chan);
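Pulling steps 3-5 together, a hedged end-to-end sketch (illustrative, not from the document): map the scatterlist, prepare a slave_sg descriptor, attach the completion callback, submit, and issue the pending request. The function names, the DMA_TO_DEVICE direction and the DMA_PREP_INTERRUPT flag are choices made for the example; error unwinding (unmapping on failure) is omitted for brevity.

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>

static void my_done(void *param)
{
        /* Runs from the DMA engine's tasklet once the transfer completes. */
        complete(param);
}

static int my_start_tx(struct dma_chan *chan, struct scatterlist *sgl,
                       unsigned int sg_len, struct completion *done)
{
        struct dma_async_tx_descriptor *desc;
        dma_cookie_t cookie;
        int nr_sg;

        /* Step 3: map with the DMA device, then get a descriptor */
        nr_sg = dma_map_sg(chan->device->dev, sgl, sg_len, DMA_TO_DEVICE);
        if (!nr_sg)
                return -ENOMEM;

        desc = chan->device->device_prep_slave_sg(chan, sgl, nr_sg,
                                                  DMA_TO_DEVICE,
                                                  DMA_PREP_INTERRUPT);
        if (!desc)
                return -ENOMEM;

        /* Add callback information, then submit (step 4) */
        desc->callback = my_done;
        desc->callback_param = done;
        cookie = dmaengine_submit(desc);
        if (dma_submit_error(cookie))
                return -EIO;

        /* Step 5: actually start the pending work */
        dma_async_issue_pending(chan);
        return 0;
}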
Further APIs:
1. int dmaengine_terminate_all(struct dma_chan *chan)
This causes all activity for the DMA channel to be stopped, and may
discard data in the DMA FIFO which hasn't been fully transferred.
No callback functions will be called for any incomplete transfers.
2. int dmaengine_pause(struct dma_chan *chan)
This pauses activity on the DMA channel without data loss.
3. int dmaengine_resume(struct dma_chan *chan)
Resume a previously paused DMA channel. It is invalid to resume a
channel which is not currently paused.
4. enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
This can be used to check the status of the channel. Please see
the documentation in include/linux/dmaengine.h for a more complete
description of this API.
This can be used in conjunction with dma_async_is_complete() and
the cookie returned from 'descriptor->submit()' to check for
completion of a specific DMA transaction.
Note:
Not all DMA engine drivers can return reliable information for
a running DMA channel. It is recommended that DMA engine users
pause or stop (via dmaengine_terminate_all) the channel before
using this API.
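A small hedged sketch of the status-checking pattern described above (not from the document; the wrapper name is invented): it combines dma_async_is_tx_complete() with dma_async_is_complete() and the cookie returned at submit time.

#include <linux/dmaengine.h>

/* Returns true once the transaction identified by 'cookie' has completed. */
static bool my_tx_done(struct dma_chan *chan, dma_cookie_t cookie)
{
        dma_cookie_t last, used;

        dma_async_is_tx_complete(chan, cookie, &last, &used);
        return dma_async_is_complete(cookie, last, used) == DMA_SUCCESS;
}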


@ -143,8 +143,7 @@ o provide a way to configure fault attributes
failslab, fail_page_alloc, and fail_make_request use this way.
Helper functions:
init_fault_attr_dentries(entries, attr, name);
void cleanup_fault_attr_dentries(entries);
fault_create_debugfs_attr(name, parent, attr);
- module parameters
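A hedged sketch of using the new debugfs helper named above (not part of the document); the attribute name is invented, and DECLARE_FAULT_ATTR() from include/linux/fault-inject.h is assumed to provide the default initializer.

#include <linux/err.h>
#include <linux/fault-inject.h>
#include <linux/init.h>

static DECLARE_FAULT_ATTR(fail_my_subsys);

static int __init my_fault_debugfs_init(void)
{
        struct dentry *dir;

        /* Creates the usual probability/interval/times/... knobs under
         * <debugfs>/fail_my_subsys */
        dir = fault_create_debugfs_attr("fail_my_subsys", NULL,
                                        &fail_my_subsys);
        return IS_ERR(dir) ? PTR_ERR(dir) : 0;
}
late_initcall(my_fault_debugfs_init);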


@ -296,15 +296,6 @@ Who: Ravikiran Thirumalai <kiran@scalex86.org>
---------------------------
What: CONFIG_THERMAL_HWMON
When: January 2009
Why: This option was introduced just to allow older lm-sensors userspace
to keep working over the upgrade to 2.6.26. At the scheduled time of
removal fixed lm-sensors (2.x or 3.x) should be readily available.
Who: Rene Herman <rene.herman@gmail.com>
---------------------------
What: Code that is now under CONFIG_WIRELESS_EXT_SYSFS
(in net/core/net-sysfs.c)
When: After the only user (hal) has seen a release with the patches
@ -590,3 +581,14 @@ Why: This driver has been superseded by g_mass_storage.
Who: Alan Stern <stern@rowland.harvard.edu>
----------------------------
What: threeg and interface sysfs files in /sys/devices/platform/acer-wmi
When: 2012
Why: Since 3.0 the internal 3G device can be autodetected and the threeg
rfkill device already exists, so the threeg sysfs file is no longer
necessary.
We also plan to remove the interface sysfs file that exposed which
ACPI-WMI interface was used by the acer-wmi driver. It will be replaced
by an information log message when acer-wmi initializes.
Who: Lee, Chun-Yi <jlee@novell.com>


@ -106,13 +106,20 @@ separated by spaces:
To use the first on-chip serial port at baud rate 115200, no parity, 8
bits, and no flow control.
(*) root=/dev/<xxxx>
(*) root=<xxxx>
This specifies the device upon which the root filesystem resides. For
example:
This specifies the device upon which the root filesystem resides. It
may be specified by major and minor number, device path, or even
partition uuid, if supported. For example:
/dev/nfs NFS root filesystem
/dev/mtdblock3 Fourth RedBoot partition on the System Flash
PARTUUID=00112233-4455-6677-8899-AABBCCDDEEFF/PARTNROFF=1
first partition after the partition with the given UUID
253:0 Device with major 253 and minor 0
Authoritative information can be found in
"Documentation/kernel-parameters.txt".
(*) rw


@ -292,6 +292,7 @@ Code Seq#(hex) Include File Comments
<mailto:buk@buks.ipn.de>
0xA0 all linux/sdp/sdp.h Industrial Device Project
<mailto:kenji@bitgate.com>
0xA2 00-0F arch/tile/include/asm/hardwall.h
0xA3 80-8F Port ACL in development:
<mailto:tlewis@mindspring.com>
0xA3 90-9F linux/dtlk.h


@ -163,6 +163,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
See also Documentation/power/pm.txt, pci=noacpi
acpi_rsdp= [ACPI,EFI,KEXEC]
Pass the RSDP address to the kernel, mostly used
on machines running EFI runtime service to boot the
second kernel for kdump.
acpi_apic_instance= [ACPI, IOAPIC]
Format: <int>
2: use 2nd APIC table, if available
@ -546,6 +551,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
/proc/<pid>/coredump_filter.
See also Documentation/filesystems/proc.txt.
cpuidle.off=1 [CPU_IDLE]
disable the cpuidle sub-system
cpcihp_generic= [HW,PCI] Generic port I/O CompactPCI driver
Format:
<first_slot>,<last_slot>,<port>,<enum_bit>[,<debug>]
@ -2153,6 +2161,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
[HW,MOUSE] Controls Logitech smartscroll autorepeat.
0 = disabled, 1 = enabled (default).
pstore.backend= Specify the name of the pstore backend to use
pt. [PARIDE]
See Documentation/blockdev/paride.txt.
@ -2238,6 +2248,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
ro [KNL] Mount root device read-only on boot
root= [KNL] Root filesystem
See name_to_dev_t comment in init/do_mounts.c.
rootdelay= [KNL] Delay (in seconds) to pause before attempting to
mount the root filesystem


@ -129,6 +129,20 @@ decimal 11 is the major of SCSI CD-ROMs, and the minor 0 stands for
the first of these. You can find out all valid major numbers by
looking into include/linux/major.h.
In addition to major and minor numbers, if the device containing your
root partition uses a partition table format with unique partition
identifiers, then you may use them. For instance,
"root=PARTUUID=00112233-4455-6677-8899-AABBCCDDEEFF". It is also
possible to reference another partition on the same device using a
known partition UUID as the starting point. For example,
if partition 5 of the device has the UUID of
00112233-4455-6677-8899-AABBCCDDEEFF then partition 3 may be found as
follows:
PARTUUID=00112233-4455-6677-8899-AABBCCDDEEFF/PARTNROFF=-2
Authoritative information can be found in
"Documentation/kernel-parameters.txt".
2.2) ro, rw
-----------


@ -599,7 +599,7 @@ num_unsol_na
affect only the active-backup mode. These options were added for
bonding versions 3.3.0 and 3.4.0 respectively.
From Linux 2.6.40 and bonding version 3.7.1, these notifications
From Linux 3.0 and bonding version 3.7.1, these notifications
are generated by the ipv4 and ipv6 code and the numbers of
repetitions cannot be set independently.


@ -54,11 +54,10 @@ referred to as subsystem-level callbacks in what follows.
By default, the callbacks are always invoked in process context with interrupts
enabled. However, subsystems can use the pm_runtime_irq_safe() helper function
to tell the PM core that a device's ->runtime_suspend() and ->runtime_resume()
callbacks should be invoked in atomic context with interrupts disabled
(->runtime_idle() is still invoked the default way). This implies that these
callback routines must not block or sleep, but it also means that the
synchronous helper functions listed at the end of Section 4 can be used within
an interrupt handler or in an atomic context.
callbacks should be invoked in atomic context with interrupts disabled.
This implies that these callback routines must not block or sleep, but it also
means that the synchronous helper functions listed at the end of Section 4 can
be used within an interrupt handler or in an atomic context.
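A hedged driver-side sketch of opting into this behaviour (not from the document): after pm_runtime_irq_safe() the runtime callbacks may run with interrupts disabled, so they only poke a hypothetical clock-gate register and never sleep. All device and register names are invented; driver registration is omitted.

#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#define MY_CLK_GATE_REG 0x10            /* hypothetical register offset */
static void __iomem *my_base;           /* mapped elsewhere in the driver */

static int my_runtime_suspend(struct device *dev)
{
        /* May be called in atomic context with interrupts disabled once
         * pm_runtime_irq_safe() has been requested: must not sleep. */
        writel(0, my_base + MY_CLK_GATE_REG);
        return 0;
}

static int my_runtime_resume(struct device *dev)
{
        writel(1, my_base + MY_CLK_GATE_REG);
        return 0;
}

static const struct dev_pm_ops my_pm_ops = {
        SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume, NULL)
};

static int my_probe(struct platform_device *pdev)
{
        /* Declare the runtime callbacks atomic-safe, then enable runtime PM */
        pm_runtime_irq_safe(&pdev->dev);
        pm_runtime_enable(&pdev->dev);
        return 0;
}

static struct platform_driver my_driver = {
        .probe  = my_probe,
        .driver = {
                .name   = "my-example-device",
                .pm     = &my_pm_ops,
        },
};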
The subsystem-level suspend callback is _entirely_ _responsible_ for handling
the suspend of the device as appropriate, which may, but need not include
@ -483,6 +482,7 @@ pm_runtime_suspend()
pm_runtime_autosuspend()
pm_runtime_resume()
pm_runtime_get_sync()
pm_runtime_put_sync()
pm_runtime_put_sync_suspend()
5. Runtime PM Initialization, Device Probing and Removal


@ -1925,6 +1925,12 @@ S: Maintained
F: arch/x86/kernel/cpuid.c
F: arch/x86/kernel/msr.c
CPU POWER MONITORING SUBSYSTEM
M: Dominik Brodowski <linux@dominikbrodowski.net>
M: Thomas Renninger <trenn@suse.de>
S: Maintained
F: tools/power/cpupower
CPUSETS
M: Paul Menage <menage@google.com>
W: http://www.bullopensource.org/cpuset/
@ -2637,9 +2643,8 @@ S: Maintained
F: arch/x86/math-emu/
FRAME RELAY DLCI/FRAD (Sangoma drivers too)
M: Mike McLagan <mike.mclagan@linux.org>
L: netdev@vger.kernel.org
S: Maintained
S: Orphan
F: drivers/net/wan/dlci.c
F: drivers/net/wan/sdla.c
@ -3361,6 +3366,12 @@ F: drivers/net/ixgb/
F: drivers/net/ixgbe/
F: drivers/net/ixgbevf/
INTEL MRST PMU DRIVER
M: Len Brown <len.brown@intel.com>
L: linux-pm@lists.linux-foundation.org
S: Supported
F: arch/x86/platform/mrst/pmu.*
INTEL PRO/WIRELESS 2100 NETWORK CONNECTION SUPPORT
L: linux-wireless@vger.kernel.org
S: Orphan
@ -4403,10 +4414,10 @@ F: net/*/netfilter/
F: net/netfilter/
NETLABEL
M: Paul Moore <paul.moore@hp.com>
M: Paul Moore <paul@paul-moore.com>
W: http://netlabel.sf.net
L: netdev@vger.kernel.org
S: Supported
S: Maintained
F: Documentation/netlabel/
F: include/net/netlabel.h
F: net/netlabel/
@ -4451,7 +4462,6 @@ F: include/linux/netdevice.h
NETWORKING [IPv4/IPv6]
M: "David S. Miller" <davem@davemloft.net>
M: Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
M: "Pekka Savola (ipv6)" <pekkas@netcore.fi>
M: James Morris <jmorris@namei.org>
M: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
M: Patrick McHardy <kaber@trash.net>
@ -4464,7 +4474,7 @@ F: include/net/ip*
F: arch/x86/net/*
NETWORKING [LABELED] (NetLabel, CIPSO, Labeled IPsec, SECMARK)
M: Paul Moore <paul.moore@hp.com>
M: Paul Moore <paul@paul-moore.com>
L: netdev@vger.kernel.org
S: Maintained
@ -4716,6 +4726,7 @@ S: Maintained
F: drivers/of
F: include/linux/of*.h
K: of_get_property
K: of_match_table
OPENRISC ARCHITECTURE
M: Jonas Bonn <jonas@southpole.se>
@ -6312,6 +6323,7 @@ F: include/linux/sysv_fs.h
TARGET SUBSYSTEM
M: Nicholas A. Bellinger <nab@linux-iscsi.org>
L: linux-scsi@vger.kernel.org
L: target-devel@vger.kernel.org
L: http://groups.google.com/group/linux-iscsi-target-dev
W: http://www.linux-iscsi.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/lio-core-2.6.git master


@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 0
PATCHLEVEL = 1
SUBLEVEL = 0
EXTRAVERSION =
EXTRAVERSION = -rc1
NAME = Sneaky Weasel
# *DOCUMENTATION*


@ -178,4 +178,7 @@ config HAVE_ARCH_MUTEX_CPU_RELAX
config HAVE_RCU_TABLE_FREE
bool
config ARCH_HAVE_NMI_SAFE_CMPXCHG
bool
source "kernel/gcov/Kconfig"


@ -14,6 +14,7 @@ config ALPHA
select AUTO_IRQ_AFFINITY if SMP
select GENERIC_IRQ_SHOW
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_HAVE_NMI_SAFE_CMPXCHG
help
The Alpha is a 64-bit general-purpose processor designed and
marketed by the Digital Equipment Corporation of blessed memory,


@ -112,9 +112,6 @@ EXPORT_SYMBOL(__put_user_4);
EXPORT_SYMBOL(__put_user_8);
#endif
/* crypto hash */
EXPORT_SYMBOL(sha_transform);
/* gcc lib functions */
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__ashrdi3);


@ -30,6 +30,7 @@
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/cpuidle.h>
#include <asm/cacheflush.h>
#include <asm/leds.h>
@ -196,7 +197,8 @@ void cpu_idle(void)
cpu_relax();
} else {
stop_critical_timings();
pm_idle();
if (cpuidle_idle_call())
pm_idle();
start_critical_timings();
/*
* This will eventually be removed - pm_idle


@ -12,7 +12,7 @@ lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \
strchr.o strrchr.o \
testchangebit.o testclearbit.o testsetbit.o \
ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
ucmpdi2.o lib1funcs.o div64.o sha1.o \
ucmpdi2.o lib1funcs.o div64.o \
io-readsb.o io-writesb.o io-readsl.o io-writesl.o
mmu-y := clear_user.o copy_page.o getuser.o putuser.o


@ -1,211 +0,0 @@
/*
* linux/arch/arm/lib/sha1.S
*
* SHA transform optimized for ARM
*
* Copyright: (C) 2005 by Nicolas Pitre <nico@fluxnic.net>
* Created: September 17, 2005
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* The reference implementation for this code is linux/lib/sha1.c
*/
#include <linux/linkage.h>
.text
/*
* void sha_transform(__u32 *digest, const char *in, __u32 *W)
*
* Note: the "in" ptr may be unaligned.
*/
ENTRY(sha_transform)
stmfd sp!, {r4 - r8, lr}
@ for (i = 0; i < 16; i++)
@ W[i] = be32_to_cpu(in[i]);
#ifdef __ARMEB__
mov r4, r0
mov r0, r2
mov r2, #64
bl memcpy
mov r2, r0
mov r0, r4
#else
mov r3, r2
mov lr, #16
1: ldrb r4, [r1], #1
ldrb r5, [r1], #1
ldrb r6, [r1], #1
ldrb r7, [r1], #1
subs lr, lr, #1
orr r5, r5, r4, lsl #8
orr r6, r6, r5, lsl #8
orr r7, r7, r6, lsl #8
str r7, [r3], #4
bne 1b
#endif
@ for (i = 0; i < 64; i++)
@ W[i+16] = ror(W[i+13] ^ W[i+8] ^ W[i+2] ^ W[i], 31);
sub r3, r2, #4
mov lr, #64
2: ldr r4, [r3, #4]!
subs lr, lr, #1
ldr r5, [r3, #8]
ldr r6, [r3, #32]
ldr r7, [r3, #52]
eor r4, r4, r5
eor r4, r4, r6
eor r4, r4, r7
mov r4, r4, ror #31
str r4, [r3, #64]
bne 2b
/*
* The SHA functions are:
*
* f1(B,C,D) = (D ^ (B & (C ^ D)))
* f2(B,C,D) = (B ^ C ^ D)
* f3(B,C,D) = ((B & C) | (D & (B | C)))
*
* Then the sub-blocks are processed as follows:
*
* A' = ror(A, 27) + f(B,C,D) + E + K + *W++
* B' = A
* C' = ror(B, 2)
* D' = C
* E' = D
*
* We therefore unroll each loop 5 times to avoid register shuffling.
* Also the ror for C (and also D and E which are successively derived
* from it) is applied in place to cut on an additional mov insn for
* each round.
*/
.macro sha_f1, A, B, C, D, E
ldr r3, [r2], #4
eor ip, \C, \D
add \E, r1, \E, ror #2
and ip, \B, ip, ror #2
add \E, \E, \A, ror #27
eor ip, ip, \D, ror #2
add \E, \E, r3
add \E, \E, ip
.endm
.macro sha_f2, A, B, C, D, E
ldr r3, [r2], #4
add \E, r1, \E, ror #2
eor ip, \B, \C, ror #2
add \E, \E, \A, ror #27
eor ip, ip, \D, ror #2
add \E, \E, r3
add \E, \E, ip
.endm
.macro sha_f3, A, B, C, D, E
ldr r3, [r2], #4
add \E, r1, \E, ror #2
orr ip, \B, \C, ror #2
add \E, \E, \A, ror #27
and ip, ip, \D, ror #2
add \E, \E, r3
and r3, \B, \C, ror #2
orr ip, ip, r3
add \E, \E, ip
.endm
ldmia r0, {r4 - r8}
mov lr, #4
ldr r1, .L_sha_K + 0
/* adjust initial values */
mov r6, r6, ror #30
mov r7, r7, ror #30
mov r8, r8, ror #30
3: subs lr, lr, #1
sha_f1 r4, r5, r6, r7, r8
sha_f1 r8, r4, r5, r6, r7
sha_f1 r7, r8, r4, r5, r6
sha_f1 r6, r7, r8, r4, r5
sha_f1 r5, r6, r7, r8, r4
bne 3b
ldr r1, .L_sha_K + 4
mov lr, #4
4: subs lr, lr, #1
sha_f2 r4, r5, r6, r7, r8
sha_f2 r8, r4, r5, r6, r7
sha_f2 r7, r8, r4, r5, r6
sha_f2 r6, r7, r8, r4, r5
sha_f2 r5, r6, r7, r8, r4
bne 4b
ldr r1, .L_sha_K + 8
mov lr, #4
5: subs lr, lr, #1
sha_f3 r4, r5, r6, r7, r8
sha_f3 r8, r4, r5, r6, r7
sha_f3 r7, r8, r4, r5, r6
sha_f3 r6, r7, r8, r4, r5
sha_f3 r5, r6, r7, r8, r4
bne 5b
ldr r1, .L_sha_K + 12
mov lr, #4
6: subs lr, lr, #1
sha_f2 r4, r5, r6, r7, r8
sha_f2 r8, r4, r5, r6, r7
sha_f2 r7, r8, r4, r5, r6
sha_f2 r6, r7, r8, r4, r5
sha_f2 r5, r6, r7, r8, r4
bne 6b
ldmia r0, {r1, r2, r3, ip, lr}
add r4, r1, r4
add r5, r2, r5
add r6, r3, r6, ror #2
add r7, ip, r7, ror #2
add r8, lr, r8, ror #2
stmia r0, {r4 - r8}
ldmfd sp!, {r4 - r8, pc}
ENDPROC(sha_transform)
.align 2
.L_sha_K:
.word 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6
/*
* void sha_init(__u32 *buf)
*/
.align 2
.L_sha_initial_digest:
.word 0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476, 0xc3d2e1f0
ENTRY(sha_init)
str lr, [sp, #-4]!
adr r1, .L_sha_initial_digest
ldmia r1, {r1, r2, r3, ip, lr}
stmia r0, {r1, r2, r3, ip, lr}
ldr pc, [sp], #4
ENDPROC(sha_init)


@ -11,6 +11,7 @@ config ARCH_MSM7X00A
select MSM_SMD
select MSM_SMD_PKG3
select CPU_V6
select GPIO_MSM_V1
select MSM_PROC_COMM
select HAS_MSM_DEBUG_UART_PHYS
@ -22,6 +23,7 @@ config ARCH_MSM7X30
select MSM_VIC
select CPU_V7
select MSM_GPIOMUX
select GPIO_MSM_V1
select MSM_PROC_COMM
select HAS_MSM_DEBUG_UART_PHYS
@ -33,6 +35,7 @@ config ARCH_QSD8X50
select MSM_VIC
select CPU_V7
select MSM_GPIOMUX
select GPIO_MSM_V1
select MSM_PROC_COMM
select HAS_MSM_DEBUG_UART_PHYS
@ -44,6 +47,7 @@ config ARCH_MSM8X60
select ARM_GIC
select CPU_V7
select MSM_V2_TLMM
select GPIO_MSM_V2
select MSM_GPIOMUX
select MSM_SCM if SMP


@ -29,11 +29,3 @@ obj-$(CONFIG_ARCH_MSM8960) += board-msm8960.o devices-msm8960.o
obj-$(CONFIG_ARCH_MSM7X30) += gpiomux-v1.o gpiomux.o
obj-$(CONFIG_ARCH_QSD8X50) += gpiomux-8x50.o gpiomux-v1.o gpiomux.o
obj-$(CONFIG_ARCH_MSM8X60) += gpiomux-8x60.o gpiomux-v2.o gpiomux.o
ifdef CONFIG_MSM_V2_TLMM
ifndef CONFIG_ARCH_MSM8960
# TODO: TLMM Mapping issues need to be resolved
obj-y += gpio-v2.o
endif
else
obj-y += gpio.o
endif


@ -1,376 +0,0 @@
/* linux/arch/arm/mach-msm/gpio.c
*
* Copyright (C) 2007 Google, Inc.
* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/bitops.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include "gpio_hw.h"
#include "gpiomux.h"
#define FIRST_GPIO_IRQ MSM_GPIO_TO_INT(0)
#define MSM_GPIO_BANK(bank, first, last) \
{ \
.regs = { \
.out = MSM_GPIO_OUT_##bank, \
.in = MSM_GPIO_IN_##bank, \
.int_status = MSM_GPIO_INT_STATUS_##bank, \
.int_clear = MSM_GPIO_INT_CLEAR_##bank, \
.int_en = MSM_GPIO_INT_EN_##bank, \
.int_edge = MSM_GPIO_INT_EDGE_##bank, \
.int_pos = MSM_GPIO_INT_POS_##bank, \
.oe = MSM_GPIO_OE_##bank, \
}, \
.chip = { \
.base = (first), \
.ngpio = (last) - (first) + 1, \
.get = msm_gpio_get, \
.set = msm_gpio_set, \
.direction_input = msm_gpio_direction_input, \
.direction_output = msm_gpio_direction_output, \
.to_irq = msm_gpio_to_irq, \
.request = msm_gpio_request, \
.free = msm_gpio_free, \
} \
}
#define MSM_GPIO_BROKEN_INT_CLEAR 1
struct msm_gpio_regs {
void __iomem *out;
void __iomem *in;
void __iomem *int_status;
void __iomem *int_clear;
void __iomem *int_en;
void __iomem *int_edge;
void __iomem *int_pos;
void __iomem *oe;
};
struct msm_gpio_chip {
spinlock_t lock;
struct gpio_chip chip;
struct msm_gpio_regs regs;
#if MSM_GPIO_BROKEN_INT_CLEAR
unsigned int_status_copy;
#endif
unsigned int both_edge_detect;
unsigned int int_enable[2]; /* 0: awake, 1: sleep */
};
static int msm_gpio_write(struct msm_gpio_chip *msm_chip,
unsigned offset, unsigned on)
{
unsigned mask = BIT(offset);
unsigned val;
val = readl(msm_chip->regs.out);
if (on)
writel(val | mask, msm_chip->regs.out);
else
writel(val & ~mask, msm_chip->regs.out);
return 0;
}
static void msm_gpio_update_both_edge_detect(struct msm_gpio_chip *msm_chip)
{
int loop_limit = 100;
unsigned pol, val, val2, intstat;
do {
val = readl(msm_chip->regs.in);
pol = readl(msm_chip->regs.int_pos);
pol = (pol & ~msm_chip->both_edge_detect) |
(~val & msm_chip->both_edge_detect);
writel(pol, msm_chip->regs.int_pos);
intstat = readl(msm_chip->regs.int_status);
val2 = readl(msm_chip->regs.in);
if (((val ^ val2) & msm_chip->both_edge_detect & ~intstat) == 0)
return;
} while (loop_limit-- > 0);
printk(KERN_ERR "msm_gpio_update_both_edge_detect, "
"failed to reach stable state %x != %x\n", val, val2);
}
static int msm_gpio_clear_detect_status(struct msm_gpio_chip *msm_chip,
unsigned offset)
{
unsigned bit = BIT(offset);
#if MSM_GPIO_BROKEN_INT_CLEAR
/* Save interrupts that already triggered before we loose them. */
/* Any interrupt that triggers between the read of int_status */
/* and the write to int_clear will still be lost though. */
msm_chip->int_status_copy |= readl(msm_chip->regs.int_status);
msm_chip->int_status_copy &= ~bit;
#endif
writel(bit, msm_chip->regs.int_clear);
msm_gpio_update_both_edge_detect(msm_chip);
return 0;
}
static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
struct msm_gpio_chip *msm_chip;
unsigned long irq_flags;
msm_chip = container_of(chip, struct msm_gpio_chip, chip);
spin_lock_irqsave(&msm_chip->lock, irq_flags);
writel(readl(msm_chip->regs.oe) & ~BIT(offset), msm_chip->regs.oe);
spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
return 0;
}
static int
msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, int value)
{
struct msm_gpio_chip *msm_chip;
unsigned long irq_flags;
msm_chip = container_of(chip, struct msm_gpio_chip, chip);
spin_lock_irqsave(&msm_chip->lock, irq_flags);
msm_gpio_write(msm_chip, offset, value);
writel(readl(msm_chip->regs.oe) | BIT(offset), msm_chip->regs.oe);
spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
return 0;
}
static int msm_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct msm_gpio_chip *msm_chip;
msm_chip = container_of(chip, struct msm_gpio_chip, chip);
return (readl(msm_chip->regs.in) & (1U << offset)) ? 1 : 0;
}
static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct msm_gpio_chip *msm_chip;
unsigned long irq_flags;
msm_chip = container_of(chip, struct msm_gpio_chip, chip);
spin_lock_irqsave(&msm_chip->lock, irq_flags);
msm_gpio_write(msm_chip, offset, value);
spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
}
static int msm_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
return MSM_GPIO_TO_INT(chip->base + offset);
}
#ifdef CONFIG_MSM_GPIOMUX
static int msm_gpio_request(struct gpio_chip *chip, unsigned offset)
{
return msm_gpiomux_get(chip->base + offset);
}
static void msm_gpio_free(struct gpio_chip *chip, unsigned offset)
{
msm_gpiomux_put(chip->base + offset);
}
#else
#define msm_gpio_request NULL
#define msm_gpio_free NULL
#endif
struct msm_gpio_chip msm_gpio_chips[] = {
#if defined(CONFIG_ARCH_MSM7X00A)
MSM_GPIO_BANK(0, 0, 15),
MSM_GPIO_BANK(1, 16, 42),
MSM_GPIO_BANK(2, 43, 67),
MSM_GPIO_BANK(3, 68, 94),
MSM_GPIO_BANK(4, 95, 106),
MSM_GPIO_BANK(5, 107, 121),
#elif defined(CONFIG_ARCH_MSM7X25) || defined(CONFIG_ARCH_MSM7X27)
MSM_GPIO_BANK(0, 0, 15),
MSM_GPIO_BANK(1, 16, 42),
MSM_GPIO_BANK(2, 43, 67),
MSM_GPIO_BANK(3, 68, 94),
MSM_GPIO_BANK(4, 95, 106),
MSM_GPIO_BANK(5, 107, 132),
#elif defined(CONFIG_ARCH_MSM7X30)
MSM_GPIO_BANK(0, 0, 15),
MSM_GPIO_BANK(1, 16, 43),
MSM_GPIO_BANK(2, 44, 67),
MSM_GPIO_BANK(3, 68, 94),
MSM_GPIO_BANK(4, 95, 106),
MSM_GPIO_BANK(5, 107, 133),
MSM_GPIO_BANK(6, 134, 150),
MSM_GPIO_BANK(7, 151, 181),
#elif defined(CONFIG_ARCH_QSD8X50)
MSM_GPIO_BANK(0, 0, 15),
MSM_GPIO_BANK(1, 16, 42),
MSM_GPIO_BANK(2, 43, 67),
MSM_GPIO_BANK(3, 68, 94),
MSM_GPIO_BANK(4, 95, 103),
MSM_GPIO_BANK(5, 104, 121),
MSM_GPIO_BANK(6, 122, 152),
MSM_GPIO_BANK(7, 153, 164),
#endif
};
static void msm_gpio_irq_ack(struct irq_data *d)
{
unsigned long irq_flags;
struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
spin_lock_irqsave(&msm_chip->lock, irq_flags);
msm_gpio_clear_detect_status(msm_chip,
d->irq - gpio_to_irq(msm_chip->chip.base));
spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
}
static void msm_gpio_irq_mask(struct irq_data *d)
{
unsigned long irq_flags;
struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base);
spin_lock_irqsave(&msm_chip->lock, irq_flags);
/* level triggered interrupts are also latched */
if (!(readl(msm_chip->regs.int_edge) & BIT(offset)))
msm_gpio_clear_detect_status(msm_chip, offset);
msm_chip->int_enable[0] &= ~BIT(offset);
writel(msm_chip->int_enable[0], msm_chip->regs.int_en);
spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
}
static void msm_gpio_irq_unmask(struct irq_data *d)
{
unsigned long irq_flags;
struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base);
spin_lock_irqsave(&msm_chip->lock, irq_flags);
/* level triggered interrupts are also latched */
if (!(readl(msm_chip->regs.int_edge) & BIT(offset)))
msm_gpio_clear_detect_status(msm_chip, offset);
msm_chip->int_enable[0] |= BIT(offset);
writel(msm_chip->int_enable[0], msm_chip->regs.int_en);
spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
}
static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
{
unsigned long irq_flags;
struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base);
spin_lock_irqsave(&msm_chip->lock, irq_flags);
if (on)
msm_chip->int_enable[1] |= BIT(offset);
else
msm_chip->int_enable[1] &= ~BIT(offset);
spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
return 0;
}
static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
unsigned long irq_flags;
struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base);
unsigned val, mask = BIT(offset);
spin_lock_irqsave(&msm_chip->lock, irq_flags);
val = readl(msm_chip->regs.int_edge);
if (flow_type & IRQ_TYPE_EDGE_BOTH) {
writel(val | mask, msm_chip->regs.int_edge);
__irq_set_handler_locked(d->irq, handle_edge_irq);
} else {
writel(val & ~mask, msm_chip->regs.int_edge);
__irq_set_handler_locked(d->irq, handle_level_irq);
}
if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) {
msm_chip->both_edge_detect |= mask;
msm_gpio_update_both_edge_detect(msm_chip);
} else {
msm_chip->both_edge_detect &= ~mask;
val = readl(msm_chip->regs.int_pos);
if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_HIGH))
writel(val | mask, msm_chip->regs.int_pos);
else
writel(val & ~mask, msm_chip->regs.int_pos);
}
spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
return 0;
}
static void msm_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
int i, j, mask;
unsigned val;
for (i = 0; i < ARRAY_SIZE(msm_gpio_chips); i++) {
struct msm_gpio_chip *msm_chip = &msm_gpio_chips[i];
val = readl(msm_chip->regs.int_status);
val &= msm_chip->int_enable[0];
while (val) {
mask = val & -val;
j = fls(mask) - 1;
/* printk("%s %08x %08x bit %d gpio %d irq %d\n",
__func__, v, m, j, msm_chip->chip.start + j,
FIRST_GPIO_IRQ + msm_chip->chip.start + j); */
val &= ~mask;
generic_handle_irq(FIRST_GPIO_IRQ +
msm_chip->chip.base + j);
}
}
desc->irq_data.chip->irq_ack(&desc->irq_data);
}
static struct irq_chip msm_gpio_irq_chip = {
.name = "msmgpio",
.irq_ack = msm_gpio_irq_ack,
.irq_mask = msm_gpio_irq_mask,
.irq_unmask = msm_gpio_irq_unmask,
.irq_set_wake = msm_gpio_irq_set_wake,
.irq_set_type = msm_gpio_irq_set_type,
};
static int __init msm_init_gpio(void)
{
int i, j = 0;
for (i = FIRST_GPIO_IRQ; i < FIRST_GPIO_IRQ + NR_GPIO_IRQS; i++) {
if (i - FIRST_GPIO_IRQ >=
msm_gpio_chips[j].chip.base +
msm_gpio_chips[j].chip.ngpio)
j++;
irq_set_chip_data(i, &msm_gpio_chips[j]);
irq_set_chip_and_handler(i, &msm_gpio_irq_chip,
handle_edge_irq);
set_irq_flags(i, IRQF_VALID);
}
for (i = 0; i < ARRAY_SIZE(msm_gpio_chips); i++) {
spin_lock_init(&msm_gpio_chips[i].lock);
writel(0, msm_gpio_chips[i].regs.int_en);
gpiochip_add(&msm_gpio_chips[i].chip);
}
irq_set_chained_handler(INT_GPIO_GROUP1, msm_gpio_irq_handler);
irq_set_chained_handler(INT_GPIO_GROUP2, msm_gpio_irq_handler);
irq_set_irq_wake(INT_GPIO_GROUP1, 1);
irq_set_irq_wake(INT_GPIO_GROUP2, 2);
return 0;
}
postcore_initcall(msm_init_gpio);


@ -1,278 +0,0 @@
/* arch/arm/mach-msm/gpio_hw.h
*
* Copyright (C) 2007 Google, Inc.
* Author: Brian Swetland <swetland@google.com>
* Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __ARCH_ARM_MACH_MSM_GPIO_HW_H
#define __ARCH_ARM_MACH_MSM_GPIO_HW_H
#include <mach/msm_iomap.h>
/* see 80-VA736-2 Rev C pp 695-751
**
** These are actually the *shadow* gpio registers, since the
** real ones (which allow full access) are only available to the
** ARM9 side of the world.
**
** Since the _BASE need to be page-aligned when we're mapping them
** to virtual addresses, adjust for the additional offset in these
** macros.
*/
#if defined(CONFIG_ARCH_MSM7X30)
#define MSM_GPIO1_REG(off) (MSM_GPIO1_BASE + (off))
#define MSM_GPIO2_REG(off) (MSM_GPIO2_BASE + 0x400 + (off))
#else
#define MSM_GPIO1_REG(off) (MSM_GPIO1_BASE + 0x800 + (off))
#define MSM_GPIO2_REG(off) (MSM_GPIO2_BASE + 0xC00 + (off))
#endif
#if defined(CONFIG_ARCH_MSM7X00A) || defined(CONFIG_ARCH_MSM7X25) ||\
defined(CONFIG_ARCH_MSM7X27)
/* output value */
#define MSM_GPIO_OUT_0 MSM_GPIO1_REG(0x00) /* gpio 15-0 */
#define MSM_GPIO_OUT_1 MSM_GPIO2_REG(0x00) /* gpio 42-16 */
#define MSM_GPIO_OUT_2 MSM_GPIO1_REG(0x04) /* gpio 67-43 */
#define MSM_GPIO_OUT_3 MSM_GPIO1_REG(0x08) /* gpio 94-68 */
#define MSM_GPIO_OUT_4 MSM_GPIO1_REG(0x0C) /* gpio 106-95 */
#define MSM_GPIO_OUT_5 MSM_GPIO1_REG(0x50) /* gpio 107-121 */
/* same pin map as above, output enable */
#define MSM_GPIO_OE_0 MSM_GPIO1_REG(0x10)
#define MSM_GPIO_OE_1 MSM_GPIO2_REG(0x08)
#define MSM_GPIO_OE_2 MSM_GPIO1_REG(0x14)
#define MSM_GPIO_OE_3 MSM_GPIO1_REG(0x18)
#define MSM_GPIO_OE_4 MSM_GPIO1_REG(0x1C)
#define MSM_GPIO_OE_5 MSM_GPIO1_REG(0x54)
/* same pin map as above, input read */
#define MSM_GPIO_IN_0 MSM_GPIO1_REG(0x34)
#define MSM_GPIO_IN_1 MSM_GPIO2_REG(0x20)
#define MSM_GPIO_IN_2 MSM_GPIO1_REG(0x38)
#define MSM_GPIO_IN_3 MSM_GPIO1_REG(0x3C)
#define MSM_GPIO_IN_4 MSM_GPIO1_REG(0x40)
#define MSM_GPIO_IN_5 MSM_GPIO1_REG(0x44)
/* same pin map as above, 1=edge 0=level interrup */
#define MSM_GPIO_INT_EDGE_0 MSM_GPIO1_REG(0x60)
#define MSM_GPIO_INT_EDGE_1 MSM_GPIO2_REG(0x50)
#define MSM_GPIO_INT_EDGE_2 MSM_GPIO1_REG(0x64)
#define MSM_GPIO_INT_EDGE_3 MSM_GPIO1_REG(0x68)
#define MSM_GPIO_INT_EDGE_4 MSM_GPIO1_REG(0x6C)
#define MSM_GPIO_INT_EDGE_5 MSM_GPIO1_REG(0xC0)
/* same pin map as above, 1=positive 0=negative */
#define MSM_GPIO_INT_POS_0 MSM_GPIO1_REG(0x70)
#define MSM_GPIO_INT_POS_1 MSM_GPIO2_REG(0x58)
#define MSM_GPIO_INT_POS_2 MSM_GPIO1_REG(0x74)
#define MSM_GPIO_INT_POS_3 MSM_GPIO1_REG(0x78)
#define MSM_GPIO_INT_POS_4 MSM_GPIO1_REG(0x7C)
#define MSM_GPIO_INT_POS_5 MSM_GPIO1_REG(0xBC)
/* same pin map as above, interrupt enable */
#define MSM_GPIO_INT_EN_0 MSM_GPIO1_REG(0x80)
#define MSM_GPIO_INT_EN_1 MSM_GPIO2_REG(0x60)
#define MSM_GPIO_INT_EN_2 MSM_GPIO1_REG(0x84)
#define MSM_GPIO_INT_EN_3 MSM_GPIO1_REG(0x88)
#define MSM_GPIO_INT_EN_4 MSM_GPIO1_REG(0x8C)
#define MSM_GPIO_INT_EN_5 MSM_GPIO1_REG(0xB8)
/* same pin map as above, write 1 to clear interrupt */
#define MSM_GPIO_INT_CLEAR_0 MSM_GPIO1_REG(0x90)
#define MSM_GPIO_INT_CLEAR_1 MSM_GPIO2_REG(0x68)
#define MSM_GPIO_INT_CLEAR_2 MSM_GPIO1_REG(0x94)
#define MSM_GPIO_INT_CLEAR_3 MSM_GPIO1_REG(0x98)
#define MSM_GPIO_INT_CLEAR_4 MSM_GPIO1_REG(0x9C)
#define MSM_GPIO_INT_CLEAR_5 MSM_GPIO1_REG(0xB4)
/* same pin map as above, 1=interrupt pending */
#define MSM_GPIO_INT_STATUS_0 MSM_GPIO1_REG(0xA0)
#define MSM_GPIO_INT_STATUS_1 MSM_GPIO2_REG(0x70)
#define MSM_GPIO_INT_STATUS_2 MSM_GPIO1_REG(0xA4)
#define MSM_GPIO_INT_STATUS_3 MSM_GPIO1_REG(0xA8)
#define MSM_GPIO_INT_STATUS_4 MSM_GPIO1_REG(0xAC)
#define MSM_GPIO_INT_STATUS_5 MSM_GPIO1_REG(0xB0)
#endif
#if defined(CONFIG_ARCH_QSD8X50)
/* output value */
#define MSM_GPIO_OUT_0 MSM_GPIO1_REG(0x00) /* gpio 15-0 */
#define MSM_GPIO_OUT_1 MSM_GPIO2_REG(0x00) /* gpio 42-16 */
#define MSM_GPIO_OUT_2 MSM_GPIO1_REG(0x04) /* gpio 67-43 */
#define MSM_GPIO_OUT_3 MSM_GPIO1_REG(0x08) /* gpio 94-68 */
#define MSM_GPIO_OUT_4 MSM_GPIO1_REG(0x0C) /* gpio 103-95 */
#define MSM_GPIO_OUT_5 MSM_GPIO1_REG(0x10) /* gpio 121-104 */
#define MSM_GPIO_OUT_6 MSM_GPIO1_REG(0x14) /* gpio 152-122 */
#define MSM_GPIO_OUT_7 MSM_GPIO1_REG(0x18) /* gpio 164-153 */
/* same pin map as above, output enable */
#define MSM_GPIO_OE_0 MSM_GPIO1_REG(0x20)
#define MSM_GPIO_OE_1 MSM_GPIO2_REG(0x08)
#define MSM_GPIO_OE_2 MSM_GPIO1_REG(0x24)
#define MSM_GPIO_OE_3 MSM_GPIO1_REG(0x28)
#define MSM_GPIO_OE_4 MSM_GPIO1_REG(0x2C)
#define MSM_GPIO_OE_5 MSM_GPIO1_REG(0x30)
#define MSM_GPIO_OE_6 MSM_GPIO1_REG(0x34)
#define MSM_GPIO_OE_7 MSM_GPIO1_REG(0x38)
/* same pin map as above, input read */
#define MSM_GPIO_IN_0 MSM_GPIO1_REG(0x50)
#define MSM_GPIO_IN_1 MSM_GPIO2_REG(0x20)
#define MSM_GPIO_IN_2 MSM_GPIO1_REG(0x54)
#define MSM_GPIO_IN_3 MSM_GPIO1_REG(0x58)
#define MSM_GPIO_IN_4 MSM_GPIO1_REG(0x5C)
#define MSM_GPIO_IN_5 MSM_GPIO1_REG(0x60)
#define MSM_GPIO_IN_6 MSM_GPIO1_REG(0x64)
#define MSM_GPIO_IN_7 MSM_GPIO1_REG(0x68)
/* same pin map as above, 1=edge 0=level interrup */
#define MSM_GPIO_INT_EDGE_0 MSM_GPIO1_REG(0x70)
#define MSM_GPIO_INT_EDGE_1 MSM_GPIO2_REG(0x50)
#define MSM_GPIO_INT_EDGE_2 MSM_GPIO1_REG(0x74)
#define MSM_GPIO_INT_EDGE_3 MSM_GPIO1_REG(0x78)
#define MSM_GPIO_INT_EDGE_4 MSM_GPIO1_REG(0x7C)
#define MSM_GPIO_INT_EDGE_5 MSM_GPIO1_REG(0x80)
#define MSM_GPIO_INT_EDGE_6 MSM_GPIO1_REG(0x84)
#define MSM_GPIO_INT_EDGE_7 MSM_GPIO1_REG(0x88)
/* same pin map as above, 1=positive 0=negative */
#define MSM_GPIO_INT_POS_0 MSM_GPIO1_REG(0x90)
#define MSM_GPIO_INT_POS_1 MSM_GPIO2_REG(0x58)
#define MSM_GPIO_INT_POS_2 MSM_GPIO1_REG(0x94)
#define MSM_GPIO_INT_POS_3 MSM_GPIO1_REG(0x98)
#define MSM_GPIO_INT_POS_4 MSM_GPIO1_REG(0x9C)
#define MSM_GPIO_INT_POS_5 MSM_GPIO1_REG(0xA0)
#define MSM_GPIO_INT_POS_6 MSM_GPIO1_REG(0xA4)
#define MSM_GPIO_INT_POS_7 MSM_GPIO1_REG(0xA8)
/* same pin map as above, interrupt enable */
#define MSM_GPIO_INT_EN_0 MSM_GPIO1_REG(0xB0)
#define MSM_GPIO_INT_EN_1 MSM_GPIO2_REG(0x60)
#define MSM_GPIO_INT_EN_2 MSM_GPIO1_REG(0xB4)
#define MSM_GPIO_INT_EN_3 MSM_GPIO1_REG(0xB8)
#define MSM_GPIO_INT_EN_4 MSM_GPIO1_REG(0xBC)
#define MSM_GPIO_INT_EN_5 MSM_GPIO1_REG(0xC0)
#define MSM_GPIO_INT_EN_6 MSM_GPIO1_REG(0xC4)
#define MSM_GPIO_INT_EN_7 MSM_GPIO1_REG(0xC8)
/* same pin map as above, write 1 to clear interrupt */
#define MSM_GPIO_INT_CLEAR_0 MSM_GPIO1_REG(0xD0)
#define MSM_GPIO_INT_CLEAR_1 MSM_GPIO2_REG(0x68)
#define MSM_GPIO_INT_CLEAR_2 MSM_GPIO1_REG(0xD4)
#define MSM_GPIO_INT_CLEAR_3 MSM_GPIO1_REG(0xD8)
#define MSM_GPIO_INT_CLEAR_4 MSM_GPIO1_REG(0xDC)
#define MSM_GPIO_INT_CLEAR_5 MSM_GPIO1_REG(0xE0)
#define MSM_GPIO_INT_CLEAR_6 MSM_GPIO1_REG(0xE4)
#define MSM_GPIO_INT_CLEAR_7 MSM_GPIO1_REG(0xE8)
/* same pin map as above, 1=interrupt pending */
#define MSM_GPIO_INT_STATUS_0 MSM_GPIO1_REG(0xF0)
#define MSM_GPIO_INT_STATUS_1 MSM_GPIO2_REG(0x70)
#define MSM_GPIO_INT_STATUS_2 MSM_GPIO1_REG(0xF4)
#define MSM_GPIO_INT_STATUS_3 MSM_GPIO1_REG(0xF8)
#define MSM_GPIO_INT_STATUS_4 MSM_GPIO1_REG(0xFC)
#define MSM_GPIO_INT_STATUS_5 MSM_GPIO1_REG(0x100)
#define MSM_GPIO_INT_STATUS_6 MSM_GPIO1_REG(0x104)
#define MSM_GPIO_INT_STATUS_7 MSM_GPIO1_REG(0x108)
#endif
#if defined(CONFIG_ARCH_MSM7X30)
/* output value */
#define MSM_GPIO_OUT_0 MSM_GPIO1_REG(0x00) /* gpio 15-0 */
#define MSM_GPIO_OUT_1 MSM_GPIO2_REG(0x00) /* gpio 43-16 */
#define MSM_GPIO_OUT_2 MSM_GPIO1_REG(0x04) /* gpio 67-44 */
#define MSM_GPIO_OUT_3 MSM_GPIO1_REG(0x08) /* gpio 94-68 */
#define MSM_GPIO_OUT_4 MSM_GPIO1_REG(0x0C) /* gpio 106-95 */
#define MSM_GPIO_OUT_5 MSM_GPIO1_REG(0x50) /* gpio 133-107 */
#define MSM_GPIO_OUT_6 MSM_GPIO1_REG(0xC4) /* gpio 150-134 */
#define MSM_GPIO_OUT_7 MSM_GPIO1_REG(0x214) /* gpio 181-151 */
/* same pin map as above, output enable */
#define MSM_GPIO_OE_0 MSM_GPIO1_REG(0x10)
#define MSM_GPIO_OE_1 MSM_GPIO2_REG(0x08)
#define MSM_GPIO_OE_2 MSM_GPIO1_REG(0x14)
#define MSM_GPIO_OE_3 MSM_GPIO1_REG(0x18)
#define MSM_GPIO_OE_4 MSM_GPIO1_REG(0x1C)
#define MSM_GPIO_OE_5 MSM_GPIO1_REG(0x54)
#define MSM_GPIO_OE_6 MSM_GPIO1_REG(0xC8)
#define MSM_GPIO_OE_7 MSM_GPIO1_REG(0x218)
/* same pin map as above, input read */
#define MSM_GPIO_IN_0 MSM_GPIO1_REG(0x34)
#define MSM_GPIO_IN_1 MSM_GPIO2_REG(0x20)
#define MSM_GPIO_IN_2 MSM_GPIO1_REG(0x38)
#define MSM_GPIO_IN_3 MSM_GPIO1_REG(0x3C)
#define MSM_GPIO_IN_4 MSM_GPIO1_REG(0x40)
#define MSM_GPIO_IN_5 MSM_GPIO1_REG(0x44)
#define MSM_GPIO_IN_6 MSM_GPIO1_REG(0xCC)
#define MSM_GPIO_IN_7 MSM_GPIO1_REG(0x21C)
/* same pin map as above, 1=edge 0=level interrupt */
#define MSM_GPIO_INT_EDGE_0 MSM_GPIO1_REG(0x60)
#define MSM_GPIO_INT_EDGE_1 MSM_GPIO2_REG(0x50)
#define MSM_GPIO_INT_EDGE_2 MSM_GPIO1_REG(0x64)
#define MSM_GPIO_INT_EDGE_3 MSM_GPIO1_REG(0x68)
#define MSM_GPIO_INT_EDGE_4 MSM_GPIO1_REG(0x6C)
#define MSM_GPIO_INT_EDGE_5 MSM_GPIO1_REG(0xC0)
#define MSM_GPIO_INT_EDGE_6 MSM_GPIO1_REG(0xD0)
#define MSM_GPIO_INT_EDGE_7 MSM_GPIO1_REG(0x240)
/* same pin map as above, 1=positive 0=negative */
#define MSM_GPIO_INT_POS_0 MSM_GPIO1_REG(0x70)
#define MSM_GPIO_INT_POS_1 MSM_GPIO2_REG(0x58)
#define MSM_GPIO_INT_POS_2 MSM_GPIO1_REG(0x74)
#define MSM_GPIO_INT_POS_3 MSM_GPIO1_REG(0x78)
#define MSM_GPIO_INT_POS_4 MSM_GPIO1_REG(0x7C)
#define MSM_GPIO_INT_POS_5 MSM_GPIO1_REG(0xBC)
#define MSM_GPIO_INT_POS_6 MSM_GPIO1_REG(0xD4)
#define MSM_GPIO_INT_POS_7 MSM_GPIO1_REG(0x228)
/* same pin map as above, interrupt enable */
#define MSM_GPIO_INT_EN_0 MSM_GPIO1_REG(0x80)
#define MSM_GPIO_INT_EN_1 MSM_GPIO2_REG(0x60)
#define MSM_GPIO_INT_EN_2 MSM_GPIO1_REG(0x84)
#define MSM_GPIO_INT_EN_3 MSM_GPIO1_REG(0x88)
#define MSM_GPIO_INT_EN_4 MSM_GPIO1_REG(0x8C)
#define MSM_GPIO_INT_EN_5 MSM_GPIO1_REG(0xB8)
#define MSM_GPIO_INT_EN_6 MSM_GPIO1_REG(0xD8)
#define MSM_GPIO_INT_EN_7 MSM_GPIO1_REG(0x22C)
/* same pin map as above, write 1 to clear interrupt */
#define MSM_GPIO_INT_CLEAR_0 MSM_GPIO1_REG(0x90)
#define MSM_GPIO_INT_CLEAR_1 MSM_GPIO2_REG(0x68)
#define MSM_GPIO_INT_CLEAR_2 MSM_GPIO1_REG(0x94)
#define MSM_GPIO_INT_CLEAR_3 MSM_GPIO1_REG(0x98)
#define MSM_GPIO_INT_CLEAR_4 MSM_GPIO1_REG(0x9C)
#define MSM_GPIO_INT_CLEAR_5 MSM_GPIO1_REG(0xB4)
#define MSM_GPIO_INT_CLEAR_6 MSM_GPIO1_REG(0xDC)
#define MSM_GPIO_INT_CLEAR_7 MSM_GPIO1_REG(0x230)
/* same pin map as above, 1=interrupt pending */
#define MSM_GPIO_INT_STATUS_0 MSM_GPIO1_REG(0xA0)
#define MSM_GPIO_INT_STATUS_1 MSM_GPIO2_REG(0x70)
#define MSM_GPIO_INT_STATUS_2 MSM_GPIO1_REG(0xA4)
#define MSM_GPIO_INT_STATUS_3 MSM_GPIO1_REG(0xA8)
#define MSM_GPIO_INT_STATUS_4 MSM_GPIO1_REG(0xAC)
#define MSM_GPIO_INT_STATUS_5 MSM_GPIO1_REG(0xB0)
#define MSM_GPIO_INT_STATUS_6 MSM_GPIO1_REG(0xE0)
#define MSM_GPIO_INT_STATUS_7 MSM_GPIO1_REG(0x234)
#endif
#endif

View File

@ -19,6 +19,7 @@
#include <linux/bitops.h>
#include <linux/errno.h>
#include <mach/msm_gpiomux.h>
#if defined(CONFIG_MSM_V2_TLMM)
#include "gpiomux-v2.h"
@ -71,12 +72,6 @@ enum {
*/
extern struct msm_gpiomux_config msm_gpiomux_configs[GPIOMUX_NGPIOS];
/* Increment a gpio's reference count, possibly activating the line. */
int __must_check msm_gpiomux_get(unsigned gpio);
/* Decrement a gpio's reference count, possibly suspending the line. */
int msm_gpiomux_put(unsigned gpio);
/* Install a new configuration to the gpio line. To avoid overwriting
* a configuration, leave the VALID bit out.
*/
@ -94,16 +89,6 @@ int msm_gpiomux_write(unsigned gpio,
*/
void __msm_gpiomux_write(unsigned gpio, gpiomux_config_t val);
#else
static inline int __must_check msm_gpiomux_get(unsigned gpio)
{
return -ENOSYS;
}
static inline int msm_gpiomux_put(unsigned gpio)
{
return -ENOSYS;
}
static inline int msm_gpiomux_write(unsigned gpio,
gpiomux_config_t active,
gpiomux_config_t suspended)

View File

@ -0,0 +1,38 @@
/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _LINUX_MSM_GPIOMUX_H
#define _LINUX_MSM_GPIOMUX_H
#ifdef CONFIG_MSM_GPIOMUX
/* Increment a gpio's reference count, possibly activating the line. */
int __must_check msm_gpiomux_get(unsigned gpio);
/* Decrement a gpio's reference count, possibly suspending the line. */
int msm_gpiomux_put(unsigned gpio);
#else
static inline int __must_check msm_gpiomux_get(unsigned gpio)
{
return -ENOSYS;
}
static inline int msm_gpiomux_put(unsigned gpio)
{
return -ENOSYS;
}
#endif
#endif /* _LINUX_MSM_GPIOMUX_H */
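
A minimal usage sketch (not part of this commit) of the two entry points the new <linux/msm_gpiomux.h> header exports; the function name and error handling below are illustrative assumptions, only msm_gpiomux_get()/msm_gpiomux_put() come from the header above.

#include <linux/msm_gpiomux.h>

/* Hypothetical caller: hold a reference while the line is in use. */
static int example_use_line(unsigned gpio)
{
	int ret = msm_gpiomux_get(gpio);	/* may activate the line */

	if (ret)
		return ret;
	/* ... drive or sample the gpio here ... */
	return msm_gpiomux_put(gpio);		/* may suspend the line */
}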

View File

@ -55,13 +55,11 @@
#define MSM_DMOV_PHYS 0xA9700000
#define MSM_DMOV_SIZE SZ_4K
#define MSM_GPIO1_BASE IOMEM(0xE0003000)
#define MSM_GPIO1_PHYS 0xA9200000
#define MSM_GPIO1_SIZE SZ_4K
#define MSM7X00_GPIO1_PHYS 0xA9200000
#define MSM7X00_GPIO1_SIZE SZ_4K
#define MSM_GPIO2_BASE IOMEM(0xE0004000)
#define MSM_GPIO2_PHYS 0xA9300000
#define MSM_GPIO2_SIZE SZ_4K
#define MSM7X00_GPIO2_PHYS 0xA9300000
#define MSM7X00_GPIO2_SIZE SZ_4K
#define MSM_CLK_CTL_BASE IOMEM(0xE0005000)
#define MSM_CLK_CTL_PHYS 0xA8600000

View File

@ -46,13 +46,11 @@
#define MSM_DMOV_PHYS 0xAC400000
#define MSM_DMOV_SIZE SZ_4K
#define MSM_GPIO1_BASE IOMEM(0xE0003000)
#define MSM_GPIO1_PHYS 0xAC001000
#define MSM_GPIO1_SIZE SZ_4K
#define MSM7X30_GPIO1_PHYS 0xAC001000
#define MSM7X30_GPIO1_SIZE SZ_4K
#define MSM_GPIO2_BASE IOMEM(0xE0004000)
#define MSM_GPIO2_PHYS 0xAC101000
#define MSM_GPIO2_SIZE SZ_4K
#define MSM7X30_GPIO2_PHYS 0xAC101000
#define MSM7X30_GPIO2_SIZE SZ_4K
#define MSM_CLK_CTL_BASE IOMEM(0xE0005000)
#define MSM_CLK_CTL_PHYS 0xAB800000

View File

@ -46,13 +46,11 @@
#define MSM_DMOV_PHYS 0xA9700000
#define MSM_DMOV_SIZE SZ_4K
#define MSM_GPIO1_BASE IOMEM(0xE0003000)
#define MSM_GPIO1_PHYS 0xA9000000
#define MSM_GPIO1_SIZE SZ_4K
#define QSD8X50_GPIO1_PHYS 0xA9000000
#define QSD8X50_GPIO1_SIZE SZ_4K
#define MSM_GPIO2_BASE IOMEM(0xE0004000)
#define MSM_GPIO2_PHYS 0xA9100000
#define MSM_GPIO2_SIZE SZ_4K
#define QSD8X50_GPIO2_PHYS 0xA9100000
#define QSD8X50_GPIO2_SIZE SZ_4K
#define MSM_CLK_CTL_BASE IOMEM(0xE0005000)
#define MSM_CLK_CTL_PHYS 0xA8600000

View File

@ -61,5 +61,7 @@
#define MSM_QGIC_CPU_BASE IOMEM(0xF0001000)
#define MSM_TMR_BASE IOMEM(0xF0200000)
#define MSM_TMR0_BASE IOMEM(0xF0201000)
#define MSM_GPIO1_BASE IOMEM(0xE0003000)
#define MSM_GPIO2_BASE IOMEM(0xE0004000)
#endif

View File

@ -43,8 +43,8 @@ static struct map_desc msm_io_desc[] __initdata = {
MSM_DEVICE(VIC),
MSM_CHIP_DEVICE(CSR, MSM7X00),
MSM_DEVICE(DMOV),
MSM_DEVICE(GPIO1),
MSM_DEVICE(GPIO2),
MSM_CHIP_DEVICE(GPIO1, MSM7X00),
MSM_CHIP_DEVICE(GPIO2, MSM7X00),
MSM_DEVICE(CLK_CTL),
#ifdef CONFIG_MSM_DEBUG_UART
MSM_DEVICE(DEBUG_UART),
@ -76,8 +76,8 @@ static struct map_desc qsd8x50_io_desc[] __initdata = {
MSM_DEVICE(VIC),
MSM_CHIP_DEVICE(CSR, QSD8X50),
MSM_DEVICE(DMOV),
MSM_DEVICE(GPIO1),
MSM_DEVICE(GPIO2),
MSM_CHIP_DEVICE(GPIO1, QSD8X50),
MSM_CHIP_DEVICE(GPIO2, QSD8X50),
MSM_DEVICE(CLK_CTL),
MSM_DEVICE(SIRC),
MSM_DEVICE(SCPLL),
@ -135,8 +135,8 @@ static struct map_desc msm7x30_io_desc[] __initdata = {
MSM_DEVICE(VIC),
MSM_CHIP_DEVICE(CSR, MSM7X30),
MSM_DEVICE(DMOV),
MSM_DEVICE(GPIO1),
MSM_DEVICE(GPIO2),
MSM_CHIP_DEVICE(GPIO1, MSM7X30),
MSM_CHIP_DEVICE(GPIO2, MSM7X30),
MSM_DEVICE(CLK_CTL),
MSM_DEVICE(CLK_CTL_SH2),
MSM_DEVICE(AD5),

View File

@ -25,6 +25,7 @@
#include <video/omapdss.h>
#include <plat/omap_hwmod.h>
#include <plat/omap_device.h>
#include <plat/omap-pm.h>
static struct platform_device omap_display_device = {
.name = "omapdss",
@ -42,20 +43,6 @@ static struct omap_device_pm_latency omap_dss_latency[] = {
},
};
/* oh_core is used for getting opt-clocks */
static struct omap_hwmod *oh_core;
static bool opt_clock_available(const char *clk_role)
{
int i;
for (i = 0; i < oh_core->opt_clks_cnt; i++) {
if (!strcmp(oh_core->opt_clks[i].role, clk_role))
return true;
}
return false;
}
struct omap_dss_hwmod_data {
const char *oh_name;
const char *dev_name;
@ -109,16 +96,9 @@ int __init omap_display_init(struct omap_dss_board_info *board_data)
oh_count = ARRAY_SIZE(omap4_dss_hwmod_data);
}
/* opt_clks are always associated with dss hwmod */
oh_core = omap_hwmod_lookup("dss_core");
if (!oh_core) {
pr_err("Could not look up dss_core.\n");
return -ENODEV;
}
pdata.board_data = board_data;
pdata.board_data->get_last_off_on_transaction_id = NULL;
pdata.opt_clock_available = opt_clock_available;
pdata.board_data->get_context_loss_count =
omap_pm_get_dev_context_loss_count;
for (i = 0; i < oh_count; i++) {
oh = omap_hwmod_lookup(curr_dss_hwmod[i].oh_name);

View File

@ -259,9 +259,6 @@ static struct clk mstp_clks[MSTP_NR] = {
[CMMSTP003] = MSTP(&r_clk, CMMSTPCR0, 3, 0), /* KEYSC */
};
#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
#define CLKDEV_DEV_ID(_id, _clk) { .dev_id = _id, .clk = _clk }
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("r_clk", &r_clk),

View File

@ -561,10 +561,6 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP403] = MSTP(&r_clk, SMSTPCR4, 3, 0), /* KEYSC */
};
#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
#define CLKDEV_DEV_ID(_id, _clk) { .dev_id = _id, .clk = _clk }
#define CLKDEV_ICK_ID(_cid, _did, _clk) { .con_id = _cid, .dev_id = _did, .clk = _clk }
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("dv_clki_div2_clk", &sh7372_dv_clki_div2_clk),

View File

@ -267,9 +267,6 @@ static struct clk mstp_clks[] = {
[MSTP403] = MSTP(&r_clk, SMSTPCR4, 3, 0), /* KEYSC */
};
#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
#define CLKDEV_DEV_ID(_id, _clk) { .dev_id = _id, .clk = _clk }
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("r_clk", &r_clk),

View File

@ -306,10 +306,6 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP403] = MSTP(&r_clk, SMSTPCR4, 3, 0), /* KEYSC */
};
#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
#define CLKDEV_DEV_ID(_id, _clk) { .dev_id = _id, .clk = _clk }
#define CLKDEV_ICK_ID(_cid, _did, _clk) { .con_id = _cid, .dev_id = _did, .clk = _clk }
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("r_clk", &r_clk),

View File

@ -10,6 +10,7 @@ config AVR32
select GENERIC_IRQ_PROBE
select HARDIRQS_SW_RESEND
select GENERIC_IRQ_SHOW
select ARCH_HAVE_NMI_SAFE_CMPXCHG
help
AVR32 is a high-performance 32-bit RISC microprocessor core,
designed for cost-sensitive embedded applications, with particular

View File

@ -158,7 +158,7 @@ static int sync_serial_open(struct inode *inode, struct file *file);
static int sync_serial_release(struct inode *inode, struct file *file);
static unsigned int sync_serial_poll(struct file *filp, poll_table *wait);
static int sync_serial_ioctl(struct file *file,
static long sync_serial_ioctl(struct file *file,
unsigned int cmd, unsigned long arg);
static ssize_t sync_serial_write(struct file *file, const char *buf,
size_t count, loff_t *ppos);
@ -625,11 +625,11 @@ static int sync_serial_open(struct inode *inode, struct file *file)
*R_IRQ_MASK1_SET = 1 << port->data_avail_bit;
DEBUG(printk(KERN_DEBUG "sser%d rec started\n", dev));
}
ret = 0;
err = 0;
out:
mutex_unlock(&sync_serial_mutex);
return ret;
return err;
}
static int sync_serial_release(struct inode *inode, struct file *file)

View File

@ -20,6 +20,9 @@
#define crisv10_mask_irq(irq_nr) (*R_VECT_MASK_CLR = 1 << (irq_nr));
#define crisv10_unmask_irq(irq_nr) (*R_VECT_MASK_SET = 1 << (irq_nr));
extern void kgdb_init(void);
extern void breakpoint(void);
/* don't use set_int_vector, it bypasses the linux interrupt handlers. it is
* global just so that the kernel gdb can use it.
*/

View File

@ -11,8 +11,6 @@
#ifdef __KERNEL__
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
#ifndef __ASSEMBLY__
#include <asm/types.h>
#include <asm/processor.h>
@ -67,8 +65,10 @@ struct thread_info {
#define init_thread_info (init_thread_union.thread_info)
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
/* thread information allocation */
#define alloc_thread_info(tsk, node) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
#define alloc_thread_info_node(tsk, node) \
((struct thread_info *) __get_free_pages(GFP_KERNEL, 1))
#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
#endif /* !__ASSEMBLY__ */

View File

@ -7,6 +7,7 @@ config FRV
select HAVE_PERF_EVENTS
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_SHOW
select ARCH_HAVE_NMI_SAFE_CMPXCHG
config ZONE_DMA
bool

View File

@ -27,6 +27,8 @@ config IA64
select GENERIC_PENDING_IRQ if SMP
select IRQ_PER_CPU
select GENERIC_IRQ_SHOW
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_HAVE_NMI_SAFE_CMPXCHG
default y
help
The Itanium Processor Family is Intel's 64-bit successor to
@ -89,6 +91,9 @@ config GENERIC_TIME_VSYSCALL
config HAVE_SETUP_PER_CPU_AREA
def_bool y
config GENERIC_GPIO
def_bool y
config DMI
bool
default y

View File

@ -0,0 +1,55 @@
/*
* Generic GPIO API implementation for IA-64.
*
 * A straight copy of that for PowerPC, which was:
*
* Copyright (c) 2007-2008 MontaVista Software, Inc.
*
* Author: Anton Vorontsov <avorontsov@ru.mvista.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#ifndef _ASM_IA64_GPIO_H
#define _ASM_IA64_GPIO_H
#include <linux/errno.h>
#include <asm-generic/gpio.h>
#ifdef CONFIG_GPIOLIB
/*
* We don't (yet) implement inlined/rapid versions for on-chip gpios.
* Just call gpiolib.
*/
static inline int gpio_get_value(unsigned int gpio)
{
return __gpio_get_value(gpio);
}
static inline void gpio_set_value(unsigned int gpio, int value)
{
__gpio_set_value(gpio, value);
}
static inline int gpio_cansleep(unsigned int gpio)
{
return __gpio_cansleep(gpio);
}
static inline int gpio_to_irq(unsigned int gpio)
{
return __gpio_to_irq(gpio);
}
static inline int irq_to_gpio(unsigned int irq)
{
return -EINVAL;
}
#endif /* CONFIG_GPIOLIB */
#endif /* _ASM_IA64_GPIO_H */
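
A hedged consumer sketch (not part of this commit): with the wrappers above, IA-64 code can use the generic gpiolib calls. The function name, gpio number handling and label are illustrative; gpio_request(), gpio_direction_input() and gpio_free() come from gpiolib rather than from this header.

#include <linux/gpio.h>

static int example_read_pin(unsigned int gpio)
{
	int ret = gpio_request(gpio, "example");

	if (ret)
		return ret;
	ret = gpio_direction_input(gpio);
	if (!ret)
		ret = gpio_get_value(gpio);	/* wraps __gpio_get_value() above */
	gpio_free(gpio);
	return ret;
}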

View File

@ -156,7 +156,7 @@ prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name, \
#define STUB_SET_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor, \
unsigned long attr, unsigned long data_size, \
u32 attr, unsigned long data_size, \
void *data) \
{ \
struct ia64_fpreg fr[6]; \

View File

@ -6,6 +6,7 @@ config M68K
select GENERIC_ATOMIC64 if MMU
select HAVE_GENERIC_HARDIRQS if !MMU
select GENERIC_IRQ_SHOW if !MMU
select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
config RWSEM_GENERIC_SPINLOCK
bool

View File

@ -372,12 +372,6 @@ config AMIGA_PCMCIA
Include support in the kernel for pcmcia on Amiga 1200 and Amiga
600. If you intend to use pcmcia cards say Y; otherwise say N.
config STRAM_PROC
bool "ST-RAM statistics in /proc"
depends on ATARI
help
Say Y here to report ST-RAM usage statistics in /proc/stram.
config HEARTBEAT
bool "Use power LED as a heartbeat" if AMIGA || APOLLO || ATARI || MAC ||Q40
default y if !AMIGA && !APOLLO && !ATARI && !MAC && !Q40 && HP300

View File

@ -16,6 +16,7 @@
#include <linux/string.h>
#include <linux/module.h>
#include <asm/atomic.h>
#include <asm/page.h>
#include <asm/amigahw.h>
@ -23,111 +24,100 @@ unsigned long amiga_chip_size;
EXPORT_SYMBOL(amiga_chip_size);
static struct resource chipram_res = {
.name = "Chip RAM", .start = CHIP_PHYSADDR
.name = "Chip RAM", .start = CHIP_PHYSADDR
};
static unsigned long chipavail;
static atomic_t chipavail;
void __init amiga_chip_init(void)
{
if (!AMIGAHW_PRESENT(CHIP_RAM))
return;
if (!AMIGAHW_PRESENT(CHIP_RAM))
return;
chipram_res.end = amiga_chip_size-1;
request_resource(&iomem_resource, &chipram_res);
chipram_res.end = CHIP_PHYSADDR + amiga_chip_size - 1;
request_resource(&iomem_resource, &chipram_res);
chipavail = amiga_chip_size;
atomic_set(&chipavail, amiga_chip_size);
}
void *amiga_chip_alloc(unsigned long size, const char *name)
{
struct resource *res;
struct resource *res;
void *p;
/* round up */
size = PAGE_ALIGN(size);
res = kzalloc(sizeof(struct resource), GFP_KERNEL);
if (!res)
return NULL;
#ifdef DEBUG
printk("amiga_chip_alloc: allocate %ld bytes\n", size);
#endif
res = kzalloc(sizeof(struct resource), GFP_KERNEL);
if (!res)
return NULL;
res->name = name;
res->name = name;
p = amiga_chip_alloc_res(size, res);
if (!p) {
kfree(res);
return NULL;
}
if (allocate_resource(&chipram_res, res, size, 0, UINT_MAX, PAGE_SIZE, NULL, NULL) < 0) {
kfree(res);
return NULL;
}
chipavail -= size;
#ifdef DEBUG
printk("amiga_chip_alloc: returning %lx\n", res->start);
#endif
return (void *)ZTWO_VADDR(res->start);
return p;
}
EXPORT_SYMBOL(amiga_chip_alloc);
/*
* Warning:
* amiga_chip_alloc_res is meant only for drivers that need to allocate
* Chip RAM before kmalloc() is functional. As a consequence, those
* drivers must not free that Chip RAM afterwards.
*/
/*
* Warning:
* amiga_chip_alloc_res is meant only for drivers that need to
* allocate Chip RAM before kmalloc() is functional. As a consequence,
* those drivers must not free that Chip RAM afterwards.
*/
void * __init amiga_chip_alloc_res(unsigned long size, struct resource *res)
void *amiga_chip_alloc_res(unsigned long size, struct resource *res)
{
unsigned long start;
int error;
/* round up */
size = PAGE_ALIGN(size);
/* dmesg into chipmem prefers memory at the safe end */
start = CHIP_PHYSADDR + chipavail - size;
/* round up */
size = PAGE_ALIGN(size);
#ifdef DEBUG
printk("amiga_chip_alloc_res: allocate %ld bytes\n", size);
#endif
if (allocate_resource(&chipram_res, res, size, start, UINT_MAX, PAGE_SIZE, NULL, NULL) < 0) {
printk("amiga_chip_alloc_res: first alloc failed!\n");
if (allocate_resource(&chipram_res, res, size, 0, UINT_MAX, PAGE_SIZE, NULL, NULL) < 0)
return NULL;
}
chipavail -= size;
#ifdef DEBUG
printk("amiga_chip_alloc_res: returning %lx\n", res->start);
#endif
return (void *)ZTWO_VADDR(res->start);
pr_debug("amiga_chip_alloc_res: allocate %lu bytes\n", size);
error = allocate_resource(&chipram_res, res, size, 0, UINT_MAX,
PAGE_SIZE, NULL, NULL);
if (error < 0) {
pr_err("amiga_chip_alloc_res: allocate_resource() failed %d!\n",
error);
return NULL;
}
atomic_sub(size, &chipavail);
pr_debug("amiga_chip_alloc_res: returning %pR\n", res);
return (void *)ZTWO_VADDR(res->start);
}
void amiga_chip_free(void *ptr)
{
unsigned long start = ZTWO_PADDR(ptr);
struct resource **p, *res;
unsigned long size;
unsigned long start = ZTWO_PADDR(ptr);
struct resource *res;
unsigned long size;
for (p = &chipram_res.child; (res = *p); p = &res->sibling) {
if (res->start != start)
continue;
*p = res->sibling;
size = res->end-start;
#ifdef DEBUG
printk("amiga_chip_free: free %ld bytes at %p\n", size, ptr);
#endif
chipavail += size;
res = lookup_resource(&chipram_res, start);
if (!res) {
pr_err("amiga_chip_free: trying to free nonexistent region at "
"%p\n", ptr);
return;
}
size = resource_size(res);
pr_debug("amiga_chip_free: free %lu bytes at %p\n", size, ptr);
atomic_add(size, &chipavail);
release_resource(res);
kfree(res);
return;
}
printk("amiga_chip_free: trying to free nonexistent region at %p\n", ptr);
}
EXPORT_SYMBOL(amiga_chip_free);
unsigned long amiga_chip_avail(void)
{
#ifdef DEBUG
printk("amiga_chip_avail : %ld bytes\n", chipavail);
#endif
return chipavail;
unsigned long n = atomic_read(&chipavail);
pr_debug("amiga_chip_avail : %lu bytes\n", n);
return n;
}
EXPORT_SYMBOL(amiga_chip_avail);
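
A hedged usage sketch (not from this commit) of the resource-based Chip RAM allocator above; the buffer size, owner string and function names are illustrative, and the include is only the assumed location of the prototypes.

#include <linux/errno.h>
#include <asm/amigahw.h>	/* assumed home of the amiga_chip_*() prototypes */

static void *example_chip_buf;

static int example_grab_chipram(void)
{
	example_chip_buf = amiga_chip_alloc(4096, "example driver");
	return example_chip_buf ? 0 : -ENOMEM;
}

static void example_release_chipram(void)
{
	amiga_chip_free(example_chip_buf);
}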

View File

@ -1,5 +1,5 @@
/*
* arch/m68k/atari/stram.c: Functions for ST-RAM allocations
* Functions for ST-RAM allocations
*
* Copyright 1994-97 Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de>
*
@ -30,91 +30,35 @@
#include <asm/atari_stram.h>
#include <asm/io.h>
#undef DEBUG
#ifdef DEBUG
#define DPRINTK(fmt,args...) printk( fmt, ##args )
#else
#define DPRINTK(fmt,args...)
#endif
#if defined(CONFIG_PROC_FS) && defined(CONFIG_STRAM_PROC)
/* abbrev for the && above... */
#define DO_PROC
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#endif
/*
* ++roman:
*
* New version of ST-Ram buffer allocation. Instead of using the
* 1 MB - 4 KB that remain when the ST-Ram chunk starts at $1000
* (1 MB granularity!), such buffers are reserved like this:
*
* - If the kernel resides in ST-Ram anyway, we can take the buffer
* from behind the current kernel data space the normal way
* (incrementing start_mem).
*
* - If the kernel is in TT-Ram, stram_init() initializes start and
* end of the available region. Buffers are allocated from there
* and mem_init() later marks the such used pages as reserved.
* Since each TT-Ram chunk is at least 4 MB in size, I hope there
* won't be an overrun of the ST-Ram region by normal kernel data
* space.
*
* For that, ST-Ram may only be allocated while kernel initialization
* is going on, or exactly: before mem_init() is called. There is also
* no provision now for freeing ST-Ram buffers. It seems that isn't
* really needed.
*
* The ST-RAM allocator allocates memory from a pool of reserved ST-RAM of
* configurable size, set aside on ST-RAM init.
* As long as this pool is not exhausted, allocation of real ST-RAM can be
* guaranteed.
*/
/* Start and end (virtual) of ST-RAM */
static void *stram_start, *stram_end;
/* set after memory_init() executed and allocations via start_mem aren't
* possible anymore */
static int mem_init_done;
/* set if kernel is in ST-RAM */
static int kernel_in_stram;
typedef struct stram_block {
struct stram_block *next;
void *start;
unsigned long size;
unsigned flags;
const char *owner;
} BLOCK;
static struct resource stram_pool = {
.name = "ST-RAM Pool"
};
/* values for flags field */
#define BLOCK_FREE 0x01 /* free structure in the BLOCKs pool */
#define BLOCK_KMALLOCED 0x02 /* structure allocated by kmalloc() */
#define BLOCK_GFP 0x08 /* block allocated with __get_dma_pages() */
static unsigned long pool_size = 1024*1024;
/* list of allocated blocks */
static BLOCK *alloc_list;
/* We can't always use kmalloc() to allocate BLOCK structures, since
* stram_alloc() can be called rather early. So we need some pool of
* statically allocated structures. 20 of them is more than enough, so in most
* cases we never should need to call kmalloc(). */
#define N_STATIC_BLOCKS 20
static BLOCK static_blocks[N_STATIC_BLOCKS];
static int __init atari_stram_setup(char *arg)
{
if (!MACH_IS_ATARI)
return 0;
/***************************** Prototypes *****************************/
pool_size = memparse(arg, NULL);
return 0;
}
static BLOCK *add_region( void *addr, unsigned long size );
static BLOCK *find_region( void *addr );
static int remove_region( BLOCK *block );
early_param("stram_pool", atari_stram_setup);
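
A hedged usage note (not part of the commit): with the early_param() hook above, the size of the reserved pool can be overridden from the kernel command line, e.g. stram_pool=512k; memparse() accepts the usual k/m/g suffixes, and the default stays at the 1 MiB initialised above.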
/************************* End of Prototypes **************************/
/* ------------------------------------------------------------------------ */
/* Public Interface */
/* ------------------------------------------------------------------------ */
/*
* This init function is called very early by atari/config.c
@ -123,25 +67,23 @@ static int remove_region( BLOCK *block );
void __init atari_stram_init(void)
{
int i;
void *stram_start;
/* initialize static blocks */
for( i = 0; i < N_STATIC_BLOCKS; ++i )
static_blocks[i].flags = BLOCK_FREE;
/* determine whether kernel code resides in ST-RAM (then ST-RAM is the
* first memory block at virtual 0x0) */
/*
* determine whether kernel code resides in ST-RAM
* (then ST-RAM is the first memory block at virtual 0x0)
*/
stram_start = phys_to_virt(0);
kernel_in_stram = (stram_start == 0);
for( i = 0; i < m68k_num_memory; ++i ) {
for (i = 0; i < m68k_num_memory; ++i) {
if (m68k_memory[i].addr == 0) {
/* skip first 2kB or page (supervisor-only!) */
stram_end = stram_start + m68k_memory[i].size;
return;
}
}
/* Should never come here! (There is always ST-Ram!) */
panic( "atari_stram_init: no ST-RAM found!" );
panic("atari_stram_init: no ST-RAM found!");
}
@ -151,226 +93,68 @@ void __init atari_stram_init(void)
*/
void __init atari_stram_reserve_pages(void *start_mem)
{
/* always reserve first page of ST-RAM, the first 2 kB are
* supervisor-only! */
/*
* always reserve first page of ST-RAM, the first 2 KiB are
* supervisor-only!
*/
if (!kernel_in_stram)
reserve_bootmem(0, PAGE_SIZE, BOOTMEM_DEFAULT);
}
stram_pool.start = (resource_size_t)alloc_bootmem_low_pages(pool_size);
stram_pool.end = stram_pool.start + pool_size - 1;
request_resource(&iomem_resource, &stram_pool);
void atari_stram_mem_init_hook (void)
{
mem_init_done = 1;
pr_debug("atari_stram pool: size = %lu bytes, resource = %pR\n",
pool_size, &stram_pool);
}
/*
* This is main public interface: somehow allocate a ST-RAM block
*
* - If we're before mem_init(), we have to make a static allocation. The
* region is taken in the kernel data area (if the kernel is in ST-RAM) or
* from the start of ST-RAM (if the kernel is in TT-RAM) and added to the
* rsvd_stram_* region. The ST-RAM is somewhere in the middle of kernel
* address space in the latter case.
*
* - If mem_init() already has been called, try with __get_dma_pages().
* This has the disadvantage that it's very hard to get more than 1 page,
* and it is likely to fail :-(
*
*/
void *atari_stram_alloc(long size, const char *owner)
void *atari_stram_alloc(unsigned long size, const char *owner)
{
void *addr = NULL;
BLOCK *block;
int flags;
struct resource *res;
int error;
DPRINTK("atari_stram_alloc(size=%08lx,owner=%s)\n", size, owner);
pr_debug("atari_stram_alloc: allocate %lu bytes\n", size);
if (!mem_init_done)
return alloc_bootmem_low(size);
else {
/* After mem_init(): can only resort to __get_dma_pages() */
addr = (void *)__get_dma_pages(GFP_KERNEL, get_order(size));
flags = BLOCK_GFP;
DPRINTK( "atari_stram_alloc: after mem_init, "
"get_pages=%p\n", addr );
/* round up */
size = PAGE_ALIGN(size);
res = kzalloc(sizeof(struct resource), GFP_KERNEL);
if (!res)
return NULL;
res->name = owner;
error = allocate_resource(&stram_pool, res, size, 0, UINT_MAX,
PAGE_SIZE, NULL, NULL);
if (error < 0) {
pr_err("atari_stram_alloc: allocate_resource() failed %d!\n",
error);
kfree(res);
return NULL;
}
if (addr) {
if (!(block = add_region( addr, size ))) {
/* out of memory for BLOCK structure :-( */
DPRINTK( "atari_stram_alloc: out of mem for BLOCK -- "
"freeing again\n" );
free_pages((unsigned long)addr, get_order(size));
return( NULL );
}
block->owner = owner;
block->flags |= flags;
}
return( addr );
pr_debug("atari_stram_alloc: returning %pR\n", res);
return (void *)res->start;
}
EXPORT_SYMBOL(atari_stram_alloc);
void atari_stram_free( void *addr )
void atari_stram_free(void *addr)
{
BLOCK *block;
unsigned long start = (unsigned long)addr;
struct resource *res;
unsigned long size;
DPRINTK( "atari_stram_free(addr=%p)\n", addr );
if (!(block = find_region( addr ))) {
printk( KERN_ERR "Attempt to free non-allocated ST-RAM block at %p "
"from %p\n", addr, __builtin_return_address(0) );
res = lookup_resource(&stram_pool, start);
if (!res) {
pr_err("atari_stram_free: trying to free nonexistent region "
"at %p\n", addr);
return;
}
DPRINTK( "atari_stram_free: found block (%p): size=%08lx, owner=%s, "
"flags=%02x\n", block, block->size, block->owner, block->flags );
if (!(block->flags & BLOCK_GFP))
goto fail;
DPRINTK("atari_stram_free: is kmalloced, order_size=%d\n",
get_order(block->size));
free_pages((unsigned long)addr, get_order(block->size));
remove_region( block );
return;
fail:
printk( KERN_ERR "atari_stram_free: cannot free block at %p "
"(called from %p)\n", addr, __builtin_return_address(0) );
size = resource_size(res);
pr_debug("atari_stram_free: free %lu bytes at %p\n", size, addr);
release_resource(res);
kfree(res);
}
EXPORT_SYMBOL(atari_stram_free);
/* ------------------------------------------------------------------------ */
/* Region Management */
/* ------------------------------------------------------------------------ */
/* insert a region into the alloced list (sorted) */
static BLOCK *add_region( void *addr, unsigned long size )
{
BLOCK **p, *n = NULL;
int i;
for( i = 0; i < N_STATIC_BLOCKS; ++i ) {
if (static_blocks[i].flags & BLOCK_FREE) {
n = &static_blocks[i];
n->flags = 0;
break;
}
}
if (!n && mem_init_done) {
/* if statics block pool exhausted and we can call kmalloc() already
* (after mem_init()), try that */
n = kmalloc( sizeof(BLOCK), GFP_KERNEL );
if (n)
n->flags = BLOCK_KMALLOCED;
}
if (!n) {
printk( KERN_ERR "Out of memory for ST-RAM descriptor blocks\n" );
return( NULL );
}
n->start = addr;
n->size = size;
for( p = &alloc_list; *p; p = &((*p)->next) )
if ((*p)->start > addr) break;
n->next = *p;
*p = n;
return( n );
}
/* find a region (by start addr) in the alloced list */
static BLOCK *find_region( void *addr )
{
BLOCK *p;
for( p = alloc_list; p; p = p->next ) {
if (p->start == addr)
return( p );
if (p->start > addr)
break;
}
return( NULL );
}
/* remove a block from the alloced list */
static int remove_region( BLOCK *block )
{
BLOCK **p;
for( p = &alloc_list; *p; p = &((*p)->next) )
if (*p == block) break;
if (!*p)
return( 0 );
*p = block->next;
if (block->flags & BLOCK_KMALLOCED)
kfree( block );
else
block->flags |= BLOCK_FREE;
return( 1 );
}
/* ------------------------------------------------------------------------ */
/* /proc statistics file stuff */
/* ------------------------------------------------------------------------ */
#ifdef DO_PROC
#define PRINT_PROC(fmt,args...) seq_printf( m, fmt, ##args )
static int stram_proc_show(struct seq_file *m, void *v)
{
BLOCK *p;
PRINT_PROC("Total ST-RAM: %8u kB\n",
(stram_end - stram_start) >> 10);
PRINT_PROC( "Allocated regions:\n" );
for( p = alloc_list; p; p = p->next ) {
PRINT_PROC("0x%08lx-0x%08lx: %s (",
virt_to_phys(p->start),
virt_to_phys(p->start+p->size-1),
p->owner);
if (p->flags & BLOCK_GFP)
PRINT_PROC( "page-alloced)\n" );
else
PRINT_PROC( "??)\n" );
}
return 0;
}
static int stram_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, stram_proc_show, NULL);
}
static const struct file_operations stram_proc_fops = {
.open = stram_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int __init proc_stram_init(void)
{
proc_create("stram", 0, NULL, &stram_proc_fops);
return 0;
}
module_init(proc_stram_init);
#endif
/*
* Local variables:
* c-indent-level: 4
* tab-width: 4
* End:
*/

View File

@ -6,12 +6,11 @@
*/
/* public interface */
void *atari_stram_alloc(long size, const char *owner);
void *atari_stram_alloc(unsigned long size, const char *owner);
void atari_stram_free(void *);
/* functions called internally by other parts of the kernel */
void atari_stram_init(void);
void atari_stram_reserve_pages(void *start_mem);
void atari_stram_mem_init_hook (void);
#endif /*_M68K_ATARI_STRAM_H */
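
A minimal consumer sketch (not part of this commit) for the public interface declared above; the buffer size, owner string and function names are illustrative assumptions.

#include <linux/errno.h>
#include <asm/atari_stram.h>

static void *example_stram_buf;

static int example_grab_stram(void)
{
	example_stram_buf = atari_stram_alloc(8192, "example driver");
	return example_stram_buf ? 0 : -ENOMEM;
}

static void example_release_stram(void)
{
	atari_stram_free(example_stram_buf);
}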

View File

@ -399,8 +399,8 @@ struct CODEC
#define CODEC_OVERFLOW_LEFT 2
u_char unused2, unused3, unused4, unused5;
u_char gpio_directions;
#define GPIO_IN 0
#define GPIO_OUT 1
#define CODEC_GPIO_IN 0
#define CODEC_GPIO_OUT 1
u_char unused6;
u_char gpio_data;
};

View File

@ -216,7 +216,9 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record)
void __init setup_arch(char **cmdline_p)
{
#ifndef CONFIG_SUN3
int i;
#endif
/* The bootinfo is located right after the kernel bss */
m68k_parse_bootinfo((const struct bi_record *)_end);

View File

@ -105,9 +105,6 @@ fp_fetoxm1(struct fp_ext *dest, struct fp_ext *src)
fp_monadic_check(dest, src);
if (IS_ZERO(dest))
return dest;
return dest;
}

View File

@ -19,246 +19,6 @@
#ifndef MULTI_ARITH_H
#define MULTI_ARITH_H
#if 0 /* old code... */
/* Unsigned only, because we don't need signs to multiply and divide. */
typedef unsigned int int128[4];
/* Word order */
enum {
MSW128,
NMSW128,
NLSW128,
LSW128
};
/* big-endian */
#define LO_WORD(ll) (((unsigned int *) &ll)[1])
#define HI_WORD(ll) (((unsigned int *) &ll)[0])
/* Convenience functions to stuff various integer values into int128s */
static inline void zero128(int128 a)
{
a[LSW128] = a[NLSW128] = a[NMSW128] = a[MSW128] = 0;
}
/* Human-readable word order in the arguments */
static inline void set128(unsigned int i3, unsigned int i2, unsigned int i1,
unsigned int i0, int128 a)
{
a[LSW128] = i0;
a[NLSW128] = i1;
a[NMSW128] = i2;
a[MSW128] = i3;
}
/* Convenience functions (for testing as well) */
static inline void int64_to_128(unsigned long long src, int128 dest)
{
dest[LSW128] = (unsigned int) src;
dest[NLSW128] = src >> 32;
dest[NMSW128] = dest[MSW128] = 0;
}
static inline void int128_to_64(const int128 src, unsigned long long *dest)
{
*dest = src[LSW128] | (long long) src[NLSW128] << 32;
}
static inline void put_i128(const int128 a)
{
printk("%08x %08x %08x %08x\n", a[MSW128], a[NMSW128],
a[NLSW128], a[LSW128]);
}
/* Internal shifters:
Note that these are only good for 0 < count < 32.
*/
static inline void _lsl128(unsigned int count, int128 a)
{
a[MSW128] = (a[MSW128] << count) | (a[NMSW128] >> (32 - count));
a[NMSW128] = (a[NMSW128] << count) | (a[NLSW128] >> (32 - count));
a[NLSW128] = (a[NLSW128] << count) | (a[LSW128] >> (32 - count));
a[LSW128] <<= count;
}
static inline void _lsr128(unsigned int count, int128 a)
{
a[LSW128] = (a[LSW128] >> count) | (a[NLSW128] << (32 - count));
a[NLSW128] = (a[NLSW128] >> count) | (a[NMSW128] << (32 - count));
a[NMSW128] = (a[NMSW128] >> count) | (a[MSW128] << (32 - count));
a[MSW128] >>= count;
}
/* Should be faster, one would hope */
static inline void lslone128(int128 a)
{
asm volatile ("lsl.l #1,%0\n"
"roxl.l #1,%1\n"
"roxl.l #1,%2\n"
"roxl.l #1,%3\n"
:
"=d" (a[LSW128]),
"=d"(a[NLSW128]),
"=d"(a[NMSW128]),
"=d"(a[MSW128])
:
"0"(a[LSW128]),
"1"(a[NLSW128]),
"2"(a[NMSW128]),
"3"(a[MSW128]));
}
static inline void lsrone128(int128 a)
{
asm volatile ("lsr.l #1,%0\n"
"roxr.l #1,%1\n"
"roxr.l #1,%2\n"
"roxr.l #1,%3\n"
:
"=d" (a[MSW128]),
"=d"(a[NMSW128]),
"=d"(a[NLSW128]),
"=d"(a[LSW128])
:
"0"(a[MSW128]),
"1"(a[NMSW128]),
"2"(a[NLSW128]),
"3"(a[LSW128]));
}
/* Generalized 128-bit shifters:
These bit-shift to a multiple of 32, then move whole longwords. */
static inline void lsl128(unsigned int count, int128 a)
{
int wordcount, i;
if (count % 32)
_lsl128(count % 32, a);
if (0 == (wordcount = count / 32))
return;
/* argh, gak, endian-sensitive */
for (i = 0; i < 4 - wordcount; i++) {
a[i] = a[i + wordcount];
}
for (i = 3; i >= 4 - wordcount; --i) {
a[i] = 0;
}
}
static inline void lsr128(unsigned int count, int128 a)
{
int wordcount, i;
if (count % 32)
_lsr128(count % 32, a);
if (0 == (wordcount = count / 32))
return;
for (i = 3; i >= wordcount; --i) {
a[i] = a[i - wordcount];
}
for (i = 0; i < wordcount; i++) {
a[i] = 0;
}
}
static inline int orl128(int a, int128 b)
{
b[LSW128] |= a;
}
static inline int btsthi128(const int128 a)
{
return a[MSW128] & 0x80000000;
}
/* test bits (numbered from 0 = LSB) up to and including "top" */
static inline int bftestlo128(int top, const int128 a)
{
int r = 0;
if (top > 31)
r |= a[LSW128];
if (top > 63)
r |= a[NLSW128];
if (top > 95)
r |= a[NMSW128];
r |= a[3 - (top / 32)] & ((1 << (top % 32 + 1)) - 1);
return (r != 0);
}
/* Aargh. We need these because GCC is broken */
/* FIXME: do them in assembly, for goodness' sake! */
static inline void mask64(int pos, unsigned long long *mask)
{
*mask = 0;
if (pos < 32) {
LO_WORD(*mask) = (1 << pos) - 1;
return;
}
LO_WORD(*mask) = -1;
HI_WORD(*mask) = (1 << (pos - 32)) - 1;
}
static inline void bset64(int pos, unsigned long long *dest)
{
/* This conditional will be optimized away. Thanks, GCC! */
if (pos < 32)
asm volatile ("bset %1,%0":"=m"
(LO_WORD(*dest)):"id"(pos));
else
asm volatile ("bset %1,%0":"=m"
(HI_WORD(*dest)):"id"(pos - 32));
}
static inline int btst64(int pos, unsigned long long dest)
{
if (pos < 32)
return (0 != (LO_WORD(dest) & (1 << pos)));
else
return (0 != (HI_WORD(dest) & (1 << (pos - 32))));
}
static inline void lsl64(int count, unsigned long long *dest)
{
if (count < 32) {
HI_WORD(*dest) = (HI_WORD(*dest) << count)
| (LO_WORD(*dest) >> count);
LO_WORD(*dest) <<= count;
return;
}
count -= 32;
HI_WORD(*dest) = LO_WORD(*dest) << count;
LO_WORD(*dest) = 0;
}
static inline void lsr64(int count, unsigned long long *dest)
{
if (count < 32) {
LO_WORD(*dest) = (LO_WORD(*dest) >> count)
| (HI_WORD(*dest) << (32 - count));
HI_WORD(*dest) >>= count;
return;
}
count -= 32;
LO_WORD(*dest) = HI_WORD(*dest) >> count;
HI_WORD(*dest) = 0;
}
#endif
static inline void fp_denormalize(struct fp_ext *reg, unsigned int cnt)
{
reg->exp += cnt;
@ -481,117 +241,6 @@ static inline void fp_dividemant(union fp_mant128 *dest, struct fp_ext *src,
}
}
#if 0
static inline unsigned int fp_fls128(union fp_mant128 *src)
{
unsigned long data;
unsigned int res, off;
if ((data = src->m32[0]))
off = 0;
else if ((data = src->m32[1]))
off = 32;
else if ((data = src->m32[2]))
off = 64;
else if ((data = src->m32[3]))
off = 96;
else
return 128;
asm ("bfffo %1{#0,#32},%0" : "=d" (res) : "dm" (data));
return res + off;
}
static inline void fp_shiftmant128(union fp_mant128 *src, int shift)
{
unsigned long sticky;
switch (shift) {
case 0:
return;
case 1:
asm volatile ("lsl.l #1,%0"
: "=d" (src->m32[3]) : "0" (src->m32[3]));
asm volatile ("roxl.l #1,%0"
: "=d" (src->m32[2]) : "0" (src->m32[2]));
asm volatile ("roxl.l #1,%0"
: "=d" (src->m32[1]) : "0" (src->m32[1]));
asm volatile ("roxl.l #1,%0"
: "=d" (src->m32[0]) : "0" (src->m32[0]));
return;
case 2 ... 31:
src->m32[0] = (src->m32[0] << shift) | (src->m32[1] >> (32 - shift));
src->m32[1] = (src->m32[1] << shift) | (src->m32[2] >> (32 - shift));
src->m32[2] = (src->m32[2] << shift) | (src->m32[3] >> (32 - shift));
src->m32[3] = (src->m32[3] << shift);
return;
case 32 ... 63:
shift -= 32;
src->m32[0] = (src->m32[1] << shift) | (src->m32[2] >> (32 - shift));
src->m32[1] = (src->m32[2] << shift) | (src->m32[3] >> (32 - shift));
src->m32[2] = (src->m32[3] << shift);
src->m32[3] = 0;
return;
case 64 ... 95:
shift -= 64;
src->m32[0] = (src->m32[2] << shift) | (src->m32[3] >> (32 - shift));
src->m32[1] = (src->m32[3] << shift);
src->m32[2] = src->m32[3] = 0;
return;
case 96 ... 127:
shift -= 96;
src->m32[0] = (src->m32[3] << shift);
src->m32[1] = src->m32[2] = src->m32[3] = 0;
return;
case -31 ... -1:
shift = -shift;
sticky = 0;
if (src->m32[3] << (32 - shift))
sticky = 1;
src->m32[3] = (src->m32[3] >> shift) | (src->m32[2] << (32 - shift)) | sticky;
src->m32[2] = (src->m32[2] >> shift) | (src->m32[1] << (32 - shift));
src->m32[1] = (src->m32[1] >> shift) | (src->m32[0] << (32 - shift));
src->m32[0] = (src->m32[0] >> shift);
return;
case -63 ... -32:
shift = -shift - 32;
sticky = 0;
if ((src->m32[2] << (32 - shift)) || src->m32[3])
sticky = 1;
src->m32[3] = (src->m32[2] >> shift) | (src->m32[1] << (32 - shift)) | sticky;
src->m32[2] = (src->m32[1] >> shift) | (src->m32[0] << (32 - shift));
src->m32[1] = (src->m32[0] >> shift);
src->m32[0] = 0;
return;
case -95 ... -64:
shift = -shift - 64;
sticky = 0;
if ((src->m32[1] << (32 - shift)) || src->m32[2] || src->m32[3])
sticky = 1;
src->m32[3] = (src->m32[1] >> shift) | (src->m32[0] << (32 - shift)) | sticky;
src->m32[2] = (src->m32[0] >> shift);
src->m32[1] = src->m32[0] = 0;
return;
case -127 ... -96:
shift = -shift - 96;
sticky = 0;
if ((src->m32[0] << (32 - shift)) || src->m32[1] || src->m32[2] || src->m32[3])
sticky = 1;
src->m32[3] = (src->m32[0] >> shift) | sticky;
src->m32[2] = src->m32[1] = src->m32[0] = 0;
return;
}
if (shift < 0 && (src->m32[0] || src->m32[1] || src->m32[2] || src->m32[3]))
src->m32[3] = 1;
else
src->m32[3] = 0;
src->m32[2] = 0;
src->m32[1] = 0;
src->m32[0] = 0;
}
#endif
static inline void fp_putmant128(struct fp_ext *dest, union fp_mant128 *src,
int shift)
{
@ -637,183 +286,4 @@ static inline void fp_putmant128(struct fp_ext *dest, union fp_mant128 *src,
}
}
#if 0 /* old code... */
static inline int fls(unsigned int a)
{
int r;
asm volatile ("bfffo %1{#0,#32},%0"
: "=d" (r) : "md" (a));
return r;
}
/* fls = "find last set" (cf. ffs(3)) */
static inline int fls128(const int128 a)
{
if (a[MSW128])
return fls(a[MSW128]);
if (a[NMSW128])
return fls(a[NMSW128]) + 32;
/* XXX: it probably never gets beyond this point in actual
use, but that's indicative of a more general problem in the
algorithm (i.e. as per the actual 68881 implementation, we
really only need at most 67 bits of precision [plus
overflow]) so I'm not going to fix it. */
if (a[NLSW128])
return fls(a[NLSW128]) + 64;
if (a[LSW128])
return fls(a[LSW128]) + 96;
else
return -1;
}
static inline int zerop128(const int128 a)
{
return !(a[LSW128] | a[NLSW128] | a[NMSW128] | a[MSW128]);
}
static inline int nonzerop128(const int128 a)
{
return (a[LSW128] | a[NLSW128] | a[NMSW128] | a[MSW128]);
}
/* Addition and subtraction */
/* Do these in "pure" assembly, because "extended" asm is unmanageable
here */
static inline void add128(const int128 a, int128 b)
{
/* rotating carry flags */
unsigned int carry[2];
carry[0] = a[LSW128] > (0xffffffff - b[LSW128]);
b[LSW128] += a[LSW128];
carry[1] = a[NLSW128] > (0xffffffff - b[NLSW128] - carry[0]);
b[NLSW128] = a[NLSW128] + b[NLSW128] + carry[0];
carry[0] = a[NMSW128] > (0xffffffff - b[NMSW128] - carry[1]);
b[NMSW128] = a[NMSW128] + b[NMSW128] + carry[1];
b[MSW128] = a[MSW128] + b[MSW128] + carry[0];
}
/* Note: assembler semantics: "b -= a" */
static inline void sub128(const int128 a, int128 b)
{
/* rotating borrow flags */
unsigned int borrow[2];
borrow[0] = b[LSW128] < a[LSW128];
b[LSW128] -= a[LSW128];
borrow[1] = b[NLSW128] < a[NLSW128] + borrow[0];
b[NLSW128] = b[NLSW128] - a[NLSW128] - borrow[0];
borrow[0] = b[NMSW128] < a[NMSW128] + borrow[1];
b[NMSW128] = b[NMSW128] - a[NMSW128] - borrow[1];
b[MSW128] = b[MSW128] - a[MSW128] - borrow[0];
}
/* Poor man's 64-bit expanding multiply */
static inline void mul64(unsigned long long a, unsigned long long b, int128 c)
{
unsigned long long acc;
int128 acc128;
zero128(acc128);
zero128(c);
/* first the low words */
if (LO_WORD(a) && LO_WORD(b)) {
acc = (long long) LO_WORD(a) * LO_WORD(b);
c[NLSW128] = HI_WORD(acc);
c[LSW128] = LO_WORD(acc);
}
/* Next the high words */
if (HI_WORD(a) && HI_WORD(b)) {
acc = (long long) HI_WORD(a) * HI_WORD(b);
c[MSW128] = HI_WORD(acc);
c[NMSW128] = LO_WORD(acc);
}
/* The middle words */
if (LO_WORD(a) && HI_WORD(b)) {
acc = (long long) LO_WORD(a) * HI_WORD(b);
acc128[NMSW128] = HI_WORD(acc);
acc128[NLSW128] = LO_WORD(acc);
add128(acc128, c);
}
/* The first and last words */
if (HI_WORD(a) && LO_WORD(b)) {
acc = (long long) HI_WORD(a) * LO_WORD(b);
acc128[NMSW128] = HI_WORD(acc);
acc128[NLSW128] = LO_WORD(acc);
add128(acc128, c);
}
}
/* Note: unsigned */
static inline int cmp128(int128 a, int128 b)
{
if (a[MSW128] < b[MSW128])
return -1;
if (a[MSW128] > b[MSW128])
return 1;
if (a[NMSW128] < b[NMSW128])
return -1;
if (a[NMSW128] > b[NMSW128])
return 1;
if (a[NLSW128] < b[NLSW128])
return -1;
if (a[NLSW128] > b[NLSW128])
return 1;
return (signed) a[LSW128] - b[LSW128];
}
inline void div128(int128 a, int128 b, int128 c)
{
int128 mask;
/* Algorithm:
Shift the divisor until it's at least as big as the
dividend, keeping track of the position to which we've
shifted it, i.e. the power of 2 which we've multiplied it
by.
Then, for this power of 2 (the mask), and every one smaller
than it, subtract the mask from the dividend and add it to
the quotient until the dividend is smaller than the raised
divisor. At this point, divide the dividend and the mask
by 2 (i.e. shift one place to the right). Lather, rinse,
and repeat, until there are no more powers of 2 left. */
/* FIXME: needless to say, there's room for improvement here too. */
/* Shift up */
/* XXX: since it just has to be "at least as big", we can
probably eliminate this horribly wasteful loop. I will
have to prove this first, though */
set128(0, 0, 0, 1, mask);
while (cmp128(b, a) < 0 && !btsthi128(b)) {
lslone128(b);
lslone128(mask);
}
/* Shift down */
zero128(c);
do {
if (cmp128(a, b) >= 0) {
sub128(b, a);
add128(mask, c);
}
lsrone128(mask);
lsrone128(b);
} while (nonzerop128(mask));
/* The remainder is in a... */
}
#endif
#endif /* MULTI_ARITH_H */

View File

@ -83,11 +83,6 @@ void __init mem_init(void)
int initpages = 0;
int i;
#ifdef CONFIG_ATARI
if (MACH_IS_ATARI)
atari_stram_mem_init_hook();
#endif
/* this will put all memory onto the freelists */
totalram_pages = num_physpages = 0;
for_each_online_pgdat(pgdat) {

View File

@ -15,6 +15,7 @@ config PARISC
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_PROBE
select IRQ_PER_CPU
select ARCH_HAVE_NMI_SAFE_CMPXCHG
help
The PA-RISC microprocessor is designed by Hewlett-Packard and used

View File

@ -258,10 +258,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
static __inline__ int
static __inline__ s64
__atomic64_add_return(s64 i, atomic64_t *v)
{
int ret;
s64 ret;
unsigned long flags;
_atomic_spin_lock_irqsave(v, flags);

View File

@ -5,11 +5,14 @@
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/atomic.h>
#include <asm/errno.h>
static inline int
futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
{
unsigned long int flags;
u32 val;
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
int oparg = (encoded_op << 8) >> 20;
@ -18,21 +21,58 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr)))
return -EFAULT;
pagefault_disable();
_atomic_spin_lock_irqsave(uaddr, flags);
switch (op) {
case FUTEX_OP_SET:
/* *(int *)UADDR2 = OPARG; */
ret = get_user(oldval, uaddr);
if (!ret)
ret = put_user(oparg, uaddr);
break;
case FUTEX_OP_ADD:
/* *(int *)UADDR2 += OPARG; */
ret = get_user(oldval, uaddr);
if (!ret) {
val = oldval + oparg;
ret = put_user(val, uaddr);
}
break;
case FUTEX_OP_OR:
/* *(int *)UADDR2 |= OPARG; */
ret = get_user(oldval, uaddr);
if (!ret) {
val = oldval | oparg;
ret = put_user(val, uaddr);
}
break;
case FUTEX_OP_ANDN:
/* *(int *)UADDR2 &= ~OPARG; */
ret = get_user(oldval, uaddr);
if (!ret) {
val = oldval & ~oparg;
ret = put_user(val, uaddr);
}
break;
case FUTEX_OP_XOR:
/* *(int *)UADDR2 ^= OPARG; */
ret = get_user(oldval, uaddr);
if (!ret) {
val = oldval ^ oparg;
ret = put_user(val, uaddr);
}
break;
default:
ret = -ENOSYS;
}
_atomic_spin_unlock_irqrestore(uaddr, flags);
pagefault_enable();
if (!ret) {
@ -54,7 +94,9 @@ static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
int ret;
u32 val;
unsigned long flags;
/* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is
* our gateway page, and causes no end of trouble...
@ -65,12 +107,24 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
if (get_user(val, uaddr))
return -EFAULT;
if (val == oldval && put_user(newval, uaddr))
return -EFAULT;
/* HPPA has no cmpxchg in hardware and therefore the
* best we can do here is use an array of locks. The
* lock selected is based on a hash of the userspace
* address. This should scale to a couple of CPUs.
*/
_atomic_spin_lock_irqsave(uaddr, flags);
ret = get_user(val, uaddr);
if (!ret && val == oldval)
ret = put_user(newval, uaddr);
*uval = val;
return 0;
_atomic_spin_unlock_irqrestore(uaddr, flags);
return ret;
}
#endif /*__KERNEL__*/
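
A hedged illustration (not from this commit) of the hashed-lock fallback described in the comment above: with no hardware cmpxchg, one spinlock out of a small array is picked by hashing the user address, so unrelated futexes rarely contend. The lock count, hash and names below are invented for illustration; the real macros are the _atomic_spin_lock_irqsave()/_atomic_spin_unlock_irqrestore() pair used in the code above.

#include <linux/spinlock.h>

#define EXAMPLE_NR_LOCKS	16	/* illustrative; power of two for cheap masking */

static spinlock_t example_futex_locks[EXAMPLE_NR_LOCKS];

static inline spinlock_t *example_lock_for(u32 __user *uaddr)
{
	/* drop the low word-offset bits, then mask into the array */
	return &example_futex_locks[((unsigned long)uaddr >> 2) &
				    (EXAMPLE_NR_LOCKS - 1)];
}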

View File

@ -821,8 +821,9 @@
#define __NR_open_by_handle_at (__NR_Linux + 326)
#define __NR_syncfs (__NR_Linux + 327)
#define __NR_setns (__NR_Linux + 328)
#define __NR_sendmmsg (__NR_Linux + 329)
#define __NR_Linux_syscalls (__NR_setns + 1)
#define __NR_Linux_syscalls (__NR_sendmmsg + 1)
#define __IGNORE_select /* newselect */

View File

@ -427,6 +427,7 @@
ENTRY_COMP(open_by_handle_at)
ENTRY_SAME(syncfs)
ENTRY_SAME(setns)
ENTRY_COMP(sendmmsg)
/* Nothing yet */

View File

@ -136,6 +136,7 @@ config PPC
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_BPF_JIT if (PPC64 && NET)
select HAVE_ARCH_JUMP_LABEL
select ARCH_HAVE_NMI_SAFE_CMPXCHG
config EARLY_PRINTK
bool

View File

@ -81,6 +81,7 @@ config S390
select INIT_ALL_POSSIBLE
select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_LZMA
@ -273,11 +274,11 @@ config MARCH_Z10
on older machines.
config MARCH_Z196
bool "IBM zEnterprise 196"
bool "IBM zEnterprise 114 and 196"
help
Select this to enable optimizations for IBM zEnterprise 196
(2817 series). The kernel will be slightly faster but will not work
on older machines.
Select this to enable optimizations for IBM zEnterprise 114 and 196
(2818 and 2817 series). The kernel will be slightly faster but will
not work on older machines.
endchoice

View File

@ -167,5 +167,6 @@ enum diag308_rc {
};
extern int diag308(unsigned long subcode, void *addr);
extern void diag308_reset(void);
#endif /* _ASM_S390_IPL_H */

View File

@ -18,6 +18,7 @@ void system_call(void);
void pgm_check_handler(void);
void mcck_int_handler(void);
void io_int_handler(void);
void psw_restart_int_handler(void);
#ifdef CONFIG_32BIT
@ -150,7 +151,10 @@ struct _lowcore {
*/
__u32 ipib; /* 0x0e00 */
__u32 ipib_checksum; /* 0x0e04 */
__u8 pad_0x0e08[0x0f00-0x0e08]; /* 0x0e08 */
/* 64 bit save area */
__u64 save_area_64; /* 0x0e08 */
__u8 pad_0x0e10[0x0f00-0x0e10]; /* 0x0e10 */
/* Extended facility list */
__u64 stfle_fac_list[32]; /* 0x0f00 */
@ -286,7 +290,10 @@ struct _lowcore {
*/
__u64 ipib; /* 0x0e00 */
__u32 ipib_checksum; /* 0x0e08 */
__u8 pad_0x0e0c[0x0f00-0x0e0c]; /* 0x0e0c */
/* 64 bit save area */
__u64 save_area_64; /* 0x0e0c */
__u8 pad_0x0e14[0x0f00-0x0e14]; /* 0x0e14 */
/* Extended facility list */
__u64 stfle_fac_list[32]; /* 0x0f00 */

View File

@ -119,14 +119,12 @@ struct stack_frame {
* Do necessary setup to start up a new thread.
*/
#define start_thread(regs, new_psw, new_stackp) do { \
set_fs(USER_DS); \
regs->psw.mask = psw_user_bits; \
regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
regs->gprs[15] = new_stackp; \
} while (0)
#define start_thread31(regs, new_psw, new_stackp) do { \
set_fs(USER_DS); \
regs->psw.mask = psw_user32_bits; \
regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
regs->gprs[15] = new_stackp; \

View File

@ -113,6 +113,7 @@ extern void pfault_fini(void);
extern void cmma_init(void);
extern int memcpy_real(void *, void *, size_t);
extern void copy_to_absolute_zero(void *dest, void *src, size_t count);
#define finish_arch_switch(prev) do { \
set_fs(current->thread.mm_segment); \

View File

@ -27,12 +27,9 @@ int main(void)
BLANK();
DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
BLANK();
DEFINE(__THREAD_per_cause,
offsetof(struct task_struct, thread.per_event.cause));
DEFINE(__THREAD_per_address,
offsetof(struct task_struct, thread.per_event.address));
DEFINE(__THREAD_per_paid,
offsetof(struct task_struct, thread.per_event.paid));
DEFINE(__THREAD_per_cause, offsetof(struct task_struct, thread.per_event.cause));
DEFINE(__THREAD_per_address, offsetof(struct task_struct, thread.per_event.address));
DEFINE(__THREAD_per_paid, offsetof(struct task_struct, thread.per_event.paid));
BLANK();
DEFINE(__TI_task, offsetof(struct thread_info, task));
DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain));
@ -142,6 +139,7 @@ int main(void)
DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area));
DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area));
DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area));
DEFINE(__LC_SAVE_AREA_64, offsetof(struct _lowcore, save_area_64));
#ifdef CONFIG_32BIT
DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr));
#else /* CONFIG_32BIT */

View File

@ -76,6 +76,42 @@ s390_base_pgm_handler_fn:
.quad 0
.previous
#
# Calls diag 308 subcode 1 and continues execution
#
# The following conditions must be ensured before calling this function:
# * Prefix register = 0
# * Lowcore protection is disabled
#
ENTRY(diag308_reset)
larl %r4,.Lctlregs # Save control registers
stctg %c0,%c15,0(%r4)
larl %r4,.Lrestart_psw # Setup restart PSW at absolute 0
lghi %r3,0
lg %r4,0(%r4) # Save PSW
sturg %r4,%r3 # Use sturg, because of large pages
lghi %r1,1
diag %r1,%r1,0x308
.Lrestart_part2:
lhi %r0,0 # Load r0 with zero
lhi %r1,2 # Use mode 2 = ESAME (dump)
sigp %r1,%r0,0x12 # Switch to ESAME mode
sam64 # Switch to 64 bit addressing mode
larl %r4,.Lctlregs # Restore control registers
lctlg %c0,%c15,0(%r4)
br %r14
.align 16
.Lrestart_psw:
.long 0x00080000,0x80000000 + .Lrestart_part2
.section .bss
.align 8
.Lctlregs:
.rept 16
.quad 0
.endr
.previous
#else /* CONFIG_64BIT */
ENTRY(s390_base_mcck_handler)

View File

@ -380,20 +380,13 @@ asmlinkage long sys32_sigreturn(void)
goto badframe;
if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
set_current_blocked(&set);
if (restore_sigregs32(regs, &frame->sregs))
goto badframe;
if (restore_sigregs_gprs_high(regs, frame->gprs_high))
goto badframe;
return regs->gprs[2];
badframe:
force_sig(SIGSEGV, current);
return 0;
@ -413,31 +406,22 @@ asmlinkage long sys32_rt_sigreturn(void)
goto badframe;
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
set_current_blocked(&set);
if (restore_sigregs32(regs, &frame->uc.uc_mcontext))
goto badframe;
if (restore_sigregs_gprs_high(regs, frame->gprs_high))
goto badframe;
err = __get_user(ss_sp, &frame->uc.uc_stack.ss_sp);
st.ss_sp = compat_ptr(ss_sp);
err |= __get_user(st.ss_size, &frame->uc.uc_stack.ss_size);
err |= __get_user(st.ss_flags, &frame->uc.uc_stack.ss_flags);
if (err)
goto badframe;
set_fs (KERNEL_DS);
do_sigaltstack((stack_t __force __user *)&st, NULL, regs->gprs[15]);
set_fs (old_fs);
return regs->gprs[2];
badframe:
force_sig(SIGSEGV, current);
return 0;
@ -605,10 +589,10 @@ give_sigsegv:
* OK, we're invoking a handler
*/
int
handle_signal32(unsigned long sig, struct k_sigaction *ka,
siginfo_t *info, sigset_t *oldset, struct pt_regs * regs)
int handle_signal32(unsigned long sig, struct k_sigaction *ka,
siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
{
sigset_t blocked;
int ret;
/* Set up the stack frame */
@ -616,15 +600,12 @@ handle_signal32(unsigned long sig, struct k_sigaction *ka,
ret = setup_rt_frame32(sig, ka, info, oldset, regs);
else
ret = setup_frame32(sig, ka, oldset, regs);
if (ret == 0) {
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
if (!(ka->sa.sa_flags & SA_NODEFER))
sigaddset(&current->blocked,sig);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
}
return ret;
if (ret)
return ret;
sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
if (!(ka->sa.sa_flags & SA_NODEFER))
sigaddset(&blocked, sig);
set_current_blocked(&blocked);
return 0;
}
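
Throughout these hunks the sigreturn and signal-handler paths stop open-coding the blocked-mask update and call set_current_blocked() instead. The sketch below shows the sequence being consolidated, reconstructed from the removed lines above; it is illustrative only — the real helper in kernel/signal.c does more, e.g. retargeting shared pending signals — and example_block_signals is a made-up name, not part of this commit.

#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/spinlock.h>

/*
 * Sketch of the sequence the sigreturn paths above used to open-code
 * and now delegate to set_current_blocked(); not the kernel helper.
 */
static void example_block_signals(sigset_t *newset)
{
	/* SIGKILL and SIGSTOP can never be blocked. */
	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));

	/* Publish the new blocked mask under the siglock. */
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = *newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}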

View File

@ -849,6 +849,34 @@ restart_crash:
restart_go:
#endif
#
# PSW restart interrupt handler
#
ENTRY(psw_restart_int_handler)
st %r15,__LC_SAVE_AREA_64(%r0) # save r15
basr %r15,0
0: l %r15,.Lrestart_stack-0b(%r15) # load restart stack
l %r15,0(%r15)
ahi %r15,-SP_SIZE # make room for pt_regs
stm %r0,%r14,SP_R0(%r15) # store gprs %r0-%r14 to stack
mvc SP_R15(4,%r15),__LC_SAVE_AREA_64(%r0)# store saved %r15 to stack
mvc SP_PSW(8,%r15),__LC_RST_OLD_PSW(%r0) # store restart old psw
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0
basr %r14,0
1: l %r14,.Ldo_restart-1b(%r14)
basr %r14,%r14
basr %r14,0 # load disabled wait PSW if
2: lpsw restart_psw_crash-2b(%r14) # do_restart returns
.align 4
.Ldo_restart:
.long do_restart
.Lrestart_stack:
.long restart_stack
.align 8
restart_psw_crash:
.long 0x000a0000,0x00000000 + restart_psw_crash
.section .kprobes.text, "ax"
#ifdef CONFIG_CHECK_STACK

View File

@ -865,6 +865,26 @@ restart_crash:
restart_go:
#endif
#
# PSW restart interrupt handler
#
ENTRY(psw_restart_int_handler)
stg %r15,__LC_SAVE_AREA_64(%r0) # save r15
larl %r15,restart_stack # load restart stack
lg %r15,0(%r15)
aghi %r15,-SP_SIZE # make room for pt_regs
stmg %r0,%r14,SP_R0(%r15) # store gprs %r0-%r14 to stack
mvc SP_R15(8,%r15),__LC_SAVE_AREA_64(%r0)# store saved %r15 to stack
mvc SP_PSW(16,%r15),__LC_RST_OLD_PSW(%r0)# store restart old psw
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0
brasl %r14,do_restart
larl %r14,restart_psw_crash # load disabled wait PSW if
lpswe 0(%r14) # do_restart returns
.align 8
restart_psw_crash:
.quad 0x0002000080000000,0x0000000000000000 + restart_psw_crash
.section .kprobes.text, "ax"
#ifdef CONFIG_CHECK_STACK

View File

@ -45,11 +45,13 @@
* - halt
* - power off
* - reipl
* - restart
*/
#define ON_PANIC_STR "on_panic"
#define ON_HALT_STR "on_halt"
#define ON_POFF_STR "on_poff"
#define ON_REIPL_STR "on_reboot"
#define ON_RESTART_STR "on_restart"
struct shutdown_action;
struct shutdown_trigger {
@ -1544,17 +1546,20 @@ static char vmcmd_on_reboot[128];
static char vmcmd_on_panic[128];
static char vmcmd_on_halt[128];
static char vmcmd_on_poff[128];
static char vmcmd_on_restart[128];
DEFINE_IPL_ATTR_STR_RW(vmcmd, on_reboot, "%s\n", "%s\n", vmcmd_on_reboot);
DEFINE_IPL_ATTR_STR_RW(vmcmd, on_panic, "%s\n", "%s\n", vmcmd_on_panic);
DEFINE_IPL_ATTR_STR_RW(vmcmd, on_halt, "%s\n", "%s\n", vmcmd_on_halt);
DEFINE_IPL_ATTR_STR_RW(vmcmd, on_poff, "%s\n", "%s\n", vmcmd_on_poff);
DEFINE_IPL_ATTR_STR_RW(vmcmd, on_restart, "%s\n", "%s\n", vmcmd_on_restart);
static struct attribute *vmcmd_attrs[] = {
&sys_vmcmd_on_reboot_attr.attr,
&sys_vmcmd_on_panic_attr.attr,
&sys_vmcmd_on_halt_attr.attr,
&sys_vmcmd_on_poff_attr.attr,
&sys_vmcmd_on_restart_attr.attr,
NULL,
};
@ -1576,6 +1581,8 @@ static void vmcmd_run(struct shutdown_trigger *trigger)
cmd = vmcmd_on_halt;
else if (strcmp(trigger->name, ON_POFF_STR) == 0)
cmd = vmcmd_on_poff;
else if (strcmp(trigger->name, ON_RESTART_STR) == 0)
cmd = vmcmd_on_restart;
else
return;
@ -1707,6 +1714,34 @@ static void do_panic(void)
stop_run(&on_panic_trigger);
}
/* on restart */
static struct shutdown_trigger on_restart_trigger = {ON_RESTART_STR,
&reipl_action};
static ssize_t on_restart_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return sprintf(page, "%s\n", on_restart_trigger.action->name);
}
static ssize_t on_restart_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t len)
{
return set_trigger(buf, &on_restart_trigger, len);
}
static struct kobj_attribute on_restart_attr =
__ATTR(on_restart, 0644, on_restart_show, on_restart_store);
void do_restart(void)
{
smp_send_stop();
on_restart_trigger.action->fn(&on_restart_trigger);
stop_run(&on_restart_trigger);
}
/* on halt */
static struct shutdown_trigger on_halt_trigger = {ON_HALT_STR, &stop_action};
@ -1783,7 +1818,9 @@ static void __init shutdown_triggers_init(void)
if (sysfs_create_file(&shutdown_actions_kset->kobj,
&on_poff_attr.attr))
goto fail;
if (sysfs_create_file(&shutdown_actions_kset->kobj,
&on_restart_attr.attr))
goto fail;
return;
fail:
panic("shutdown_triggers_init failed\n");
@ -1959,6 +1996,12 @@ static void do_reset_calls(void)
{
struct reset_call *reset;
#ifdef CONFIG_64BIT
if (diag308_set_works) {
diag308_reset();
return;
}
#endif
list_for_each_entry(reset, &rcall, list)
reset->fn();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright IBM Corp 2000,2009
* Copyright IBM Corp 2000,2011
* Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>,
* Denis Joseph Barrow,
*/
@ -7,6 +7,64 @@
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#
# store_status
#
# Prerequisites to run this function:
# - Prefix register is set to zero
# - Original prefix register is stored in "dump_prefix_page"
# - Lowcore protection is off
#
ENTRY(store_status)
/* Save register one and load save area base */
stg %r1,__LC_SAVE_AREA_64(%r0)
lghi %r1,SAVE_AREA_BASE
/* General purpose registers */
stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
lg %r2,__LC_SAVE_AREA_64(%r0)
stg %r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1)
/* Control registers */
stctg %c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
/* Access registers */
stam %a0,%a15,__LC_AREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
/* Floating point registers */
std %f0, 0x00 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
std %f1, 0x08 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
std %f2, 0x10 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
std %f3, 0x18 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
std %f4, 0x20 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
std %f5, 0x28 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
std %f6, 0x30 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
std %f7, 0x38 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
std %f8, 0x40 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
std %f9, 0x48 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
std %f10,0x50 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
std %f11,0x58 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
std %f12,0x60 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
std %f13,0x68 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
std %f14,0x70 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
std %f15,0x78 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
/* Floating point control register */
stfpc __LC_FP_CREG_SAVE_AREA-SAVE_AREA_BASE(%r1)
/* CPU timer */
stpt __LC_CPU_TIMER_SAVE_AREA-SAVE_AREA_BASE(%r1)
/* Saved prefix register */
larl %r2,dump_prefix_page
mvc __LC_PREFIX_SAVE_AREA-SAVE_AREA_BASE(4,%r1),0(%r2)
/* Clock comparator - seven bytes */
larl %r2,.Lclkcmp
stckc 0(%r2)
mvc __LC_CLOCK_COMP_SAVE_AREA-SAVE_AREA_BASE + 1(7,%r1),1(%r2)
/* Program status word */
epsw %r2,%r3
st %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 0(%r1)
st %r3,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 4(%r1)
larl %r2,store_status
stg %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1)
br %r14
.align 8
.Lclkcmp: .quad 0x0000000000000000
#
# do_reipl_asm
# Parameter: r2 = schid of reipl device
@ -15,22 +73,7 @@
ENTRY(do_reipl_asm)
basr %r13,0
.Lpg0: lpswe .Lnewpsw-.Lpg0(%r13)
.Lpg1: # do store status of all registers
stg %r1,.Lregsave-.Lpg0(%r13)
lghi %r1,0x1000
stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-0x1000(%r1)
lg %r0,.Lregsave-.Lpg0(%r13)
stg %r0,__LC_GPREGS_SAVE_AREA-0x1000+8(%r1)
stctg %c0,%c15,__LC_CREGS_SAVE_AREA-0x1000(%r1)
stam %a0,%a15,__LC_AREGS_SAVE_AREA-0x1000(%r1)
lg %r10,.Ldump_pfx-.Lpg0(%r13)
mvc __LC_PREFIX_SAVE_AREA-0x1000(4,%r1),0(%r10)
stfpc __LC_FP_CREG_SAVE_AREA-0x1000(%r1)
stckc .Lclkcmp-.Lpg0(%r13)
mvc __LC_CLOCK_COMP_SAVE_AREA-0x1000(7,%r1),.Lclkcmp-.Lpg0(%r13)
stpt __LC_CPU_TIMER_SAVE_AREA-0x1000(%r1)
stg %r13, __LC_PSW_SAVE_AREA-0x1000+8(%r1)
.Lpg1: brasl %r14,store_status
lctlg %c6,%c6,.Lall-.Lpg0(%r13)
lgr %r1,%r2
@ -67,10 +110,7 @@ ENTRY(do_reipl_asm)
st %r14,.Ldispsw+12-.Lpg0(%r13)
lpswe .Ldispsw-.Lpg0(%r13)
.align 8
.Lclkcmp: .quad 0x0000000000000000
.Lall: .quad 0x00000000ff000000
.Ldump_pfx: .quad dump_prefix_page
.Lregsave: .quad 0x0000000000000000
.align 16
/*
* These addresses have to be 31 bit otherwise

View File

@ -346,7 +346,7 @@ setup_lowcore(void)
lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
lc->restart_psw.addr =
PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
if (user_mode != HOME_SPACE_MODE)
lc->restart_psw.mask |= PSW_ASC_HOME;
lc->external_new_psw.mask = psw_kernel_bits;
@ -529,6 +529,27 @@ static void __init setup_memory_end(void)
memory_end = memory_size;
}
void *restart_stack __attribute__((__section__(".data")));
/*
* Setup new PSW and allocate stack for PSW restart interrupt
*/
static void __init setup_restart_psw(void)
{
psw_t psw;
restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
restart_stack += ASYNC_SIZE;
/*
* Setup restart PSW for absolute zero lowcore. This is necessary
* if PSW restart is done on an offline CPU that has lowcore zero.
*/
psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
psw.addr = PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
copy_to_absolute_zero(&S390_lowcore.restart_psw, &psw, sizeof(psw));
}
static void __init
setup_memory(void)
{
@ -731,6 +752,7 @@ static void __init setup_hwcaps(void)
strcpy(elf_platform, "z10");
break;
case 0x2817:
case 0x2818:
strcpy(elf_platform, "z196");
break;
}
@ -792,6 +814,7 @@ setup_arch(char **cmdline_p)
setup_addressing_mode();
setup_memory();
setup_resources();
setup_restart_psw();
setup_lowcore();
cpu_init();

View File

@ -57,17 +57,15 @@ typedef struct
*/
SYSCALL_DEFINE3(sigsuspend, int, history0, int, history1, old_sigset_t, mask)
{
mask &= _BLOCKABLE;
spin_lock_irq(&current->sighand->siglock);
current->saved_sigmask = current->blocked;
siginitset(&current->blocked, mask);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
sigset_t blocked;
current->saved_sigmask = current->blocked;
mask &= _BLOCKABLE;
siginitset(&blocked, mask);
set_current_blocked(&blocked);
set_current_state(TASK_INTERRUPTIBLE);
schedule();
set_thread_flag(TIF_RESTORE_SIGMASK);
set_restore_sigmask();
return -ERESTARTNOHAND;
}
@ -172,18 +170,11 @@ SYSCALL_DEFINE0(sigreturn)
goto badframe;
if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE))
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
set_current_blocked(&set);
if (restore_sigregs(regs, &frame->sregs))
goto badframe;
return regs->gprs[2];
badframe:
force_sig(SIGSEGV, current);
return 0;
@ -199,21 +190,14 @@ SYSCALL_DEFINE0(rt_sigreturn)
goto badframe;
if (__copy_from_user(&set.sig, &frame->uc.uc_sigmask, sizeof(set)))
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
set_current_blocked(&set);
if (restore_sigregs(regs, &frame->uc.uc_mcontext))
goto badframe;
if (do_sigaltstack(&frame->uc.uc_stack, NULL,
regs->gprs[15]) == -EFAULT)
goto badframe;
return regs->gprs[2];
badframe:
force_sig(SIGSEGV, current);
return 0;
@ -385,14 +369,11 @@ give_sigsegv:
return -EFAULT;
}
/*
* OK, we're invoking a handler
*/
static int
handle_signal(unsigned long sig, struct k_sigaction *ka,
siginfo_t *info, sigset_t *oldset, struct pt_regs * regs)
static int handle_signal(unsigned long sig, struct k_sigaction *ka,
siginfo_t *info, sigset_t *oldset,
struct pt_regs *regs)
{
sigset_t blocked;
int ret;
/* Set up the stack frame */
@ -400,17 +381,13 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
ret = setup_rt_frame(sig, ka, info, oldset, regs);
else
ret = setup_frame(sig, ka, oldset, regs);
if (ret == 0) {
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
if (!(ka->sa.sa_flags & SA_NODEFER))
sigaddset(&current->blocked,sig);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
}
return ret;
if (ret)
return ret;
sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
if (!(ka->sa.sa_flags & SA_NODEFER))
sigaddset(&blocked, sig);
set_current_blocked(&blocked);
return 0;
}
/*

View File

@ -452,23 +452,27 @@ out:
*/
int __cpuinit start_secondary(void *cpuvoid)
{
/* Setup the cpu */
cpu_init();
preempt_disable();
/* Enable TOD clock interrupts on the secondary cpu. */
init_cpu_timer();
/* Enable cpu timer interrupts on the secondary cpu. */
init_cpu_vtimer();
/* Enable pfault pseudo page faults on this cpu. */
pfault_init();
/* call cpu notifiers */
notify_cpu_starting(smp_processor_id());
/* Mark this cpu as online */
ipi_call_lock();
set_cpu_online(smp_processor_id(), true);
ipi_call_unlock();
/* Switch on interrupts */
__ctl_clear_bit(0, 28); /* Disable lowcore protection */
S390_lowcore.restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
S390_lowcore.restart_psw.addr =
PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
__ctl_set_bit(0, 28); /* Enable lowcore protection */
/*
* Wait until the cpu which brought this one up marked it
* active before enabling interrupts.
*/
while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask))
cpu_relax();
local_irq_enable();
/* cpu_idle will call schedule for us */
cpu_idle();
@ -507,7 +511,11 @@ static int __cpuinit smp_alloc_lowcore(int cpu)
memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512);
lowcore->async_stack = async_stack + ASYNC_SIZE;
lowcore->panic_stack = panic_stack + PAGE_SIZE;
lowcore->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
lowcore->restart_psw.addr =
PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
if (user_mode != HOME_SPACE_MODE)
lowcore->restart_psw.mask |= PSW_ASC_HOME;
#ifndef CONFIG_64BIT
if (MACHINE_HAS_IEEE) {
unsigned long save_area;

View File

@ -85,3 +85,19 @@ int memcpy_real(void *dest, void *src, size_t count)
arch_local_irq_restore(flags);
return rc;
}
/*
* Copy memory to absolute zero
*/
void copy_to_absolute_zero(void *dest, void *src, size_t count)
{
unsigned long cr0;
BUG_ON((unsigned long) dest + count >= sizeof(struct _lowcore));
preempt_disable();
__ctl_store(cr0, 0, 0);
__ctl_clear_bit(0, 28); /* disable lowcore protection */
memcpy_real(dest + store_prefix(), src, count);
__ctl_load(cr0, 0, 0);
preempt_enable();
}
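
A usage sketch of the new helper follows. It simply mirrors the setup_restart_psw() hunk from arch/s390/kernel/setup.c earlier in this listing; install_restart_psw_example is a made-up name, and the declarations (psw_t, S390_lowcore, PSW_* constants) are the s390 ones used in the hunks above.

/*
 * Mirrors setup_restart_psw() earlier in this commit: write a new
 * restart PSW into the lowcore at absolute address zero, regardless
 * of the current prefix register setting.
 */
static void install_restart_psw_example(void)
{
	psw_t psw;

	psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
	psw.addr = PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
	copy_to_absolute_zero(&S390_lowcore.restart_psw, &psw, sizeof(psw));
}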

View File

@ -528,6 +528,7 @@ static inline void page_table_free_pgste(unsigned long *table)
static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
unsigned long vmaddr)
{
return NULL;
}
static inline void page_table_free_pgste(unsigned long *table)

View File

@ -11,6 +11,7 @@ config SUPERH
select HAVE_DMA_ATTRS
select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select ARCH_HAVE_NMI_SAFE_CMPXCHG if (GUSA_RB || CPU_SH4A)
select PERF_USE_VMALLOC
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_BZIP2

View File

@ -173,6 +173,7 @@ core-$(CONFIG_HD6446X_SERIES) += arch/sh/cchips/hd6446x/
cpuincdir-$(CONFIG_CPU_SH2A) += cpu-sh2a
cpuincdir-$(CONFIG_CPU_SH2) += cpu-sh2
cpuincdir-$(CONFIG_CPU_SH3) += cpu-sh3
cpuincdir-$(CONFIG_CPU_SH4A) += cpu-sh4a
cpuincdir-$(CONFIG_CPU_SH4) += cpu-sh4
cpuincdir-$(CONFIG_CPU_SH5) += cpu-sh5
cpuincdir-y += cpu-common # Must be last

View File

@ -116,7 +116,7 @@ static int apsh4a3a_clk_init(void)
int ret;
clk = clk_get(NULL, "extal");
if (!clk || IS_ERR(clk))
if (IS_ERR(clk))
return PTR_ERR(clk);
ret = clk_set_rate(clk, 33333000);
clk_put(clk);
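
The pattern fixed here (and in the similar board files below) deserves a note: clk_get() reports failure with an ERR_PTR()-encoded pointer, so IS_ERR() is the whole check, and the extra NULL test wrongly rejected platforms where a NULL cookie is a valid clock. A minimal sketch of the corrected idiom, with example_clk_init as a made-up name and the "extal" id and rate taken from the hunk above:

#include <linux/clk.h>
#include <linux/err.h>

/*
 * Corrected error handling: clk_get() signals failure via ERR_PTR(),
 * so IS_ERR() is the only test needed before using the clock.
 */
static int example_clk_init(void)
{
	struct clk *clk;
	int ret;

	clk = clk_get(NULL, "extal");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_set_rate(clk, 33333000);
	clk_put(clk);

	return ret;
}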

View File

@ -94,7 +94,7 @@ static int apsh4ad0a_clk_init(void)
int ret;
clk = clk_get(NULL, "extal");
if (!clk || IS_ERR(clk))
if (IS_ERR(clk))
return PTR_ERR(clk);
ret = clk_set_rate(clk, 33333000);
clk_put(clk);

View File

@ -299,7 +299,7 @@ static int sh7785lcr_clk_init(void)
int ret;
clk = clk_get(NULL, "extal");
if (!clk || IS_ERR(clk))
if (IS_ERR(clk))
return PTR_ERR(clk);
ret = clk_set_rate(clk, 33333333);
clk_put(clk);

View File

@ -190,7 +190,7 @@ static int urquell_clk_init(void)
return -EINVAL;
clk = clk_get(NULL, "extal");
if (!clk || IS_ERR(clk))
if (IS_ERR(clk))
return PTR_ERR(clk);
ret = clk_set_rate(clk, 33333333);
clk_put(clk);

View File

@ -335,8 +335,6 @@ static struct clk *r7780rp_clocks[] = {
&ivdr_clk,
};
#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("ivdr_clk", &ivdr_clk),

View File

@ -194,7 +194,7 @@ static int sdk7786_clk_init(void)
return -EINVAL;
clk = clk_get(NULL, "extal");
if (!clk || IS_ERR(clk))
if (IS_ERR(clk))
return PTR_ERR(clk);
ret = clk_set_rate(clk, 33333333);
clk_put(clk);

View File

@ -0,0 +1,10 @@
#ifndef __CPU_SH3_SERIAL_H
#define __CPU_SH3_SERIAL_H
#include <linux/serial_sci.h>
extern struct plat_sci_port_ops sh770x_sci_port_ops;
extern struct plat_sci_port_ops sh7710_sci_port_ops;
extern struct plat_sci_port_ops sh7720_sci_port_ops;
#endif /* __CPU_SH3_SERIAL_H */
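
The ops declared in this new header are consumed by CPU platform data. As a hedged illustration of the wiring — mirroring the setup-sh7705.c hunk at the end of this listing; example_scif_platform_data is a made-up name and fields other than .ops and .regtype are placeholders:

#include <linux/serial_core.h>
#include <linux/serial_sci.h>
#include <cpu/serial.h>

/*
 * Illustrative platform data: the CPU-specific pin-setup ops declared
 * in <cpu/serial.h> are hooked up via the new .ops member, as done in
 * the setup-sh7705.c hunk at the end of this listing.
 */
static struct plat_sci_port example_scif_platform_data = {
	.type		= PORT_SCIF,
	.ops		= &sh770x_sci_port_ops,
	.regtype	= SCIx_SH7705_SCIF_REGTYPE,
};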

View File

@ -0,0 +1,7 @@
#ifndef __CPU_SH4A_SERIAL_H
#define __CPU_SH4A_SERIAL_H
/* arch/sh/kernel/cpu/sh4a/serial-sh7722.c */
extern struct plat_sci_port_ops sh7722_sci_port_ops;
#endif /* __CPU_SH4A_SERIAL_H */

View File

@ -35,8 +35,6 @@ static struct clk *onchip_clocks[] = {
&cpu_clk,
};
#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("master_clk", &master_clk),

View File

@ -7,15 +7,15 @@ obj-y := ex.o probe.o entry.o setup-sh3.o
obj-$(CONFIG_HIBERNATION) += swsusp.o
# CPU subtype setup
obj-$(CONFIG_CPU_SUBTYPE_SH7705) += setup-sh7705.o
obj-$(CONFIG_CPU_SUBTYPE_SH7706) += setup-sh770x.o
obj-$(CONFIG_CPU_SUBTYPE_SH7707) += setup-sh770x.o
obj-$(CONFIG_CPU_SUBTYPE_SH7708) += setup-sh770x.o
obj-$(CONFIG_CPU_SUBTYPE_SH7709) += setup-sh770x.o
obj-$(CONFIG_CPU_SUBTYPE_SH7710) += setup-sh7710.o
obj-$(CONFIG_CPU_SUBTYPE_SH7712) += setup-sh7710.o
obj-$(CONFIG_CPU_SUBTYPE_SH7720) += setup-sh7720.o
obj-$(CONFIG_CPU_SUBTYPE_SH7721) += setup-sh7720.o
obj-$(CONFIG_CPU_SUBTYPE_SH7705) += setup-sh7705.o serial-sh770x.o
obj-$(CONFIG_CPU_SUBTYPE_SH7706) += setup-sh770x.o serial-sh770x.o
obj-$(CONFIG_CPU_SUBTYPE_SH7707) += setup-sh770x.o serial-sh770x.o
obj-$(CONFIG_CPU_SUBTYPE_SH7708) += setup-sh770x.o serial-sh770x.o
obj-$(CONFIG_CPU_SUBTYPE_SH7709) += setup-sh770x.o serial-sh770x.o
obj-$(CONFIG_CPU_SUBTYPE_SH7710) += setup-sh7710.o serial-sh7710.o
obj-$(CONFIG_CPU_SUBTYPE_SH7712) += setup-sh7710.o serial-sh7710.o
obj-$(CONFIG_CPU_SUBTYPE_SH7720) += setup-sh7720.o serial-sh7720.o
obj-$(CONFIG_CPU_SUBTYPE_SH7721) += setup-sh7720.o serial-sh7720.o
# Primary on-chip clocks (common)
clock-$(CONFIG_CPU_SH3) := clock-sh3.o

View File

@ -0,0 +1,33 @@
#include <linux/serial_sci.h>
#include <linux/serial_core.h>
#include <linux/io.h>
#include <cpu/serial.h>
#define SCPCR 0xA4000116
#define SCPDR 0xA4000136
static void sh770x_sci_init_pins(struct uart_port *port, unsigned int cflag)
{
unsigned short data;
/* We need to set SCPCR to enable RTS/CTS */
data = __raw_readw(SCPCR);
/* Clear out SCP7MD1,0, SCP6MD1,0, SCP4MD1,0*/
__raw_writew(data & 0x0fcf, SCPCR);
if (!(cflag & CRTSCTS)) {
/* We need to set SCPCR to enable RTS/CTS */
data = __raw_readw(SCPCR);
/* Clear out SCP7MD1,0, SCP4MD1,0,
Set SCP6MD1,0 = {01} (output) */
__raw_writew((data & 0x0fcf) | 0x1000, SCPCR);
data = __raw_readb(SCPDR);
/* Set /RTS2 (bit6) = 0 */
__raw_writeb(data & 0xbf, SCPDR);
}
}
struct plat_sci_port_ops sh770x_sci_port_ops = {
.init_pins = sh770x_sci_init_pins,
};

View File

@ -0,0 +1,20 @@
#include <linux/serial_sci.h>
#include <linux/serial_core.h>
#include <linux/io.h>
#include <cpu/serial.h>
#define PACR 0xa4050100
#define PBCR 0xa4050102
static void sh7710_sci_init_pins(struct uart_port *port, unsigned int cflag)
{
if (port->mapbase == 0xA4400000) {
__raw_writew(__raw_readw(PACR) & 0xffc0, PACR);
__raw_writew(__raw_readw(PBCR) & 0x0fff, PBCR);
} else if (port->mapbase == 0xA4410000)
__raw_writew(__raw_readw(PBCR) & 0xf003, PBCR);
}
struct plat_sci_port_ops sh7710_sci_port_ops = {
.init_pins = sh7710_sci_init_pins,
};

View File

@ -0,0 +1,37 @@
#include <linux/serial_sci.h>
#include <linux/serial_core.h>
#include <linux/io.h>
#include <cpu/serial.h>
#include <asm/gpio.h>
static void sh7720_sci_init_pins(struct uart_port *port, unsigned int cflag)
{
unsigned short data;
if (cflag & CRTSCTS) {
/* enable RTS/CTS */
if (port->mapbase == 0xa4430000) { /* SCIF0 */
/* Clear PTCR bit 9-2; enable all scif pins but sck */
data = __raw_readw(PORT_PTCR);
__raw_writew((data & 0xfc03), PORT_PTCR);
} else if (port->mapbase == 0xa4438000) { /* SCIF1 */
/* Clear PVCR bit 9-2 */
data = __raw_readw(PORT_PVCR);
__raw_writew((data & 0xfc03), PORT_PVCR);
}
} else {
if (port->mapbase == 0xa4430000) { /* SCIF0 */
/* Clear PTCR bit 5-2; enable only tx and rx */
data = __raw_readw(PORT_PTCR);
__raw_writew((data & 0xffc3), PORT_PTCR);
} else if (port->mapbase == 0xa4438000) { /* SCIF1 */
/* Clear PVCR bit 5-2 */
data = __raw_readw(PORT_PVCR);
__raw_writew((data & 0xffc3), PORT_PVCR);
}
}
}
struct plat_sci_port_ops sh7720_sci_port_ops = {
.init_pins = sh7720_sci_init_pins,
};

View File

@ -15,6 +15,7 @@
#include <linux/serial_sci.h>
#include <linux/sh_timer.h>
#include <asm/rtc.h>
#include <cpu/serial.h>
enum {
UNUSED = 0,
@ -75,6 +76,8 @@ static struct plat_sci_port scif0_platform_data = {
.scbrr_algo_id = SCBRR_ALGO_4,
.type = PORT_SCIF,
.irqs = { 56, 56, 56 },
.ops = &sh770x_sci_port_ops,
.regtype = SCIx_SH7705_SCIF_REGTYPE,
};
static struct platform_device scif0_device = {
@ -92,6 +95,8 @@ static struct plat_sci_port scif1_platform_data = {
.scbrr_algo_id = SCBRR_ALGO_4,
.type = PORT_SCIF,
.irqs = { 52, 52, 52 },
.ops = &sh770x_sci_port_ops,
.regtype = SCIx_SH7705_SCIF_REGTYPE,
};
static struct platform_device scif1_device = {

Some files were not shown because too many files have changed in this diff.