
Merge branch 'master' of /home/davem/src/GIT/linux-2.6/

David S. Miller 2009-12-01 15:46:35 -08:00
commit 7e8f44f8d4
222 changed files with 3998 additions and 913 deletions

.gitignore

@ -25,6 +25,7 @@
*.elf
*.bin
*.gz
*.bz2
*.lzma
*.patch
*.gcno


@ -312,10 +312,8 @@ and to the following documentation:
8. Mailing list
---------------
There are several frame buffer device related mailing lists at SourceForge:
- linux-fbdev-announce@lists.sourceforge.net, for announcements,
- linux-fbdev-user@lists.sourceforge.net, for generic user support,
- linux-fbdev-devel@lists.sourceforge.net, for project developers.
There is a frame buffer device related mailing list at kernel.org:
linux-fbdev@vger.kernel.org.
Point your web browser to http://sourceforge.net/projects/linux-fbdev/ for
subscription information and archive browsing.


@ -235,6 +235,7 @@ proc files.
neg=N Number of negative lookups made
pos=N Number of positive lookups made
crt=N Number of objects created by lookup
tmo=N Number of lookups timed out and requeued
Updates n=N Number of update cookie requests seen
nul=N Number of upd reqs given a NULL parent
run=N Number of upd reqs granted CPU time
@ -250,8 +251,10 @@ proc files.
ok=N Number of successful alloc reqs
wt=N Number of alloc reqs that waited on lookup completion
nbf=N Number of alloc reqs rejected -ENOBUFS
int=N Number of alloc reqs aborted -ERESTARTSYS
ops=N Number of alloc reqs submitted
owt=N Number of alloc reqs waited for CPU time
abt=N Number of alloc reqs aborted due to object death
Retrvls n=N Number of retrieval (read) requests seen
ok=N Number of successful retr reqs
wt=N Number of retr reqs that waited on lookup completion
@ -261,6 +264,7 @@ proc files.
oom=N Number of retr reqs failed -ENOMEM
ops=N Number of retr reqs submitted
owt=N Number of retr reqs waited for CPU time
abt=N Number of retr reqs aborted due to object death
Stores n=N Number of storage (write) requests seen
ok=N Number of successful store reqs
agn=N Number of store reqs on a page already pending storage
@ -268,12 +272,37 @@ proc files.
oom=N Number of store reqs failed -ENOMEM
ops=N Number of store reqs submitted
run=N Number of store reqs granted CPU time
pgs=N Number of pages given store req processing time
rxd=N Number of store reqs deleted from tracking tree
olm=N Number of store reqs over store limit
VmScan nos=N Number of release reqs against pages with no pending store
gon=N Number of release reqs against pages stored by time lock granted
bsy=N Number of release reqs ignored due to in-progress store
can=N Number of page stores cancelled due to release req
Ops pend=N Number of times async ops added to pending queues
run=N Number of times async ops given CPU time
enq=N Number of times async ops queued for processing
can=N Number of async ops cancelled
rej=N Number of async ops rejected due to object lookup/create failure
dfr=N Number of async ops queued for deferred release
rel=N Number of async ops released
gc=N Number of deferred-release async ops garbage collected
CacheOp alo=N Number of in-progress alloc_object() cache ops
luo=N Number of in-progress lookup_object() cache ops
luc=N Number of in-progress lookup_complete() cache ops
gro=N Number of in-progress grab_object() cache ops
upo=N Number of in-progress update_object() cache ops
dro=N Number of in-progress drop_object() cache ops
pto=N Number of in-progress put_object() cache ops
syn=N Number of in-progress sync_cache() cache ops
atc=N Number of in-progress attr_changed() cache ops
rap=N Number of in-progress read_or_alloc_page() cache ops
ras=N Number of in-progress read_or_alloc_pages() cache ops
alp=N Number of in-progress allocate_page() cache ops
als=N Number of in-progress allocate_pages() cache ops
wrp=N Number of in-progress write_page() cache ops
ucp=N Number of in-progress uncache_page() cache ops
dsp=N Number of in-progress dissociate_pages() cache ops
(*) /proc/fs/fscache/histogram
@ -299,6 +328,87 @@ proc files.
jiffy range covered, and the SECS field the equivalent number of seconds.
===========
OBJECT LIST
===========
If CONFIG_FSCACHE_OBJECT_LIST is enabled, the FS-Cache facility will maintain a
list of all the objects currently allocated and allow them to be viewed
through:
/proc/fs/fscache/objects
This will look something like:
[root@andromeda ~]# head /proc/fs/fscache/objects
OBJECT PARENT STAT CHLDN OPS OOP IPR EX READS EM EV F S | NETFS_COOKIE_DEF TY FL NETFS_DATA OBJECT_KEY, AUX_DATA
======== ======== ==== ===== === === === == ===== == == = = | ================ == == ================ ================
17e4b 2 ACTV 0 0 0 0 0 0 7b 4 0 8 | NFS.fh DT 0 ffff88001dd82820 010006017edcf8bbc93b43298fdfbe71e50b57b13a172c0117f38472, e567634700000000000000000000000063f2404a000000000000000000000000c9030000000000000000000063f2404a
1693a 2 ACTV 0 0 0 0 0 0 7b 4 0 8 | NFS.fh DT 0 ffff88002db23380 010006017edcf8bbc93b43298fdfbe71e50b57b1e0162c01a2df0ea6, 420ebc4a000000000000000000000000420ebc4a0000000000000000000000000e1801000000000000000000420ebc4a
where the first set of columns before the '|' describe the object:
COLUMN DESCRIPTION
======= ===============================================================
OBJECT Object debugging ID (appears as OBJ%x in some debug messages)
PARENT Debugging ID of parent object
STAT Object state
CHLDN Number of child objects of this object
OPS Number of outstanding operations on this object
OOP Number of outstanding child object management operations
IPR
EX Number of outstanding exclusive operations
READS Number of outstanding read operations
EM Object's event mask
EV Events raised on this object
F Object flags
S Object slow-work work item flags
and the second set of columns describe the object's cookie, if present:
COLUMN DESCRIPTION
=============== =======================================================
NETFS_COOKIE_DEF Name of netfs cookie definition
TY Cookie type (IX - index, DT - data, hex - special)
FL Cookie flags
NETFS_DATA Netfs private data stored in the cookie
OBJECT_KEY Object key } 1 column, with separating comma
AUX_DATA Object aux data } presence may be configured
The data shown may be filtered by attaching a key to an appropriate keyring
before viewing the file. Something like:
keyctl add user fscache:objlist <restrictions> @s
where <restrictions> are a selection of the following letters:
K Show hexdump of object key (don't show if not given)
A Show hexdump of object aux data (don't show if not given)
and the following paired letters:
C Show objects that have a cookie
c Show objects that don't have a cookie
B Show objects that are busy
b Show objects that aren't busy
W Show objects that have pending writes
w Show objects that don't have pending writes
R Show objects that have outstanding reads
r Show objects that don't have outstanding reads
S Show objects that have slow work queued
s Show objects that don't have slow work queued
If neither side of a letter pair is given, then both are implied. For example:
keyctl add user fscache:objlist KB @s
shows objects that are busy, and lists their object keys, but does not dump
their auxiliary data. It also implies "CcWwRrSs", but as 'B' is given, 'b' is
not implied.
By default all objects and all fields will be shown.
=========
DEBUGGING
=========


@ -641,7 +641,7 @@ data file must be retired (see the relinquish cookie function below).
Furthermore, note that this does not cancel the asynchronous read or write
operation started by the read/alloc and write functions, so the page
invalidation and release functions must use:
invalidation functions must use:
bool fscache_check_page_write(struct fscache_cookie *cookie,
struct page *page);
@ -654,6 +654,25 @@ to see if a page is being written to the cache, and:
to wait for it to finish if it is.
When releasepage() is being implemented, a special FS-Cache function exists to
manage the heuristics of coping with vmscan trying to eject pages, which may
conflict with the cache trying to write pages to the cache (which may itself
need to allocate memory):
bool fscache_maybe_release_page(struct fscache_cookie *cookie,
struct page *page,
gfp_t gfp);
This takes the netfs cookie, and the page and gfp arguments as supplied to
releasepage(). It will return false if the page cannot be released yet for
some reason; if it returns true, the page has been uncached and can now be
released.
To make a page available for release, this function may wait for an outstanding
storage request to complete, or it may attempt to cancel the storage request -
in which case the page will not be stored in the cache this time.
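As a rough sketch of how this fits together (not taken from the kernel sources:
my_netfs_inode, MY_NETFS_I() and the fscache_cookie field are hypothetical
names), a netfs might wire the helper into its releasepage() like so:

static int my_netfs_releasepage(struct page *page, gfp_t gfp)
{
	struct my_netfs_inode *mni = MY_NETFS_I(page->mapping->host);

	/* Let FS-Cache decide whether the page can go yet: it may wait for
	 * a pending store to finish or attempt to cancel it altogether. */
	if (!fscache_maybe_release_page(mni->fscache_cookie, page, gfp))
		return 0;	/* the page cannot be released yet */

	/* The page has been uncached; strip any netfs-private state here,
	 * then tell the VM the page may be released. */
	return 1;
}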
==========================
INDEX AND DATA FILE UPDATE
==========================


@ -20,15 +20,16 @@ Lots of code taken from ext3 and other projects.
Authors in alphabetical order:
Joel Becker <joel.becker@oracle.com>
Zach Brown <zach.brown@oracle.com>
Mark Fasheh <mark.fasheh@oracle.com>
Mark Fasheh <mfasheh@suse.com>
Kurt Hackel <kurt.hackel@oracle.com>
Tao Ma <tao.ma@oracle.com>
Sunil Mushran <sunil.mushran@oracle.com>
Manish Singh <manish.singh@oracle.com>
Tiger Yang <tiger.yang@oracle.com>
Caveats
=======
Features which OCFS2 does not support yet:
- quotas
- Directory change notification (F_NOTIFY)
- Distributed Caching (F_SETLEASE/F_GETLEASE/break_lease)
@ -70,7 +71,6 @@ commit=nrsec (*) Ocfs2 can be told to sync all its data and metadata
performance.
localalloc=8(*) Allows custom localalloc size in MB. If the value is too
large, the fs will silently revert it to the default.
Localalloc is not enabled for local mounts.
localflocks This disables cluster aware flock.
inode64 Indicates that Ocfs2 is allowed to create inodes at
any location in the filesystem, including those which


@ -41,6 +41,13 @@ expand files, provided the time taken to do so isn't too long.
Operations of both types may sleep during execution, thus tying up the thread
loaned to it.
A further class of work item is available, based on the slow work item class:
(*) Delayed slow work items.
These are slow work items that have a timer to defer queueing of the item for
a while.
THREAD-TO-CLASS ALLOCATION
--------------------------
@ -64,9 +71,11 @@ USING SLOW WORK ITEMS
Firstly, a module or subsystem wanting to make use of slow work items must
register its interest:
int ret = slow_work_register_user();
int ret = slow_work_register_user(struct module *module);
This will return 0 if successful, or a -ve error upon failure.
This will return 0 if successful, or a -ve error upon failure. The module
pointer should be the module interested in using this facility (almost
certainly THIS_MODULE).
Slow work items may then be set up by:
@ -91,6 +100,10 @@ Slow work items may then be set up by:
slow_work_init(&myitem, &myitem_ops);
or:
delayed_slow_work_init(&myitem, &myitem_ops);
or:
vslow_work_init(&myitem, &myitem_ops);
@ -102,15 +115,92 @@ A suitably set up work item can then be enqueued for processing:
int ret = slow_work_enqueue(&myitem);
This will return a -ve error if the thread pool is unable to gain a reference
on the item, 0 otherwise.
on the item, 0 otherwise, or (for delayed work):
int ret = delayed_slow_work_enqueue(&myitem, my_jiffy_delay);
The items are reference counted, so there ought to be no need for a flush
operation. When all a module's slow work items have been processed, and the
operation. But as the reference counting is optional, means to cancel
existing work items are also included:
cancel_slow_work(&myitem);
cancel_delayed_slow_work(&myitem);
can be used to cancel pending work. The above cancel functions wait for
existing work to have been executed (or prevent it from executing, depending
on timing).
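For illustration only (a sketch reusing the item and function names shown
above), queueing an item and later cancelling it might look like:

int ret;

ret = slow_work_enqueue(&myitem);
if (ret < 0)
	return ret;		/* the pool could not get a reference */

/* ... later, if the work must not run beyond this point ... */
cancel_slow_work(&myitem);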
When all a module's slow work items have been processed, and the
module has no further interest in the facility, it should unregister its
interest:
slow_work_unregister_user();
slow_work_unregister_user(struct module *module);
The module pointer is used to wait for all outstanding work items for that
module before completing the unregistration. This prevents the put_ref() code
from being taken away before it completes. module should almost certainly be
THIS_MODULE.
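Putting the two calls together, a module's init and exit paths might bracket
its use of the facility like this (a sketch only; the function names are made
up):

static int __init my_module_init(void)
{
	int ret;

	ret = slow_work_register_user(THIS_MODULE);
	if (ret < 0)
		return ret;	/* the facility could not be set up */

	/* set up and enqueue slow work items as described above */
	return 0;
}

static void __exit my_module_exit(void)
{
	/* waits for this module's outstanding items before returning */
	slow_work_unregister_user(THIS_MODULE);
}

module_init(my_module_init);
module_exit(my_module_exit);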
================
HELPER FUNCTIONS
================
The slow-work facility provides a function by which it can be determined
whether or not an item is queued for later execution:
bool queued = slow_work_is_queued(struct slow_work *work);
If it returns false, then the item is not on the queue (it may be executing
with a requeue pending). This can be used to work out whether an item on which
another depends is on the queue, thus allowing a dependent item to be queued
after it.
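For example (a sketch with made-up item names), a dependent item's work
function might requeue itself behind its dependency while that dependency is
still waiting for a thread:

static void my_dependent_execute(struct slow_work *work)
{
	if (slow_work_is_queued(&my_dependency_item)) {
		/* let the dependency run first, then try again */
		slow_work_enqueue(work);
		return;
	}

	/* the dependency is no longer queued (it may be executing); do the
	 * real work now, or wait for it using the helper described below */
}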
If the above shows an item on which another depends not to be queued, then the
owner of the dependent item might need to wait. However, to avoid locking up
the threads unnecessarily by sleeping in them, it can make sense under some
circumstances to return the work item to the queue, thus deferring it until
some other items have had a chance to make use of the yielded thread.
To yield a thread and defer an item, the work function should simply enqueue
the work item again and return. However, this doesn't work if there's nothing
actually on the queue, as the thread just vacated will jump straight back into
the item's work function, thus busy waiting on a CPU.
Instead, the item should use the thread to wait for the dependency to go away,
but rather than using schedule() or schedule_timeout() to sleep, it should use
the following function:
bool requeue = slow_work_sleep_till_thread_needed(
struct slow_work *work,
signed long *_timeout);
This will add a second wait and then sleep, such that it will be woken up if
either something appears on the queue that could usefully make use of the
thread - and behind which this item can be queued, or if the event the caller
set up to wait for happens. True will be returned if something else appeared
on the queue and this work function should perhaps return, or false if
something else woke it up. The timeout is as for schedule_timeout().
For example:
wq = bit_waitqueue(&my_flags, MY_BIT);
init_wait(&wait);
requeue = false;
do {
	prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
	if (!test_bit(MY_BIT, &my_flags))
		break;
	requeue = slow_work_sleep_till_thread_needed(&my_work,
						     &timeout);
} while (timeout > 0 && !requeue);
finish_wait(wq, &wait);
if (!test_bit(MY_BIT, &my_flags))
	goto do_my_thing;
if (requeue)
	return; // to slow_work
===============
@ -118,7 +208,8 @@ ITEM OPERATIONS
===============
Each work item requires a table of operations of type struct slow_work_ops.
All members are required:
Only ->execute() is required; the getting and putting of a reference and the
describing of an item are all optional.
(*) Get a reference on an item:
@ -148,6 +239,16 @@ All members are required:
This should perform the work required of the item. It may sleep, it may
perform disk I/O and it may wait for locks.
(*) View an item through /proc:
void (*desc)(struct slow_work *work, struct seq_file *m);
If supplied, this should print to 'm' a small string describing the work
the item is to do. This should be no more than about 40 characters, and
shouldn't include a newline character.
See the 'Viewing executing and queued items' section below.
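To illustrate (a sketch only: the names are made up, the optional
reference-counting members are omitted and the real structure may carry
further fields), a minimal operations table might look like:

static void my_item_execute(struct slow_work *work)
{
	/* the slow part of the job: may sleep, take locks and do disk I/O */
}

static void my_item_desc(struct slow_work *work, struct seq_file *m)
{
	seq_puts(m, "EXAMPLE: my item");	/* short, no trailing newline */
}

static const struct slow_work_ops my_item_ops = {
	.execute	= my_item_execute,
	.desc		= my_item_desc,
};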
==================
POOL CONFIGURATION
@ -172,3 +273,50 @@ The slow-work thread pool has a number of configurables:
is bounded to between 1 and one fewer than the number of active threads.
This ensures there is always at least one thread that can process very
slow work items, and always at least one thread that won't.
==================================
VIEWING EXECUTING AND QUEUED ITEMS
==================================
If CONFIG_SLOW_WORK_DEBUG is enabled, a debugfs file is made available:
/sys/kernel/debug/slow_work/runqueue
through which the list of work items being executed and the queues of items to
be executed may be viewed. The owner of a work item is given the chance to
add some information of its own.
The contents look something like the following:
THR PID ITEM ADDR FL MARK DESC
=== ===== ================ == ===== ==========
0 3005 ffff880023f52348 a 952ms FSC: OBJ17d3: LOOK
1 3006 ffff880024e33668 2 160ms FSC: OBJ17e5 OP60d3b: Write1/Store fl=2
2 3165 ffff8800296dd180 a 424ms FSC: OBJ17e4: LOOK
3 4089 ffff8800262c8d78 a 212ms FSC: OBJ17ea: CRTN
4 4090 ffff88002792bed8 2 388ms FSC: OBJ17e8 OP60d36: Write1/Store fl=2
5 4092 ffff88002a0ef308 2 388ms FSC: OBJ17e7 OP60d2e: Write1/Store fl=2
6 4094 ffff88002abaf4b8 2 132ms FSC: OBJ17e2 OP60d4e: Write1/Store fl=2
7 4095 ffff88002bb188e0 a 388ms FSC: OBJ17e9: CRTN
vsq - ffff880023d99668 1 308ms FSC: OBJ17e0 OP60f91: Write1/EnQ fl=2
vsq - ffff8800295d1740 1 212ms FSC: OBJ16be OP4d4b6: Write1/EnQ fl=2
vsq - ffff880025ba3308 1 160ms FSC: OBJ179a OP58dec: Write1/EnQ fl=2
vsq - ffff880024ec83e0 1 160ms FSC: OBJ17ae OP599f2: Write1/EnQ fl=2
vsq - ffff880026618e00 1 160ms FSC: OBJ17e6 OP60d33: Write1/EnQ fl=2
vsq - ffff880025a2a4b8 1 132ms FSC: OBJ16a2 OP4d583: Write1/EnQ fl=2
vsq - ffff880023cbe6d8 9 212ms FSC: OBJ17eb: LOOK
vsq - ffff880024d37590 9 212ms FSC: OBJ17ec: LOOK
vsq - ffff880027746cb0 9 212ms FSC: OBJ17ed: LOOK
vsq - ffff880024d37ae8 9 212ms FSC: OBJ17ee: LOOK
vsq - ffff880024d37cb0 9 212ms FSC: OBJ17ef: LOOK
vsq - ffff880025036550 9 212ms FSC: OBJ17f0: LOOK
vsq - ffff8800250368e0 9 212ms FSC: OBJ17f1: LOOK
vsq - ffff880025036aa8 9 212ms FSC: OBJ17f2: LOOK
In the 'THR' column, executing items show the thread they're occupying and
queued items indicate which queue they're on. 'PID' shows the process ID of
a slow-work thread that's executing something. 'FL' shows the work item flags.
'MARK' indicates how long since an item was queued or began executing. Lastly,
the 'DESC' column permits the owner of an item to give some information.


@ -512,10 +512,32 @@ W: http://www.arm.linux.org.uk/
S: Maintained
F: arch/arm/
ARM PRIMECELL AACI PL041 DRIVER
M: Russell King <linux@arm.linux.org.uk>
S: Maintained
F: sound/arm/aaci.*
ARM PRIMECELL CLCD PL110 DRIVER
M: Russell King <linux@arm.linux.org.uk>
S: Maintained
F: drivers/video/amba-clcd.*
ARM PRIMECELL KMI PL050 DRIVER
M: Russell King <linux@arm.linux.org.uk>
S: Maintained
F: drivers/input/serio/ambakmi.*
F: include/linux/amba/kmi.h
ARM PRIMECELL MMCI PL180/1 DRIVER
S: Orphan
F: drivers/mmc/host/mmci.*
ARM PRIMECELL BUS SUPPORT
M: Russell King <linux@arm.linux.org.uk>
S: Maintained
F: drivers/amba/
F: include/linux/amba/bus.h
ARM/ADI ROADRUNNER MACHINE SUPPORT
M: Lennert Buytenhek <kernel@wantstofly.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@ -1027,7 +1049,7 @@ F: drivers/serial/atmel_serial.c
ATMEL LCDFB DRIVER
M: Nicolas Ferre <nicolas.ferre@atmel.com>
L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
L: linux-fbdev@vger.kernel.org
S: Maintained
F: drivers/video/atmel_lcdfb.c
F: include/video/atmel_lcdc.h
@ -2113,7 +2135,7 @@ F: drivers/net/wan/dlci.c
F: drivers/net/wan/sdla.c
FRAMEBUFFER LAYER
L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
L: linux-fbdev@vger.kernel.org
W: http://linux-fbdev.sourceforge.net/
S: Orphan
F: Documentation/fb/
@ -2136,7 +2158,7 @@ F: drivers/i2c/busses/i2c-cpm.c
FREESCALE IMX / MXC FRAMEBUFFER DRIVER
M: Sascha Hauer <kernel@pengutronix.de>
L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
L: linux-fbdev@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/plat-mxc/include/mach/imxfb.h
@ -2312,6 +2334,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
S: Maintained
F: drivers/media/video/gspca/finepix.c
GSPCA GL860 SUBDRIVER
M: Olivier Lorin <o.lorin@laposte.net>
L: linux-media@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
S: Maintained
F: drivers/media/video/gspca/gl860/
GSPCA M5602 SUBDRIVER
M: Erik Andren <erik.andren@gmail.com>
L: linux-media@vger.kernel.org
@ -2533,8 +2562,7 @@ S: Maintained
F: Documentation/i2c/
F: drivers/i2c/
F: include/linux/i2c.h
F: include/linux/i2c-dev.h
F: include/linux/i2c-id.h
F: include/linux/i2c-*.h
I2C-TINY-USB DRIVER
M: Till Harbaum <till@harbaum.org>
@ -2635,7 +2663,7 @@ S: Supported
F: security/integrity/ima/
IMS TWINTURBO FRAMEBUFFER DRIVER
L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
L: linux-fbdev@vger.kernel.org
S: Orphan
F: drivers/video/imsttfb.c
@ -2670,14 +2698,14 @@ F: drivers/input/
INTEL FRAMEBUFFER DRIVER (excluding 810 and 815)
M: Sylvain Meyer <sylvain.meyer@worldonline.fr>
L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
L: linux-fbdev@vger.kernel.org
S: Maintained
F: Documentation/fb/intelfb.txt
F: drivers/video/intelfb/
INTEL 810/815 FRAMEBUFFER DRIVER
M: Antonino Daplas <adaplas@gmail.com>
L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
L: linux-fbdev@vger.kernel.org
S: Maintained
F: drivers/video/i810/
@ -3391,7 +3419,7 @@ S: Supported
MATROX FRAMEBUFFER DRIVER
M: Petr Vandrovec <vandrove@vc.cvut.cz>
L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
L: linux-fbdev@vger.kernel.org
S: Maintained
F: drivers/video/matrox/matroxfb_*
F: include/linux/matroxfb.h
@ -3778,7 +3806,7 @@ F: fs/ntfs/
NVIDIA (rivafb and nvidiafb) FRAMEBUFFER DRIVER
M: Antonino Daplas <adaplas@gmail.com>
L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
L: linux-fbdev@vger.kernel.org
S: Maintained
F: drivers/video/riva/
F: drivers/video/nvidia/
@ -3813,7 +3841,7 @@ F: sound/soc/omap/
OMAP FRAMEBUFFER SUPPORT
M: Imre Deak <imre.deak@nokia.com>
L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
L: linux-fbdev@vger.kernel.org
L: linux-omap@vger.kernel.org
S: Maintained
F: drivers/video/omap/
@ -4319,14 +4347,14 @@ F: include/linux/qnxtypes.h
RADEON FRAMEBUFFER DISPLAY DRIVER
M: Benjamin Herrenschmidt <benh@kernel.crashing.org>
L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
L: linux-fbdev@vger.kernel.org
S: Maintained
F: drivers/video/aty/radeon*
F: include/linux/radeonfb.h
RAGE128 FRAMEBUFFER DISPLAY DRIVER
M: Paul Mackerras <paulus@samba.org>
L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
L: linux-fbdev@vger.kernel.org
S: Maintained
F: drivers/video/aty/aty128fb.c
@ -4465,7 +4493,7 @@ F: drivers/net/wireless/rtl818x/rtl8187*
S3 SAVAGE FRAMEBUFFER DRIVER
M: Antonino Daplas <adaplas@gmail.com>
L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
L: linux-fbdev@vger.kernel.org
S: Maintained
F: drivers/video/savage/
@ -5628,7 +5656,7 @@ S: Maintained
UVESAFB DRIVER
M: Michal Januszewski <spock@gentoo.org>
L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
L: linux-fbdev@vger.kernel.org
W: http://dev.gentoo.org/~spock/projects/uvesafb/
S: Maintained
F: Documentation/fb/uvesafb.txt
@ -5661,7 +5689,7 @@ F: drivers/mmc/host/via-sdmmc.c
VIA UNICHROME(PRO)/CHROME9 FRAMEBUFFER DRIVER
M: Joseph Chan <JosephChan@via.com.tw>
M: Scott Fang <ScottFang@viatech.com.cn>
L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
L: linux-fbdev@vger.kernel.org
S: Maintained
F: drivers/video/via/


@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 32
EXTRAVERSION = -rc7
EXTRAVERSION = -rc8
NAME = Man-Eating Seals of Antiquity
# *DOCUMENTATION*


@ -61,21 +61,24 @@ register struct thread_info *__current_thread_info __asm__("$8");
/*
* Thread information flags:
* - these are process state flags and used from assembly
* - pending work-to-be-done flags come first to fit in an immediate operand.
* - pending work-to-be-done flags come first and must be assigned to be
* within bits 0 to 7 to fit in an immediate operand.
* - ALPHA_UAC_SHIFT below must be kept consistent with the unaligned
* control flags.
*
* TIF_SYSCALL_TRACE is known to be 0 via blbs.
*/
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
#define TIF_SIGPENDING 1 /* signal pending */
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
#define TIF_POLLING_NRFLAG 3 /* poll_idle is polling NEED_RESCHED */
#define TIF_DIE_IF_KERNEL 4 /* dik recursion lock */
#define TIF_UAC_NOPRINT 5 /* see sysinfo.h */
#define TIF_UAC_NOFIX 6
#define TIF_UAC_SIGBUS 7
#define TIF_MEMDIE 8
#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal */
#define TIF_NOTIFY_RESUME 10 /* callback before returning to user */
#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_POLLING_NRFLAG 8 /* poll_idle is polling NEED_RESCHED */
#define TIF_DIE_IF_KERNEL 9 /* dik recursion lock */
#define TIF_UAC_NOPRINT 10 /* see sysinfo.h */
#define TIF_UAC_NOFIX 11
#define TIF_UAC_SIGBUS 12
#define TIF_MEMDIE 13
#define TIF_RESTORE_SIGMASK 14 /* restore signal mask in do_signal */
#define TIF_FREEZE 16 /* is freezing for suspend */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@ -94,7 +97,7 @@ register struct thread_info *__current_thread_info __asm__("$8");
#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK \
| _TIF_SYSCALL_TRACE)
#define ALPHA_UAC_SHIFT 6
#define ALPHA_UAC_SHIFT 10
#define ALPHA_UAC_MASK (1 << TIF_UAC_NOPRINT | 1 << TIF_UAC_NOFIX | \
1 << TIF_UAC_SIGBUS)


@ -1103,6 +1103,8 @@ marvel_agp_info(void)
* Allocate the info structure.
*/
agp = kmalloc(sizeof(*agp), GFP_KERNEL);
if (!agp)
return NULL;
/*
* Fill it in.


@ -757,6 +757,8 @@ titan_agp_info(void)
* Allocate the info structure.
*/
agp = kmalloc(sizeof(*agp), GFP_KERNEL);
if (!agp)
return NULL;
/*
* Fill it in.


@ -92,7 +92,7 @@ show_interrupts(struct seq_file *p, void *v)
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_irqs_cpu(irq, j));
#endif
seq_printf(p, " %14s", irq_desc[irq].chip->typename);
seq_printf(p, " %14s", irq_desc[irq].chip->name);
seq_printf(p, " %c%s",
(action->flags & IRQF_DISABLED)?'+':' ',
action->name);


@ -228,7 +228,7 @@ struct irqaction timer_irqaction = {
};
static struct irq_chip rtc_irq_type = {
.typename = "RTC",
.name = "RTC",
.startup = rtc_startup,
.shutdown = rtc_enable_disable,
.enable = rtc_enable_disable,


@ -84,7 +84,7 @@ i8259a_end_irq(unsigned int irq)
}
struct irq_chip i8259a_irq_type = {
.typename = "XT-PIC",
.name = "XT-PIC",
.startup = i8259a_startup_irq,
.shutdown = i8259a_disable_irq,
.enable = i8259a_enable_irq,


@ -71,7 +71,7 @@ pyxis_mask_and_ack_irq(unsigned int irq)
}
static struct irq_chip pyxis_irq_type = {
.typename = "PYXIS",
.name = "PYXIS",
.startup = pyxis_startup_irq,
.shutdown = pyxis_disable_irq,
.enable = pyxis_enable_irq,


@ -49,7 +49,7 @@ srm_end_irq(unsigned int irq)
/* Handle interrupts from the SRM, assuming no additional weirdness. */
static struct irq_chip srm_irq_type = {
.typename = "SRM",
.name = "SRM",
.startup = srm_startup_irq,
.shutdown = srm_disable_irq,
.enable = srm_enable_irq,


@ -90,7 +90,7 @@ alcor_end_irq(unsigned int irq)
}
static struct irq_chip alcor_irq_type = {
.typename = "ALCOR",
.name = "ALCOR",
.startup = alcor_startup_irq,
.shutdown = alcor_disable_irq,
.enable = alcor_enable_irq,


@ -72,7 +72,7 @@ cabriolet_end_irq(unsigned int irq)
}
static struct irq_chip cabriolet_irq_type = {
.typename = "CABRIOLET",
.name = "CABRIOLET",
.startup = cabriolet_startup_irq,
.shutdown = cabriolet_disable_irq,
.enable = cabriolet_enable_irq,


@ -199,7 +199,7 @@ clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
}
static struct irq_chip dp264_irq_type = {
.typename = "DP264",
.name = "DP264",
.startup = dp264_startup_irq,
.shutdown = dp264_disable_irq,
.enable = dp264_enable_irq,
@ -210,7 +210,7 @@ static struct irq_chip dp264_irq_type = {
};
static struct irq_chip clipper_irq_type = {
.typename = "CLIPPER",
.name = "CLIPPER",
.startup = clipper_startup_irq,
.shutdown = clipper_disable_irq,
.enable = clipper_enable_irq,


@ -70,7 +70,7 @@ eb64p_end_irq(unsigned int irq)
}
static struct irq_chip eb64p_irq_type = {
.typename = "EB64P",
.name = "EB64P",
.startup = eb64p_startup_irq,
.shutdown = eb64p_disable_irq,
.enable = eb64p_enable_irq,


@ -81,7 +81,7 @@ eiger_end_irq(unsigned int irq)
}
static struct irq_chip eiger_irq_type = {
.typename = "EIGER",
.name = "EIGER",
.startup = eiger_startup_irq,
.shutdown = eiger_disable_irq,
.enable = eiger_enable_irq,


@ -119,7 +119,7 @@ jensen_local_end(unsigned int irq)
}
static struct irq_chip jensen_local_irq_type = {
.typename = "LOCAL",
.name = "LOCAL",
.startup = jensen_local_startup,
.shutdown = jensen_local_shutdown,
.enable = jensen_local_enable,


@ -170,7 +170,7 @@ marvel_irq_noop_return(unsigned int irq)
}
static struct irq_chip marvel_legacy_irq_type = {
.typename = "LEGACY",
.name = "LEGACY",
.startup = marvel_irq_noop_return,
.shutdown = marvel_irq_noop,
.enable = marvel_irq_noop,
@ -180,7 +180,7 @@ static struct irq_chip marvel_legacy_irq_type = {
};
static struct irq_chip io7_lsi_irq_type = {
.typename = "LSI",
.name = "LSI",
.startup = io7_startup_irq,
.shutdown = io7_disable_irq,
.enable = io7_enable_irq,
@ -190,7 +190,7 @@ static struct irq_chip io7_lsi_irq_type = {
};
static struct irq_chip io7_msi_irq_type = {
.typename = "MSI",
.name = "MSI",
.startup = io7_startup_irq,
.shutdown = io7_disable_irq,
.enable = io7_enable_irq,


@ -69,7 +69,7 @@ mikasa_end_irq(unsigned int irq)
}
static struct irq_chip mikasa_irq_type = {
.typename = "MIKASA",
.name = "MIKASA",
.startup = mikasa_startup_irq,
.shutdown = mikasa_disable_irq,
.enable = mikasa_enable_irq,


@ -74,7 +74,7 @@ noritake_end_irq(unsigned int irq)
}
static struct irq_chip noritake_irq_type = {
.typename = "NORITAKE",
.name = "NORITAKE",
.startup = noritake_startup_irq,
.shutdown = noritake_disable_irq,
.enable = noritake_enable_irq,


@ -136,7 +136,7 @@ rawhide_end_irq(unsigned int irq)
}
static struct irq_chip rawhide_irq_type = {
.typename = "RAWHIDE",
.name = "RAWHIDE",
.startup = rawhide_startup_irq,
.shutdown = rawhide_disable_irq,
.enable = rawhide_enable_irq,


@ -66,7 +66,7 @@ ruffian_init_irq(void)
common_init_isa_dma();
}
#define RUFFIAN_LATCH ((PIT_TICK_RATE + HZ / 2) / HZ)
#define RUFFIAN_LATCH DIV_ROUND_CLOSEST(PIT_TICK_RATE, HZ)
static void __init
ruffian_init_rtc(void)


@ -73,7 +73,7 @@ rx164_end_irq(unsigned int irq)
}
static struct irq_chip rx164_irq_type = {
.typename = "RX164",
.name = "RX164",
.startup = rx164_startup_irq,
.shutdown = rx164_disable_irq,
.enable = rx164_enable_irq,


@ -502,7 +502,7 @@ sable_lynx_mask_and_ack_irq(unsigned int irq)
}
static struct irq_chip sable_lynx_irq_type = {
.typename = "SABLE/LYNX",
.name = "SABLE/LYNX",
.startup = sable_lynx_startup_irq,
.shutdown = sable_lynx_disable_irq,
.enable = sable_lynx_enable_irq,


@ -75,7 +75,7 @@ takara_end_irq(unsigned int irq)
}
static struct irq_chip takara_irq_type = {
.typename = "TAKARA",
.name = "TAKARA",
.startup = takara_startup_irq,
.shutdown = takara_disable_irq,
.enable = takara_enable_irq,


@ -195,7 +195,7 @@ init_titan_irqs(struct irq_chip * ops, int imin, int imax)
}
static struct irq_chip titan_irq_type = {
.typename = "TITAN",
.name = "TITAN",
.startup = titan_startup_irq,
.shutdown = titan_disable_irq,
.enable = titan_enable_irq,


@ -158,7 +158,7 @@ wildfire_end_irq(unsigned int irq)
}
static struct irq_chip wildfire_irq_type = {
.typename = "WILDFIRE",
.name = "WILDFIRE",
.startup = wildfire_startup_irq,
.shutdown = wildfire_disable_irq,
.enable = wildfire_enable_irq,


@ -22,4 +22,10 @@ enum km_type {
KM_TYPE_NR
};
#ifdef CONFIG_DEBUG_HIGHMEM
#define KM_NMI (-1)
#define KM_NMI_PTE (-1)
#define KM_IRQ_PTE (-1)
#endif
#endif


@ -662,8 +662,12 @@ static void do_signal(struct pt_regs *regs, int syscall)
regs->ARM_sp -= 4;
usp = (u32 __user *)regs->ARM_sp;
put_user(regs->ARM_pc, usp);
if (put_user(regs->ARM_pc, usp) == 0) {
regs->ARM_pc = KERN_RESTART_CODE;
} else {
regs->ARM_sp += 4;
force_sigsegv(0, current);
}
#endif
}
}


@ -447,6 +447,7 @@ static __init int pxa_cpufreq_init(struct cpufreq_policy *policy)
pxa27x_freq_table[i].frequency = freq;
pxa27x_freq_table[i].index = i;
}
pxa27x_freq_table[i].index = i;
pxa27x_freq_table[i].frequency = CPUFREQ_TABLE_END;
/*


@ -102,7 +102,7 @@ static int setup_freqs_table(struct cpufreq_policy *policy,
table[i].index = i;
table[i].frequency = freqs[i].cpufreq_mhz * 1000;
}
table[num].frequency = i;
table[num].index = i;
table[num].frequency = CPUFREQ_TABLE_END;
pxa3xx_freqs = freqs;


@ -802,10 +802,12 @@ static void __init spitz_init(void)
{
spitz_ficp_platform_data.gpio_pwdown = SPITZ_GPIO_IR_ON;
#ifdef CONFIG_MACH_BORZOI
if (machine_is_borzoi()) {
sharpsl_nand_platform_data.badblock_pattern = &sharpsl_akita_bbt;
sharpsl_nand_platform_data.ecc_layout = &akita_oobinfo;
}
#endif
platform_scoop_config = &spitz_pcmcia_config;


@ -12,7 +12,7 @@
#
# http://www.arm.linux.org.uk/developer/machines/?action=new
#
# Last update: Fri Sep 18 21:42:00 2009
# Last update: Wed Nov 25 22:14:58 2009
#
# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
#
@ -928,7 +928,7 @@ palmt5 MACH_PALMT5 PALMT5 917
palmtc MACH_PALMTC PALMTC 918
omap_apollon MACH_OMAP_APOLLON OMAP_APOLLON 919
mxc30030evb MACH_MXC30030EVB MXC30030EVB 920
rea_2d MACH_REA_2D REA_2D 921
rea_cpu2 MACH_REA_2D REA_2D 921
eti3e524 MACH_TI3E524 TI3E524 922
ateb9200 MACH_ATEB9200 ATEB9200 923
auckland MACH_AUCKLAND AUCKLAND 924
@ -2421,3 +2421,118 @@ liberty MACH_LIBERTY LIBERTY 2434
mh355 MACH_MH355 MH355 2435
pc7802 MACH_PC7802 PC7802 2436
gnet_sgc MACH_GNET_SGC GNET_SGC 2437
einstein15 MACH_EINSTEIN15 EINSTEIN15 2438
cmpd MACH_CMPD CMPD 2439
davinci_hase1 MACH_DAVINCI_HASE1 DAVINCI_HASE1 2440
lgeincitephone MACH_LGEINCITEPHONE LGEINCITEPHONE 2441
ea313x MACH_EA313X EA313X 2442
fwbd_39064 MACH_FWBD_39064 FWBD_39064 2443
fwbd_390128 MACH_FWBD_390128 FWBD_390128 2444
pelco_moe MACH_PELCO_MOE PELCO_MOE 2445
minimix27 MACH_MINIMIX27 MINIMIX27 2446
omap3_thunder MACH_OMAP3_THUNDER OMAP3_THUNDER 2447
passionc MACH_PASSIONC PASSIONC 2448
mx27amata MACH_MX27AMATA MX27AMATA 2449
bgat1 MACH_BGAT1 BGAT1 2450
buzz MACH_BUZZ BUZZ 2451
mb9g20 MACH_MB9G20 MB9G20 2452
yushan MACH_YUSHAN YUSHAN 2453
lizard MACH_LIZARD LIZARD 2454
omap3polycom MACH_OMAP3POLYCOM OMAP3POLYCOM 2455
smdkv210 MACH_SMDKV210 SMDKV210 2456
bravo MACH_BRAVO BRAVO 2457
siogentoo1 MACH_SIOGENTOO1 SIOGENTOO1 2458
siogentoo2 MACH_SIOGENTOO2 SIOGENTOO2 2459
sm3k MACH_SM3K SM3K 2460
acer_tempo_f900 MACH_ACER_TEMPO_F900 ACER_TEMPO_F900 2461
sst61vc010_dev MACH_SST61VC010_DEV SST61VC010_DEV 2462
glittertind MACH_GLITTERTIND GLITTERTIND 2463
omap_zoom3 MACH_OMAP_ZOOM3 OMAP_ZOOM3 2464
omap_3630sdp MACH_OMAP_3630SDP OMAP_3630SDP 2465
cybook2440 MACH_CYBOOK2440 CYBOOK2440 2466
torino_s MACH_TORINO_S TORINO_S 2467
havana MACH_HAVANA HAVANA 2468
beaumont_11 MACH_BEAUMONT_11 BEAUMONT_11 2469
vanguard MACH_VANGUARD VANGUARD 2470
s5pc110_draco MACH_S5PC110_DRACO S5PC110_DRACO 2471
cartesio_two MACH_CARTESIO_TWO CARTESIO_TWO 2472
aster MACH_ASTER ASTER 2473
voguesv210 MACH_VOGUESV210 VOGUESV210 2474
acm500x MACH_ACM500X ACM500X 2475
km9260 MACH_KM9260 KM9260 2476
nideflexg1 MACH_NIDEFLEXG1 NIDEFLEXG1 2477
ctera_plug_io MACH_CTERA_PLUG_IO CTERA_PLUG_IO 2478
smartq7 MACH_SMARTQ7 SMARTQ7 2479
at91sam9g10ek2 MACH_AT91SAM9G10EK2 AT91SAM9G10EK2 2480
asusp527 MACH_ASUSP527 ASUSP527 2481
at91sam9g20mpm2 MACH_AT91SAM9G20MPM2 AT91SAM9G20MPM2 2482
topasa900 MACH_TOPASA900 TOPASA900 2483
electrum_100 MACH_ELECTRUM_100 ELECTRUM_100 2484
mx51grb MACH_MX51GRB MX51GRB 2485
xea300 MACH_XEA300 XEA300 2486
htcstartrek MACH_HTCSTARTREK HTCSTARTREK 2487
lima MACH_LIMA LIMA 2488
csb740 MACH_CSB740 CSB740 2489
usb_s8815 MACH_USB_S8815 USB_S8815 2490
watson_efm_plugin MACH_WATSON_EFM_PLUGIN WATSON_EFM_PLUGIN 2491
milkyway MACH_MILKYWAY MILKYWAY 2492
g4evm MACH_G4EVM G4EVM 2493
picomod6 MACH_PICOMOD6 PICOMOD6 2494
omapl138_hawkboard MACH_OMAPL138_HAWKBOARD OMAPL138_HAWKBOARD 2495
ip6000 MACH_IP6000 IP6000 2496
ip6010 MACH_IP6010 IP6010 2497
utm400 MACH_UTM400 UTM400 2498
omap3_zybex MACH_OMAP3_ZYBEX OMAP3_ZYBEX 2499
wireless_space MACH_WIRELESS_SPACE WIRELESS_SPACE 2500
sx560 MACH_SX560 SX560 2501
ts41x MACH_TS41X TS41X 2502
elphel10373 MACH_ELPHEL10373 ELPHEL10373 2503
rhobot MACH_RHOBOT RHOBOT 2504
mx51_refresh MACH_MX51_REFRESH MX51_REFRESH 2505
ls9260 MACH_LS9260 LS9260 2506
shank MACH_SHANK SHANK 2507
qsd8x50_st1 MACH_QSD8X50_ST1 QSD8X50_ST1 2508
at91sam9m10ekes MACH_AT91SAM9M10EKES AT91SAM9M10EKES 2509
hiram MACH_HIRAM HIRAM 2510
phy3250 MACH_PHY3250 PHY3250 2511
ea3250 MACH_EA3250 EA3250 2512
fdi3250 MACH_FDI3250 FDI3250 2513
whitestone MACH_WHITESTONE WHITESTONE 2514
at91sam9263nit MACH_AT91SAM9263NIT AT91SAM9263NIT 2515
ccmx51 MACH_CCMX51 CCMX51 2516
ccmx51js MACH_CCMX51JS CCMX51JS 2517
ccwmx51 MACH_CCWMX51 CCWMX51 2518
ccwmx51js MACH_CCWMX51JS CCWMX51JS 2519
mini6410 MACH_MINI6410 MINI6410 2520
tiny6410 MACH_TINY6410 TINY6410 2521
nano6410 MACH_NANO6410 NANO6410 2522
at572d940hfnldb MACH_AT572D940HFNLDB AT572D940HFNLDB 2523
htcleo MACH_HTCLEO HTCLEO 2524
avp13 MACH_AVP13 AVP13 2525
xxsvideod MACH_XXSVIDEOD XXSVIDEOD 2526
vpnext MACH_VPNEXT VPNEXT 2527
swarco_itc3 MACH_SWARCO_ITC3 SWARCO_ITC3 2528
tx51 MACH_TX51 TX51 2529
dolby_cat1021 MACH_DOLBY_CAT1021 DOLBY_CAT1021 2530
mx28evk MACH_MX28EVK MX28EVK 2531
phoenix260 MACH_PHOENIX260 PHOENIX260 2532
uvaca_stork MACH_UVACA_STORK UVACA_STORK 2533
smartq5 MACH_SMARTQ5 SMARTQ5 2534
all3078 MACH_ALL3078 ALL3078 2535
ctera_2bay_ds MACH_CTERA_2BAY_DS CTERA_2BAY_DS 2536
siogentoo3 MACH_SIOGENTOO3 SIOGENTOO3 2537
epb5000 MACH_EPB5000 EPB5000 2538
hy9263 MACH_HY9263 HY9263 2539
acer_tempo_m900 MACH_ACER_TEMPO_M900 ACER_TEMPO_M900 2540
acer_tempo_dx650 MACH_ACER_TEMPO_DX900 ACER_TEMPO_DX900 2541
acer_tempo_x960 MACH_ACER_TEMPO_X960 ACER_TEMPO_X960 2542
acer_eten_v900 MACH_ACER_ETEN_V900 ACER_ETEN_V900 2543
acer_eten_x900 MACH_ACER_ETEN_X900 ACER_ETEN_X900 2544
bonnell MACH_BONNELL BONNELL 2545
oht_mx27 MACH_OHT_MX27 OHT_MX27 2546
htcquartz MACH_HTCQUARTZ HTCQUARTZ 2547
davinci_dm6467tevm MACH_DAVINCI_DM6467TEVM DAVINCI_DM6467TEVM 2548
c3ax03 MACH_C3AX03 C3AX03 2549
mxt_td60 MACH_MXT_TD60 MXT_TD60 2550
esyx MACH_ESYX ESYX 2551
bulldog MACH_BULLDOG BULLDOG 2553


@ -225,9 +225,14 @@ int blackfin_dma_suspend(void)
void blackfin_dma_resume(void)
{
int i;
for (i = 0; i < MAX_DMA_SUSPEND_CHANNELS; ++i)
for (i = 0; i < MAX_DMA_CHANNELS; ++i) {
dma_ch[i].regs->cfg = 0;
if (i < MAX_DMA_SUSPEND_CHANNELS)
dma_ch[i].regs->peripheral_map = dma_ch[i].saved_peripheral_map;
}
}
#endif
/**


@ -38,7 +38,7 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
d_cache = CPLB_L1_CHBL;
#ifdef CONFIG_BFIN_EXTMEM_WRITETROUGH
#ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
d_cache |= CPLB_L1_AOW | CPLB_WT;
#endif
#endif


@ -151,7 +151,7 @@ void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_
regs->pc = new_ip;
if (current->mm)
regs->p5 = current->mm->start_data;
#ifdef CONFIG_SMP
#ifndef CONFIG_SMP
task_thread_info(current)->l1_task_info.stack_start =
(void *)current->mm->context.stack_start;
task_thread_info(current)->l1_task_info.lowest_sp = (void *)new_sp;


@ -315,7 +315,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case BFIN_MEM_ACCESS_CORE:
case BFIN_MEM_ACCESS_CORE_ONLY:
copied = access_process_vm(child, addr, &data,
to_copy, 0);
to_copy, 1);
if (copied)
break;


@ -1,9 +1,13 @@
/*
* File: include/asm-blackfin/mach-bf518/anomaly.h
* Bugs: Enter bugs at http://blackfin.uclinux.org/
* DO NOT EDIT THIS FILE
* This file is under version control at
* svn://sources.blackfin.uclinux.org/toolchain/trunk/proc-defs/header-frags/
* and can be replaced with that version at any time
* DO NOT EDIT THIS FILE
*
* Copyright (C) 2004-2009 Analog Devices Inc.
* Licensed under the GPL-2 or later.
* Copyright 2004-2009 Analog Devices Inc.
* Licensed under the ADI BSD license.
* https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
*/
/* This file should be up to date with:
@ -70,6 +74,10 @@
#define ANOMALY_05000461 (1)
/* Synchronization Problem at Startup May Cause SPORT Transmit Channels to Misalign */
#define ANOMALY_05000462 (1)
/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
#define ANOMALY_05000473 (1)
/* TESTSET Instruction Cannot Be Interrupted */
#define ANOMALY_05000477 (1)
/* Anomalies that don't exist on this proc */
#define ANOMALY_05000099 (0)
@ -133,5 +141,7 @@
#define ANOMALY_05000450 (0)
#define ANOMALY_05000465 (0)
#define ANOMALY_05000467 (0)
#define ANOMALY_05000474 (0)
#define ANOMALY_05000475 (0)
#endif


@ -1,14 +1,18 @@
/*
* File: include/asm-blackfin/mach-bf527/anomaly.h
* Bugs: Enter bugs at http://blackfin.uclinux.org/
* DO NOT EDIT THIS FILE
* This file is under version control at
* svn://sources.blackfin.uclinux.org/toolchain/trunk/proc-defs/header-frags/
* and can be replaced with that version at any time
* DO NOT EDIT THIS FILE
*
* Copyright (C) 2004-2009 Analog Devices Inc.
* Licensed under the GPL-2 or later.
* Copyright 2004-2009 Analog Devices Inc.
* Licensed under the ADI BSD license.
* https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
*/
/* This file should be up to date with:
* - Revision D, 08/14/2009; ADSP-BF526 Blackfin Processor Anomaly List
* - Revision F, 03/03/2009; ADSP-BF527 Blackfin Processor Anomaly List
* - Revision G, 08/25/2009; ADSP-BF527 Blackfin Processor Anomaly List
*/
#ifndef _MACH_ANOMALY_H_
@ -200,6 +204,10 @@
#define ANOMALY_05000467 (1)
/* PLL Latches Incorrect Settings During Reset */
#define ANOMALY_05000469 (1)
/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
#define ANOMALY_05000473 (1)
/* TESTSET Instruction Cannot Be Interrupted */
#define ANOMALY_05000477 (1)
/* Anomalies that don't exist on this proc */
#define ANOMALY_05000099 (0)
@ -250,5 +258,7 @@
#define ANOMALY_05000412 (0)
#define ANOMALY_05000447 (0)
#define ANOMALY_05000448 (0)
#define ANOMALY_05000474 (0)
#define ANOMALY_05000475 (0)
#endif


@ -1,9 +1,13 @@
/*
* File: include/asm-blackfin/mach-bf533/anomaly.h
* Bugs: Enter bugs at http://blackfin.uclinux.org/
* DO NOT EDIT THIS FILE
* This file is under version control at
* svn://sources.blackfin.uclinux.org/toolchain/trunk/proc-defs/header-frags/
* and can be replaced with that version at any time
* DO NOT EDIT THIS FILE
*
* Copyright (C) 2004-2009 Analog Devices Inc.
* Licensed under the GPL-2 or later.
* Copyright 2004-2009 Analog Devices Inc.
* Licensed under the ADI BSD license.
* https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
*/
/* This file should be up to date with:
@ -202,6 +206,10 @@
#define ANOMALY_05000443 (1)
/* False Hardware Error when RETI Points to Invalid Memory */
#define ANOMALY_05000461 (1)
/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
#define ANOMALY_05000473 (1)
/* TESTSET Instruction Cannot Be Interrupted */
#define ANOMALY_05000477 (1)
/* These anomalies have been "phased" out of analog.com anomaly sheets and are
* here to show running on older silicon just isn't feasible.
@ -349,5 +357,7 @@
#define ANOMALY_05000450 (0)
#define ANOMALY_05000465 (0)
#define ANOMALY_05000467 (0)
#define ANOMALY_05000474 (0)
#define ANOMALY_05000475 (0)
#endif


@ -1,9 +1,13 @@
/*
* File: include/asm-blackfin/mach-bf537/anomaly.h
* Bugs: Enter bugs at http://blackfin.uclinux.org/
* DO NOT EDIT THIS FILE
* This file is under version control at
* svn://sources.blackfin.uclinux.org/toolchain/trunk/proc-defs/header-frags/
* and can be replaced with that version at any time
* DO NOT EDIT THIS FILE
*
* Copyright (C) 2004-2009 Analog Devices Inc.
* Licensed under the GPL-2 or later.
* Copyright 2004-2009 Analog Devices Inc.
* Licensed under the ADI BSD license.
* https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
*/
/* This file should be up to date with:
@ -156,6 +160,10 @@
#define ANOMALY_05000443 (1)
/* False Hardware Error when RETI Points to Invalid Memory */
#define ANOMALY_05000461 (1)
/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
#define ANOMALY_05000473 (1)
/* TESTSET Instruction Cannot Be Interrupted */
#define ANOMALY_05000477 (1)
/* Anomalies that don't exist on this proc */
#define ANOMALY_05000099 (0)
@ -202,5 +210,7 @@
#define ANOMALY_05000450 (0)
#define ANOMALY_05000465 (0)
#define ANOMALY_05000467 (0)
#define ANOMALY_05000474 (0)
#define ANOMALY_05000475 (0)
#endif


@ -1,9 +1,13 @@
/*
* File: include/asm-blackfin/mach-bf538/anomaly.h
* Bugs: Enter bugs at http://blackfin.uclinux.org/
* DO NOT EDIT THIS FILE
* This file is under version control at
* svn://sources.blackfin.uclinux.org/toolchain/trunk/proc-defs/header-frags/
* and can be replaced with that version at any time
* DO NOT EDIT THIS FILE
*
* Copyright (C) 2004-2009 Analog Devices Inc.
* Licensed under the GPL-2 or later.
* Copyright 2004-2009 Analog Devices Inc.
* Licensed under the ADI BSD license.
* https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
*/
/* This file should be up to date with:
@ -128,6 +132,10 @@
#define ANOMALY_05000443 (1)
/* False Hardware Error when RETI Points to Invalid Memory */
#define ANOMALY_05000461 (1)
/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
#define ANOMALY_05000473 (1)
/* TESTSET Instruction Cannot Be Interrupted */
#define ANOMALY_05000477 (1)
/* Anomalies that don't exist on this proc */
#define ANOMALY_05000099 (0)
@ -176,5 +184,7 @@
#define ANOMALY_05000450 (0)
#define ANOMALY_05000465 (0)
#define ANOMALY_05000467 (0)
#define ANOMALY_05000474 (0)
#define ANOMALY_05000475 (0)
#endif


@ -1,9 +1,13 @@
/*
* File: include/asm-blackfin/mach-bf548/anomaly.h
* Bugs: Enter bugs at http://blackfin.uclinux.org/
* DO NOT EDIT THIS FILE
* This file is under version control at
* svn://sources.blackfin.uclinux.org/toolchain/trunk/proc-defs/header-frags/
* and can be replaced with that version at any time
* DO NOT EDIT THIS FILE
*
* Copyright (C) 2004-2009 Analog Devices Inc.
* Licensed under the GPL-2 or later.
* Copyright 2004-2009 Analog Devices Inc.
* Licensed under the ADI BSD license.
* https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
*/
/* This file should be up to date with:
@ -24,6 +28,8 @@
#define ANOMALY_05000119 (1)
/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */
#define ANOMALY_05000122 (1)
/* Data Corruption with Cached External Memory and Non-Cached On-Chip L2 Memory */
#define ANOMALY_05000220 (1)
/* False Hardware Error from an Access in the Shadow of a Conditional Branch */
#define ANOMALY_05000245 (1)
/* Sensitivity To Noise with Slow Input Edge Rates on External SPORT TX and RX Clocks */
@ -200,6 +206,14 @@
#define ANOMALY_05000466 (1)
/* Possible RX data corruption when control & data EP FIFOs are accessed via the core */
#define ANOMALY_05000467 (1)
/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
#define ANOMALY_05000473 (1)
/* Access to DDR-SDRAM causes system hang under certain PLL/VR settings */
#define ANOMALY_05000474 (1)
/* Core Hang With L2/L3 Configured in Writeback Cache Mode */
#define ANOMALY_05000475 (1)
/* TESTSET Instruction Cannot Be Interrupted */
#define ANOMALY_05000477 (1)
/* Anomalies that don't exist on this proc */
#define ANOMALY_05000099 (0)
@ -215,7 +229,6 @@
#define ANOMALY_05000198 (0)
#define ANOMALY_05000202 (0)
#define ANOMALY_05000215 (0)
#define ANOMALY_05000220 (0)
#define ANOMALY_05000227 (0)
#define ANOMALY_05000230 (0)
#define ANOMALY_05000231 (0)


@ -19,6 +19,16 @@
\reg\().h = _corelock;
.endm
.macro safe_testset addr:req, scratch:req
#if ANOMALY_05000477
cli \scratch;
testset (\addr);
sti \scratch;
#else
testset (\addr);
#endif
.endm
/*
* r0 = address of atomic data to flush and invalidate (32bit).
*
@ -33,7 +43,7 @@ ENTRY(_get_core_lock)
cli r0;
coreslot_loadaddr p0;
.Lretry_corelock:
testset (p0);
safe_testset p0, r2;
if cc jump .Ldone_corelock;
SSYNC(r2);
jump .Lretry_corelock
@ -56,7 +66,7 @@ ENTRY(_get_core_lock_noflush)
cli r0;
coreslot_loadaddr p0;
.Lretry_corelock_noflush:
testset (p0);
safe_testset p0, r2;
if cc jump .Ldone_corelock_noflush;
SSYNC(r2);
jump .Lretry_corelock_noflush


@ -1,9 +1,13 @@
/*
* File: include/asm-blackfin/mach-bf561/anomaly.h
* Bugs: Enter bugs at http://blackfin.uclinux.org/
* DO NOT EDIT THIS FILE
* This file is under version control at
* svn://sources.blackfin.uclinux.org/toolchain/trunk/proc-defs/header-frags/
* and can be replaced with that version at any time
* DO NOT EDIT THIS FILE
*
* Copyright (C) 2004-2009 Analog Devices Inc.
* Licensed under the GPL-2 or later.
* Copyright 2004-2009 Analog Devices Inc.
* Licensed under the ADI BSD license.
* https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
*/
/* This file should be up to date with:
@ -213,7 +217,11 @@
/* Disabling Peripherals with DMA Running May Cause DMA System Instability */
#define ANOMALY_05000278 (__SILICON_REVISION__ < 5)
/* False Hardware Error Exception when ISR Context Is Not Restored */
#define ANOMALY_05000281 (__SILICON_REVISION__ < 5)
/* Temporary workaround for bug 5423 until this issue is confirmed by the
* official anomaly document. It looks like 05000281 still exists on bf561
* v0.5.
*/
#define ANOMALY_05000281 (__SILICON_REVISION__ <= 5)
/* System MMR Write Is Stalled Indefinitely when Killed in a Particular Stage */
#define ANOMALY_05000283 (1)
/* Reads Will Receive Incorrect Data under Certain Conditions */
@ -280,6 +288,12 @@
#define ANOMALY_05000443 (1)
/* False Hardware Error when RETI Points to Invalid Memory */
#define ANOMALY_05000461 (1)
/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
#define ANOMALY_05000473 (1)
/* Core Hang With L2/L3 Configured in Writeback Cache Mode */
#define ANOMALY_05000475 (__SILICON_REVISION__ < 4)
/* TESTSET Instruction Cannot Be Interrupted */
#define ANOMALY_05000477 (1)
/* Anomalies that don't exist on this proc */
#define ANOMALY_05000119 (0)
@ -304,5 +318,6 @@
#define ANOMALY_05000450 (0)
#define ANOMALY_05000465 (0)
#define ANOMALY_05000467 (0)
#define ANOMALY_05000474 (0)
#endif


@ -57,3 +57,8 @@
(!defined(CONFIG_BFIN_EXTMEM_DCACHEABLE) && defined(CONFIG_BFIN_L2_WRITEBACK)))
# error You are exposing Anomaly 220 in this config, either config L2 as Write Through, or make External Memory WB.
#endif
#if ANOMALY_05000475 && \
(defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK))
# error "Anomaly 475 does not allow you to use Write Back cache with L2 or External Memory"
#endif


@ -276,10 +276,9 @@ void smp_send_reschedule(int cpu)
if (cpu_is_offline(cpu))
return;
msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
if (!msg)
return;
memset(msg, 0, sizeof(msg));
INIT_LIST_HEAD(&msg->list);
msg->type = BFIN_IPI_RESCHEDULE;
@ -305,10 +304,9 @@ void smp_send_stop(void)
if (cpus_empty(callmap))
return;
msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
if (!msg)
return;
memset(msg, 0, sizeof(msg));
INIT_LIST_HEAD(&msg->list);
msg->type = BFIN_IPI_CPU_STOP;


@ -358,7 +358,14 @@ config SGI_IP22
select SWAP_IO_SPACE
select SYS_HAS_CPU_R4X00
select SYS_HAS_CPU_R5000
select SYS_HAS_EARLY_PRINTK
#
# Disable EARLY_PRINTK for now since it leads to overwritten prom
# memory during early boot on some machines.
#
# See http://www.linux-mips.org/cgi-bin/mesg.cgi?a=linux-mips&i=20091119164009.GA15038%40deprecation.cyrius.com
# for a more detailed discussion
#
# select SYS_HAS_EARLY_PRINTK
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_BIG_ENDIAN
@ -410,7 +417,14 @@ config SGI_IP28
select SGI_HAS_ZILOG
select SWAP_IO_SPACE
select SYS_HAS_CPU_R10000
select SYS_HAS_EARLY_PRINTK
#
# Disable EARLY_PRINTK for now since it leads to overwritten prom
# memory during early boot on some machines.
#
# See http://www.linux-mips.org/cgi-bin/mesg.cgi?a=linux-mips&i=20091119164009.GA15038%40deprecation.cyrius.com
# for a more detailed discussion
#
# select SYS_HAS_EARLY_PRINTK
select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_BIG_ENDIAN
help
@ -1439,6 +1453,7 @@ choice
config PAGE_SIZE_4KB
bool "4kB"
depends on !CPU_LOONGSON2
help
This option selects the standard 4kB Linux page size. On some
R3000-family processors this is the only available page size. Using
@ -1763,7 +1778,7 @@ config SYS_SUPPORTS_SMARTMIPS
config ARCH_FLATMEM_ENABLE
def_bool y
depends on !NUMA
depends on !NUMA && !CPU_LOONGSON2
config ARCH_DISCONTIGMEM_ENABLE
bool


@ -75,6 +75,7 @@
#define MADV_MERGEABLE 12 /* KSM may merge identical pages */
#define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */
#define MADV_HWPOISON 100 /* poison a page for testing */
/* compatibility flags */
#define MAP_FILE 0


@ -12,6 +12,7 @@
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/irqflags.h>
@ -193,10 +194,6 @@ extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 v
#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels
#endif
/* This function doesn't exist, so you'll get a linker error
if something tries to do an invalid xchg(). */
extern void __xchg_called_with_bad_pointer(void);
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
switch (size) {
@ -205,11 +202,17 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
case 8:
return __xchg_u64(ptr, x);
}
__xchg_called_with_bad_pointer();
return x;
}
#define xchg(ptr, x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
#define xchg(ptr, x) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) & ~0xc); \
\
((__typeof__(*(ptr))) \
__xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \
})
extern void set_handler(unsigned long offset, void *addr, unsigned long len);
extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len);


@ -28,7 +28,7 @@
#define dbg(x...)
#endif
#define KERNEL_START (KERNEL_BINARY_TEXT_START - 0x1000)
#define KERNEL_START (KERNEL_BINARY_TEXT_START)
extern struct unwind_table_entry __start___unwind[];
extern struct unwind_table_entry __stop___unwind[];


@ -78,9 +78,6 @@ SECTIONS
*/
. = ALIGN(PAGE_SIZE);
data_start = .;
EXCEPTION_TABLE(16)
NOTES
/* unwind info */
.PARISC.unwind : {
@ -89,6 +86,9 @@ SECTIONS
__stop___unwind = .;
}
EXCEPTION_TABLE(16)
NOTES
/* Data */
RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)


@ -29,5 +29,16 @@ enum km_type {
KM_TYPE_NR
};
/*
* This is a temporary build fix that (so they say on lkml....) should no longer
* be required after 2.6.33, because of changes planned to the kmap code.
* Let's try to remove this cruft then.
*/
#ifdef CONFIG_DEBUG_HIGHMEM
#define KM_NMI (-1)
#define KM_NMI_PTE (-1)
#define KM_IRQ_PTE (-1)
#endif
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_KMAP_TYPES_H */


@ -45,7 +45,7 @@ extern void free_initmem(void);
#define VMEMMAP_ALIGN(x) (((x)+VMEMMAP_CHUNK-1UL)&VMEMMAP_CHUNK_MASK)
#define VMEMMAP_SIZE ((((1UL << MAX_PHYSADDR_BITS) >> PAGE_SHIFT) * \
sizeof(struct page *)) >> VMEMMAP_CHUNK_SHIFT)
sizeof(struct page)) >> VMEMMAP_CHUNK_SHIFT)
extern unsigned long vmemmap_table[VMEMMAP_SIZE];
#endif


@ -79,7 +79,8 @@ void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
struct cpuinfo_x86 *c = &cpu_data(pr->id);
pr->pdc = NULL;
if (c->x86_vendor == X86_VENDOR_INTEL)
if (c->x86_vendor == X86_VENDOR_INTEL ||
c->x86_vendor == X86_VENDOR_CENTAUR)
init_intel_pdc(pr, c);
return;


@ -23,3 +23,8 @@ config ASYNC_RAID6_RECOV
select ASYNC_CORE
select ASYNC_PQ
config ASYNC_TX_DISABLE_PQ_VAL_DMA
bool
config ASYNC_TX_DISABLE_XOR_VAL_DMA
bool


@ -240,6 +240,16 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
}
EXPORT_SYMBOL_GPL(async_gen_syndrome);
static inline struct dma_chan *
pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
{
#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
return NULL;
#endif
return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks,
disks, len);
}
/**
* async_syndrome_val - asynchronously validate a raid6 syndrome
* @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
@ -260,9 +270,7 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
size_t len, enum sum_check_flags *pqres, struct page *spare,
struct async_submit_ctl *submit)
{
struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ_VAL,
NULL, 0, blocks, disks,
len);
struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
struct dma_device *device = chan ? chan->device : NULL;
struct dma_async_tx_descriptor *tx;
unsigned char coefs[disks-2];
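
The new pq_val_chan() helper above turns CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA into a compile-time opt-out: with the symbol set it returns NULL, so async_syndrome_val() sees no channel and takes the synchronous path. A stand-alone sketch of the same pattern; only the config symbol name is taken from the hunk, the rest is illustrative:

#include <stdio.h>

/* Build with -DCONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA to force the software
 * fallback, mirroring what the Kconfig symbol does above. */

static const char *pq_val_chan(void)
{
#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	return NULL;			/* no DMA channel: validate in software */
#endif
	return "dma-channel-0";		/* stand-in for async_tx_find_channel() */
}

int main(void)
{
	const char *chan = pq_val_chan();

	printf("%s\n", chan ? chan : "no channel: run P+Q validation in software");
	return 0;
}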

View File

@ -234,6 +234,17 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len)
memcmp(a, a + 4, len - 4) == 0);
}
static inline struct dma_chan *
xor_val_chan(struct async_submit_ctl *submit, struct page *dest,
struct page **src_list, int src_cnt, size_t len)
{
#ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
return NULL;
#endif
return async_tx_find_channel(submit, DMA_XOR_VAL, &dest, 1, src_list,
src_cnt, len);
}
/**
* async_xor_val - attempt a xor parity check with a dma engine.
* @dest: destination page used if the xor is performed synchronously
@ -255,9 +266,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
int src_cnt, size_t len, enum sum_check_flags *result,
struct async_submit_ctl *submit)
{
struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR_VAL,
&dest, 1, src_list,
src_cnt, len);
struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len);
struct dma_device *device = chan ? chan->device : NULL;
struct dma_async_tx_descriptor *tx = NULL;
dma_addr_t *dma_src = NULL;

View File

@ -40,7 +40,7 @@ struct crypto_rfc4106_ctx {
struct crypto_gcm_ghash_ctx {
unsigned int cryptlen;
struct scatterlist *src;
crypto_completion_t complete;
void (*complete)(struct aead_request *req, int err);
};
struct crypto_gcm_req_priv_ctx {
@ -267,23 +267,26 @@ static int gcm_hash_final(struct aead_request *req,
return crypto_ahash_final(ahreq);
}
static void gcm_hash_final_done(struct crypto_async_request *areq,
int err)
static void __gcm_hash_final_done(struct aead_request *req, int err)
{
struct aead_request *req = areq->data;
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
if (!err)
crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
gctx->complete(areq, err);
gctx->complete(req, err);
}
static void gcm_hash_len_done(struct crypto_async_request *areq,
int err)
static void gcm_hash_final_done(struct crypto_async_request *areq, int err)
{
struct aead_request *req = areq->data;
__gcm_hash_final_done(req, err);
}
static void __gcm_hash_len_done(struct aead_request *req, int err)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
if (!err) {
@ -292,13 +295,18 @@ static void gcm_hash_len_done(struct crypto_async_request *areq,
return;
}
gcm_hash_final_done(areq, err);
__gcm_hash_final_done(req, err);
}
static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq,
int err)
static void gcm_hash_len_done(struct crypto_async_request *areq, int err)
{
struct aead_request *req = areq->data;
__gcm_hash_len_done(req, err);
}
static void __gcm_hash_crypt_remain_done(struct aead_request *req, int err)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
if (!err) {
@ -307,13 +315,19 @@ static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq,
return;
}
gcm_hash_len_done(areq, err);
__gcm_hash_len_done(req, err);
}
static void gcm_hash_crypt_done(struct crypto_async_request *areq,
static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq,
int err)
{
struct aead_request *req = areq->data;
__gcm_hash_crypt_remain_done(req, err);
}
static void __gcm_hash_crypt_done(struct aead_request *req, int err)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
unsigned int remain;
@ -327,13 +341,18 @@ static void gcm_hash_crypt_done(struct crypto_async_request *areq,
return;
}
gcm_hash_crypt_remain_done(areq, err);
__gcm_hash_crypt_remain_done(req, err);
}
static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq,
int err)
static void gcm_hash_crypt_done(struct crypto_async_request *areq, int err)
{
struct aead_request *req = areq->data;
__gcm_hash_crypt_done(req, err);
}
static void __gcm_hash_assoc_remain_done(struct aead_request *req, int err)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
crypto_completion_t complete;
@ -350,15 +369,21 @@ static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq,
}
if (remain)
gcm_hash_crypt_done(areq, err);
__gcm_hash_crypt_done(req, err);
else
gcm_hash_crypt_remain_done(areq, err);
__gcm_hash_crypt_remain_done(req, err);
}
static void gcm_hash_assoc_done(struct crypto_async_request *areq,
static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq,
int err)
{
struct aead_request *req = areq->data;
__gcm_hash_assoc_remain_done(req, err);
}
static void __gcm_hash_assoc_done(struct aead_request *req, int err)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
unsigned int remain;
@ -371,13 +396,18 @@ static void gcm_hash_assoc_done(struct crypto_async_request *areq,
return;
}
gcm_hash_assoc_remain_done(areq, err);
__gcm_hash_assoc_remain_done(req, err);
}
static void gcm_hash_init_done(struct crypto_async_request *areq,
int err)
static void gcm_hash_assoc_done(struct crypto_async_request *areq, int err)
{
struct aead_request *req = areq->data;
__gcm_hash_assoc_done(req, err);
}
static void __gcm_hash_init_done(struct aead_request *req, int err)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
crypto_completion_t complete;
unsigned int remain = 0;
@ -393,9 +423,16 @@ static void gcm_hash_init_done(struct crypto_async_request *areq,
}
if (remain)
gcm_hash_assoc_done(areq, err);
__gcm_hash_assoc_done(req, err);
else
gcm_hash_assoc_remain_done(areq, err);
__gcm_hash_assoc_remain_done(req, err);
}
static void gcm_hash_init_done(struct crypto_async_request *areq, int err)
{
struct aead_request *req = areq->data;
__gcm_hash_init_done(req, err);
}
static int gcm_hash(struct aead_request *req,
@ -457,10 +494,8 @@ static void gcm_enc_copy_hash(struct aead_request *req,
crypto_aead_authsize(aead), 1);
}
static void gcm_enc_hash_done(struct crypto_async_request *areq,
int err)
static void gcm_enc_hash_done(struct aead_request *req, int err)
{
struct aead_request *req = areq->data;
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
if (!err)
@ -469,8 +504,7 @@ static void gcm_enc_hash_done(struct crypto_async_request *areq,
aead_request_complete(req, err);
}
static void gcm_encrypt_done(struct crypto_async_request *areq,
int err)
static void gcm_encrypt_done(struct crypto_async_request *areq, int err)
{
struct aead_request *req = areq->data;
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
@ -479,9 +513,13 @@ static void gcm_encrypt_done(struct crypto_async_request *areq,
err = gcm_hash(req, pctx);
if (err == -EINPROGRESS || err == -EBUSY)
return;
else if (!err) {
crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
gcm_enc_copy_hash(req, pctx);
}
}
gcm_enc_hash_done(areq, err);
aead_request_complete(req, err);
}
static int crypto_gcm_encrypt(struct aead_request *req)
@ -538,9 +576,8 @@ static void gcm_decrypt_done(struct crypto_async_request *areq, int err)
aead_request_complete(req, err);
}
static void gcm_dec_hash_done(struct crypto_async_request *areq, int err)
static void gcm_dec_hash_done(struct aead_request *req, int err)
{
struct aead_request *req = areq->data;
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct ablkcipher_request *abreq = &pctx->u.abreq;
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
@ -552,9 +589,11 @@ static void gcm_dec_hash_done(struct crypto_async_request *areq, int err)
err = crypto_ablkcipher_decrypt(abreq);
if (err == -EINPROGRESS || err == -EBUSY)
return;
else if (!err)
err = crypto_gcm_verify(req, pctx);
}
gcm_decrypt_done(areq, err);
aead_request_complete(req, err);
}
static int crypto_gcm_decrypt(struct aead_request *req)
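
The gcm.c changes above apply one pattern throughout: each completion handler is split into a __gcm_*_done() helper that takes the aead_request itself, plus a thin gcm_*_done() wrapper that merely unwraps areq->data, so synchronous return paths can call the helpers directly instead of routing through a crypto_async_request. A minimal stand-alone sketch of that split; all names below are illustrative:

#include <stdio.h>

struct request {
	int id;
};

struct async_event {			/* stands in for crypto_async_request */
	void *data;
};

/* Core completion logic: takes the request directly, so it can be called
 * from a synchronous path without faking up an async event. */
static void __work_done(struct request *req, int err)
{
	printf("request %d finished, err=%d\n", req->id, err);
}

/* Thin asynchronous wrapper: only unwraps the container and delegates. */
static void work_done(struct async_event *ev, int err)
{
	__work_done(ev->data, err);
}

int main(void)
{
	struct request req = { .id = 1 };
	struct async_event ev = { .data = &req };

	work_done(&ev, 0);		/* completion arriving asynchronously */
	__work_done(&req, 0);		/* direct call from a synchronous path */
	return 0;
}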

View File

@ -203,8 +203,9 @@ static const union acpi_predefined_info predefined_names[] =
{{"_BCT", 1, ACPI_RTYPE_INTEGER}},
{{"_BDN", 0, ACPI_RTYPE_INTEGER}},
{{"_BFS", 1, 0}},
{{"_BIF", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (9 Int),(4 Str) */
{{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 9, ACPI_RTYPE_STRING}, 4,0}},
{{"_BIF", 0, ACPI_RTYPE_PACKAGE} }, /* Fixed-length (9 Int),(4 Str/Buf) */
{{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 9,
ACPI_RTYPE_STRING | ACPI_RTYPE_BUFFER}, 4, 0} },
{{"_BIX", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (16 Int),(4 Str) */
{{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16, ACPI_RTYPE_STRING}, 4,

View File

@ -224,6 +224,7 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
* _OSI(Linux) helps sound
* DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad R61"),
* DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T61"),
* T400, T500
* _OSI(Linux) has Linux specific hooks
* DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X61"),
* _OSI(Linux) is a NOP:
@ -254,6 +255,22 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X61"),
},
},
{
.callback = dmi_enable_osi_linux,
.ident = "Lenovo ThinkPad T400",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T400"),
},
},
{
.callback = dmi_enable_osi_linux,
.ident = "Lenovo ThinkPad T500",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T500"),
},
},
{}
};

View File

@ -430,6 +430,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
},
{
.callback = init_set_sci_en_on_resume,
.ident = "Hewlett-Packard Compaq Presario C700 Notebook PC",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario C700 Notebook PC"),
},
},
{
.callback = init_set_sci_en_on_resume,
.ident = "Hewlett-Packard Compaq Presario CQ40 Notebook PC",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),

View File

@ -707,34 +707,17 @@ static unsigned int sata_fsl_dev_classify(struct ata_port *ap)
return ata_dev_classify(&tf);
}
static int sata_fsl_prereset(struct ata_link *link, unsigned long deadline)
{
/* FIXME: Never skip softreset, sata_fsl_softreset() is
* combination of soft and hard resets. sata_fsl_softreset()
* needs to be splitted into soft and hard resets.
*/
return 0;
}
static int sata_fsl_softreset(struct ata_link *link, unsigned int *class,
static int sata_fsl_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct sata_fsl_port_priv *pp = ap->private_data;
struct sata_fsl_host_priv *host_priv = ap->host->private_data;
void __iomem *hcr_base = host_priv->hcr_base;
int pmp = sata_srst_pmp(link);
u32 temp;
struct ata_taskfile tf;
u8 *cfis;
u32 Serror;
int i = 0;
unsigned long start_jiffies;
DPRINTK("in xx_softreset\n");
if (pmp != SATA_PMP_CTRL_PORT)
goto issue_srst;
DPRINTK("in xx_hardreset\n");
try_offline_again:
/*
@ -749,7 +732,7 @@ try_offline_again:
if (temp & ONLINE) {
ata_port_printk(ap, KERN_ERR,
"Softreset failed, not off-lined %d\n", i);
"Hardreset failed, not off-lined %d\n", i);
/*
* Try to offline controller at least twice
@ -761,7 +744,7 @@ try_offline_again:
goto try_offline_again;
}
DPRINTK("softreset, controller off-lined\n");
DPRINTK("hardreset, controller off-lined\n");
VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
@ -786,11 +769,11 @@ try_offline_again:
if (!(temp & ONLINE)) {
ata_port_printk(ap, KERN_ERR,
"Softreset failed, not on-lined\n");
"Hardreset failed, not on-lined\n");
goto err;
}
DPRINTK("softreset, controller off-lined & on-lined\n");
DPRINTK("hardreset, controller off-lined & on-lined\n");
VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
@ -806,7 +789,7 @@ try_offline_again:
"No Device OR PHYRDY change,Hstatus = 0x%x\n",
ioread32(hcr_base + HSTATUS));
*class = ATA_DEV_NONE;
goto out;
return 0;
}
/*
@ -819,11 +802,44 @@ try_offline_again:
if ((temp & 0xFF) != 0x18) {
ata_port_printk(ap, KERN_WARNING, "No Signature Update\n");
*class = ATA_DEV_NONE;
goto out;
goto do_followup_srst;
} else {
ata_port_printk(ap, KERN_INFO,
"Signature Update detected @ %d msecs\n",
jiffies_to_msecs(jiffies - start_jiffies));
*class = sata_fsl_dev_classify(ap);
return 0;
}
do_followup_srst:
/*
* request libATA to perform follow-up softreset
*/
return -EAGAIN;
err:
return -EIO;
}
static int sata_fsl_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct sata_fsl_port_priv *pp = ap->private_data;
struct sata_fsl_host_priv *host_priv = ap->host->private_data;
void __iomem *hcr_base = host_priv->hcr_base;
int pmp = sata_srst_pmp(link);
u32 temp;
struct ata_taskfile tf;
u8 *cfis;
u32 Serror;
DPRINTK("in xx_softreset\n");
if (ata_link_offline(link)) {
DPRINTK("PHY reports no device\n");
*class = ATA_DEV_NONE;
return 0;
}
/*
@ -834,7 +850,6 @@ try_offline_again:
* reached here, we can send a command to the target device
*/
issue_srst:
DPRINTK("Sending SRST/device reset\n");
ata_tf_init(link->device, &tf);
@ -860,6 +875,8 @@ issue_srst:
ioread32(CA + hcr_base), ioread32(CC + hcr_base));
iowrite32(0xFFFF, CC + hcr_base);
if (pmp != SATA_PMP_CTRL_PORT)
iowrite32(pmp, CQPMP + hcr_base);
iowrite32(1, CQ + hcr_base);
temp = ata_wait_register(CQ + hcr_base, 0x1, 0x1, 1, 5000);
@ -926,7 +943,6 @@ issue_srst:
VPRINTK("cereg = 0x%x\n", ioread32(hcr_base + CE));
}
out:
return 0;
err:
@ -988,18 +1004,6 @@ static void sata_fsl_error_intr(struct ata_port *ap)
ehi->err_mask |= AC_ERR_ATA_BUS;
ehi->action |= ATA_EH_SOFTRESET;
/*
* Ignore serror in case of fatal errors as we always want
* to do a soft-reset of the FSL SATA controller. Analyzing
* serror may cause libata to schedule a hard-reset action,
* and hard-reset currently does not do controller
* offline/online, causing command timeouts and leads to an
* un-recoverable state, hence make libATA ignore
* autopsy in case of fatal errors.
*/
ehi->flags |= ATA_EHI_NO_AUTOPSY;
freeze = 1;
}
@ -1267,8 +1271,8 @@ static struct ata_port_operations sata_fsl_ops = {
.freeze = sata_fsl_freeze,
.thaw = sata_fsl_thaw,
.prereset = sata_fsl_prereset,
.softreset = sata_fsl_softreset,
.hardreset = sata_fsl_hardreset,
.pmp_softreset = sata_fsl_softreset,
.error_handler = sata_fsl_error_handler,
.post_internal_cmd = sata_fsl_post_internal_cmd,
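
sata_fsl_softreset() above is split into a hardreset that offlines and re-onlines the controller and a softreset that only issues SRST; per the new do_followup_srst comment, the hardreset returns -EAGAIN when no signature update arrives so that libata performs the follow-up softreset. A toy sequencer (not libata code) showing that handshake; everything except the -EAGAIN convention is illustrative:

#include <stdio.h>

#define EAGAIN 11

/* Toy hard reset: the controller comes back online, but the device
 * signature may not have updated yet. */
static int hardreset(int signature_seen)
{
	return signature_seen ? 0 : -EAGAIN;
}

static int softreset(void)
{
	return 0;			/* send SRST, classify the device, ... */
}

static void run_reset(int signature_seen)
{
	int rc = hardreset(signature_seen);

	if (rc == -EAGAIN)		/* on-lined but no signature update */
		rc = softreset();	/* so issue the follow-up SRST */

	printf("signature_seen=%d -> %s\n", signature_seen,
	       rc ? "reset failed" : "reset complete");
}

int main(void)
{
	run_reset(1);
	run_reset(0);
	return 0;
}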

View File

@ -328,11 +328,11 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
* necessary.
*/
parent = dev->parent;
spin_unlock_irq(&dev->power.lock);
spin_unlock(&dev->power.lock);
pm_runtime_get_noresume(parent);
spin_lock_irq(&parent->power.lock);
spin_lock(&parent->power.lock);
/*
* We can resume if the parent's run-time PM is disabled or it
* is set to ignore children.
@ -343,9 +343,9 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
if (parent->power.runtime_status != RPM_ACTIVE)
retval = -EBUSY;
}
spin_unlock_irq(&parent->power.lock);
spin_unlock(&parent->power.lock);
spin_lock_irq(&dev->power.lock);
spin_lock(&dev->power.lock);
if (retval)
goto out;
goto repeat;
@ -777,7 +777,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
}
if (parent) {
spin_lock_irq(&parent->power.lock);
spin_lock(&parent->power.lock);
/*
* It is invalid to put an active child under a parent that is
@ -793,7 +793,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
atomic_inc(&parent->power.child_count);
}
spin_unlock_irq(&parent->power.lock);
spin_unlock(&parent->power.lock);
if (error)
goto out;

View File

@ -482,7 +482,7 @@ static ssize_t host_store_rescan(struct device *dev,
return count;
}
DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static ssize_t dev_show_unique_id(struct device *dev,
struct device_attribute *attr,
@ -512,7 +512,7 @@ static ssize_t dev_show_unique_id(struct device *dev,
sn[8], sn[9], sn[10], sn[11],
sn[12], sn[13], sn[14], sn[15]);
}
DEVICE_ATTR(unique_id, S_IRUGO, dev_show_unique_id, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, dev_show_unique_id, NULL);
static ssize_t dev_show_vendor(struct device *dev,
struct device_attribute *attr,
@ -536,7 +536,7 @@ static ssize_t dev_show_vendor(struct device *dev,
else
return snprintf(buf, sizeof(vendor) + 1, "%s\n", drv->vendor);
}
DEVICE_ATTR(vendor, S_IRUGO, dev_show_vendor, NULL);
static DEVICE_ATTR(vendor, S_IRUGO, dev_show_vendor, NULL);
static ssize_t dev_show_model(struct device *dev,
struct device_attribute *attr,
@ -560,7 +560,7 @@ static ssize_t dev_show_model(struct device *dev,
else
return snprintf(buf, sizeof(model) + 1, "%s\n", drv->model);
}
DEVICE_ATTR(model, S_IRUGO, dev_show_model, NULL);
static DEVICE_ATTR(model, S_IRUGO, dev_show_model, NULL);
static ssize_t dev_show_rev(struct device *dev,
struct device_attribute *attr,
@ -584,7 +584,7 @@ static ssize_t dev_show_rev(struct device *dev,
else
return snprintf(buf, sizeof(rev) + 1, "%s\n", drv->rev);
}
DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL);
static DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL);
static ssize_t cciss_show_lunid(struct device *dev,
struct device_attribute *attr, char *buf)
@ -609,7 +609,7 @@ static ssize_t cciss_show_lunid(struct device *dev,
lunid[0], lunid[1], lunid[2], lunid[3],
lunid[4], lunid[5], lunid[6], lunid[7]);
}
DEVICE_ATTR(lunid, S_IRUGO, cciss_show_lunid, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, cciss_show_lunid, NULL);
static ssize_t cciss_show_raid_level(struct device *dev,
struct device_attribute *attr, char *buf)
@ -632,7 +632,7 @@ static ssize_t cciss_show_raid_level(struct device *dev,
return snprintf(buf, strlen(raid_label[raid]) + 7, "RAID %s\n",
raid_label[raid]);
}
DEVICE_ATTR(raid_level, S_IRUGO, cciss_show_raid_level, NULL);
static DEVICE_ATTR(raid_level, S_IRUGO, cciss_show_raid_level, NULL);
static ssize_t cciss_show_usage_count(struct device *dev,
struct device_attribute *attr, char *buf)
@ -651,7 +651,7 @@ static ssize_t cciss_show_usage_count(struct device *dev,
spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
return snprintf(buf, 20, "%d\n", count);
}
DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL);
static DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL);
static struct attribute *cciss_host_attrs[] = {
&dev_attr_rescan.attr,

View File

@ -62,6 +62,7 @@
#define PCI_DEVICE_ID_INTEL_IGDNG_D_IG 0x0042
#define PCI_DEVICE_ID_INTEL_IGDNG_M_HB 0x0044
#define PCI_DEVICE_ID_INTEL_IGDNG_MA_HB 0x0062
#define PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB 0x006a
#define PCI_DEVICE_ID_INTEL_IGDNG_M_IG 0x0046
/* cover 915 and 945 variants */
@ -96,7 +97,8 @@
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MA_HB)
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MA_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB)
extern int agp_memory_reserved;
@ -1358,6 +1360,7 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
case PCI_DEVICE_ID_INTEL_IGDNG_D_HB:
case PCI_DEVICE_ID_INTEL_IGDNG_M_HB:
case PCI_DEVICE_ID_INTEL_IGDNG_MA_HB:
case PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB:
*gtt_offset = *gtt_size = MB(2);
break;
default:
@ -2359,6 +2362,8 @@ static const struct intel_driver_description {
"IGDNG/M", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_IGDNG_MA_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
"IGDNG/MA", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
"IGDNG/MC2", NULL, &intel_i965_driver },
{ 0, 0, 0, NULL, NULL, NULL }
};
@ -2560,6 +2565,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB),
ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB),
ID(PCI_DEVICE_ID_INTEL_IGDNG_MA_HB),
ID(PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB),
{ }
};

View File

@ -1249,7 +1249,7 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw)
if (keycode >= NR_KEYS)
if (keycode >= KEY_BRL_DOT1 && keycode <= KEY_BRL_DOT8)
keysym = K(KT_BRL, keycode - KEY_BRL_DOT1 + 1);
keysym = U(K(KT_BRL, keycode - KEY_BRL_DOT1 + 1));
else
return;
else

View File

@ -219,8 +219,11 @@ int tty_port_block_til_ready(struct tty_port *port,
/* if non-blocking mode is set we can pass directly to open unless
the port has just hung up or is in another error state */
if ((filp->f_flags & O_NONBLOCK) ||
(tty->flags & (1 << TTY_IO_ERROR))) {
if (tty->flags & (1 << TTY_IO_ERROR)) {
port->flags |= ASYNC_NORMAL_ACTIVE;
return 0;
}
if (filp->f_flags & O_NONBLOCK) {
/* Indicate we are open */
if (tty->termios->c_cflag & CBAUD)
tty_port_raise_dtr_rts(port);
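
The tty_port change above splits the old combined check: a port in TTY_IO_ERROR state is marked active and returned immediately without driving the modem lines, while a plain O_NONBLOCK open still raises DTR/RTS when CBAUD is set. A small sketch of the resulting control flow; the tail of the non-blocking branch lies outside the hunk, so its behaviour here is an assumption, and all names are stand-ins:

#include <stdbool.h>
#include <stdio.h>

struct port {
	bool active;
	bool dtr;
};

/* Errored ports are opened without touching the modem lines; a
 * non-blocking open raises DTR/RTS when a baud rate is set (marking the
 * port active afterwards is assumed, as in the pre-existing code). */
static int block_til_ready(struct port *p, bool io_error, bool nonblock,
			   bool has_baud)
{
	if (io_error) {
		p->active = true;
		return 0;
	}
	if (nonblock) {
		if (has_baud)
			p->dtr = true;	/* tty_port_raise_dtr_rts() */
		p->active = true;
		return 0;
	}
	return 1;			/* would block waiting for carrier */
}

int main(void)
{
	struct port a = { 0 }, b = { 0 };

	block_til_ready(&a, true, false, true);
	block_til_ready(&b, false, true, true);
	printf("io-error open:     active=%d dtr=%d\n", a.active, a.dtr);
	printf("non-blocking open: active=%d dtr=%d\n", b.active, b.dtr);
	return 0;
}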

View File

@ -103,8 +103,8 @@ void vt_event_post(unsigned int event, unsigned int old, unsigned int new)
ve->event.event = event;
/* kernel view is consoles 0..n-1, user space view is
console 1..n with 0 meaning current, so we must bias */
ve->event.old = old + 1;
ve->event.new = new + 1;
ve->event.oldev = old + 1;
ve->event.newev = new + 1;
wake = 1;
ve->done = 1;
}
@ -186,7 +186,7 @@ int vt_waitactive(int n)
vt_event_wait(&vw);
if (vw.done == 0)
return -EINTR;
} while (vw.event.new != n);
} while (vw.event.newev != n);
return 0;
}

View File

@ -236,7 +236,7 @@ static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
/* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
* We could avoid some copying here but it's probably not worth it.
*/
if (unlikely(((unsigned long)in & PAGE_SIZE) + ecb_fetch_bytes > PAGE_SIZE)) {
if (unlikely(((unsigned long)in & ~PAGE_MASK) + ecb_fetch_bytes > PAGE_SIZE)) {
ecb_crypt_copy(in, out, key, cword, count);
return;
}
@ -248,7 +248,7 @@ static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
u8 *iv, struct cword *cword, int count)
{
/* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
if (unlikely(((unsigned long)in & PAGE_SIZE) + cbc_fetch_bytes > PAGE_SIZE))
if (unlikely(((unsigned long)in & ~PAGE_MASK) + cbc_fetch_bytes > PAGE_SIZE))
return cbc_crypt_copy(in, out, key, iv, cword, count);
return rep_xcrypt_cbc(in, out, key, iv, cword, count);
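
The padlock fix above replaces "in & PAGE_SIZE", which tests a single address bit, with "in & ~PAGE_MASK", the byte offset inside the page, before checking whether the hardware prefetch would run off the end of the mapping. A userspace sketch of the difference, assuming 4 KiB pages and a 32-byte prefetch; the sample address is made up:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long in = 0x2ff0;	/* 16 bytes below a page boundary */
	unsigned long fetch = 32;	/* ecb_fetch_bytes-style prefetch  */

	/* Old test: "in & PAGE_SIZE" only examines one address bit, so the
	 * crossing below goes unnoticed and the fast path is taken. */
	printf("old: offset=%4lu -> %s\n", in & PAGE_SIZE,
	       ((in & PAGE_SIZE) + fetch > PAGE_SIZE) ? "copy" : "fast path");

	/* Fixed test: "in & ~PAGE_MASK" is the real offset in the page, so
	 * a prefetch that would run past the page falls back to the copy. */
	printf("new: offset=%4lu -> %s\n", in & ~PAGE_MASK,
	       ((in & ~PAGE_MASK) + fetch > PAGE_SIZE) ? "copy" : "fast path");
	return 0;
}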

View File

@ -26,6 +26,8 @@ config INTEL_IOATDMA
select DMA_ENGINE
select DCA
select ASYNC_TX_DISABLE_CHANNEL_SWITCH
select ASYNC_TX_DISABLE_PQ_VAL_DMA
select ASYNC_TX_DISABLE_XOR_VAL_DMA
help
Enable support for the Intel(R) I/OAT DMA engine present
in recent Intel Xeon chipsets.

View File

@ -632,11 +632,21 @@ static bool device_has_all_tx_types(struct dma_device *device)
#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
if (!dma_has_cap(DMA_XOR, device->cap_mask))
return false;
#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
return false;
#endif
#endif
#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
if (!dma_has_cap(DMA_PQ, device->cap_mask))
return false;
#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
return false;
#endif
#endif
return true;

View File

@ -98,17 +98,17 @@ static int dca_enabled_in_bios(struct pci_dev *pdev)
cpuid_level_9 = cpuid_eax(9);
res = test_bit(0, &cpuid_level_9);
if (!res)
dev_err(&pdev->dev, "DCA is disabled in BIOS\n");
dev_dbg(&pdev->dev, "DCA is disabled in BIOS\n");
return res;
}
static int system_has_dca_enabled(struct pci_dev *pdev)
int system_has_dca_enabled(struct pci_dev *pdev)
{
if (boot_cpu_has(X86_FEATURE_DCA))
return dca_enabled_in_bios(pdev);
dev_err(&pdev->dev, "boot cpu doesn't have X86_FEATURE_DCA\n");
dev_dbg(&pdev->dev, "boot cpu doesn't have X86_FEATURE_DCA\n");
return 0;
}

View File

@ -297,9 +297,7 @@ static inline bool is_ioat_suspended(unsigned long status)
/* channel was fatally programmed */
static inline bool is_ioat_bug(unsigned long err)
{
return !!(err & (IOAT_CHANERR_SRC_ADDR_ERR|IOAT_CHANERR_DEST_ADDR_ERR|
IOAT_CHANERR_NEXT_ADDR_ERR|IOAT_CHANERR_CONTROL_ERR|
IOAT_CHANERR_LENGTH_ERR));
return !!err;
}
static inline void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len,

View File

@ -279,6 +279,8 @@ void ioat2_timer_event(unsigned long data)
u32 chanerr;
chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
__func__, chanerr);
BUG_ON(is_ioat_bug(chanerr));
}

View File

@ -378,6 +378,8 @@ static void ioat3_timer_event(unsigned long data)
u32 chanerr;
chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
__func__, chanerr);
BUG_ON(is_ioat_bug(chanerr));
}
@ -569,7 +571,7 @@ __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
dump_desc_dbg(ioat, compl_desc);
/* we leave the channel locked to ensure in order submission */
return &desc->txd;
return &compl_desc->txd;
}
static struct dma_async_tx_descriptor *
@ -728,7 +730,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
dump_desc_dbg(ioat, compl_desc);
/* we leave the channel locked to ensure in order submission */
return &desc->txd;
return &compl_desc->txd;
}
static struct dma_async_tx_descriptor *
@ -736,10 +738,16 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
unsigned int src_cnt, const unsigned char *scf, size_t len,
unsigned long flags)
{
/* specify valid address for disabled result */
if (flags & DMA_PREP_PQ_DISABLE_P)
dst[0] = dst[1];
if (flags & DMA_PREP_PQ_DISABLE_Q)
dst[1] = dst[0];
/* handle the single source multiply case from the raid6
* recovery path
*/
if (unlikely((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1)) {
if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
dma_addr_t single_source[2];
unsigned char single_source_coef[2];
@ -761,6 +769,12 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
unsigned int src_cnt, const unsigned char *scf, size_t len,
enum sum_check_flags *pqres, unsigned long flags)
{
/* specify valid address for disabled result */
if (flags & DMA_PREP_PQ_DISABLE_P)
pq[0] = pq[1];
if (flags & DMA_PREP_PQ_DISABLE_Q)
pq[1] = pq[0];
/* the cleanup routine only sets bits on validate failure, it
* does not clear bits on validate success... so clear it here
*/
@ -778,9 +792,9 @@ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
dma_addr_t pq[2];
memset(scf, 0, src_cnt);
flags |= DMA_PREP_PQ_DISABLE_Q;
pq[0] = dst;
pq[1] = ~0;
flags |= DMA_PREP_PQ_DISABLE_Q;
pq[1] = dst; /* specify valid address for disabled result */
return __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
flags);
@ -800,9 +814,9 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
*result = 0;
memset(scf, 0, src_cnt);
flags |= DMA_PREP_PQ_DISABLE_Q;
pq[0] = src[0];
pq[1] = ~0;
flags |= DMA_PREP_PQ_DISABLE_Q;
pq[1] = pq[0]; /* specify valid address for disabled result */
return __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, scf,
len, flags);
@ -1117,6 +1131,7 @@ static int __devinit ioat3_dma_self_test(struct ioatdma_device *device)
int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
{
struct pci_dev *pdev = device->pdev;
int dca_en = system_has_dca_enabled(pdev);
struct dma_device *dma;
struct dma_chan *c;
struct ioat_chan_common *chan;
@ -1137,6 +1152,11 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
/* dca is incompatible with raid operations */
if (dca_en && (cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
if (cap & IOAT_CAP_XOR) {
is_raid_device = true;
dma->max_xor = 8;
@ -1186,6 +1206,16 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
device->timer_fn = ioat2_timer_event;
}
#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
dma->device_prep_dma_pq_val = NULL;
#endif
#ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
dma->device_prep_dma_xor_val = NULL;
#endif
/* -= IOAT ver.3 workarounds =- */
/* Write CHANERRMSK_INT with 3E07h to mask out the errors
* that can cause stability issues for IOAT ver.3

View File

@ -39,6 +39,8 @@
#define IOAT_VER_3_0 0x30 /* Version 3.0 */
#define IOAT_VER_3_2 0x32 /* Version 3.2 */
int system_has_dca_enabled(struct pci_dev *pdev);
struct ioat_dma_descriptor {
uint32_t size;
union {

View File

@ -92,9 +92,7 @@
#define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004
#define IOAT_CHANCTRL_INT_REARM 0x0001
#define IOAT_CHANCTRL_RUN (IOAT_CHANCTRL_INT_REARM |\
IOAT_CHANCTRL_ERR_COMPLETION_EN |\
IOAT_CHANCTRL_ANY_ERR_ABORT_EN |\
IOAT_CHANCTRL_ERR_INT_EN)
IOAT_CHANCTRL_ANY_ERR_ABORT_EN)
#define IOAT_DMA_COMP_OFFSET 0x02 /* 16-bit DMA channel compatibility */
#define IOAT_DMA_COMP_V1 0x0001 /* Compatibility with DMA version 1 */

View File

@ -640,17 +640,16 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
#endif
struct sh_dmae_device *shdev;
/* get platform data */
if (!pdev->dev.platform_data)
return -ENODEV;
shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
if (!shdev) {
dev_err(&pdev->dev, "No enough memory\n");
err = -ENOMEM;
goto shdev_err;
return -ENOMEM;
}
/* get platform data */
if (!pdev->dev.platform_data)
goto shdev_err;
/* platform data */
memcpy(&shdev->pdata, pdev->dev.platform_data,
sizeof(struct sh_dmae_pdata));
@ -722,7 +721,6 @@ eirq_err:
rst_err:
kfree(shdev);
shdev_err:
return err;
}

View File

@ -275,7 +275,7 @@ static void log_irqs(u32 evt)
!(evt & OHCI1394_busReset))
return;
fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
evt & OHCI1394_selfIDComplete ? " selfID" : "",
evt & OHCI1394_RQPkt ? " AR_req" : "",
evt & OHCI1394_RSPkt ? " AR_resp" : "",
@ -286,6 +286,7 @@ static void log_irqs(u32 evt)
evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "",
evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "",
evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "",
evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "",
evt & OHCI1394_regAccessFail ? " regAccessFail" : "",
evt & OHCI1394_busReset ? " busReset" : "",
evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
@ -293,6 +294,7 @@ static void log_irqs(u32 evt)
OHCI1394_respTxComplete | OHCI1394_isochRx |
OHCI1394_isochTx | OHCI1394_postedWriteErr |
OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
OHCI1394_cycleInconsistent |
OHCI1394_regAccessFail | OHCI1394_busReset)
? " ?" : "");
}
@ -1439,6 +1441,17 @@ static irqreturn_t irq_handler(int irq, void *data)
OHCI1394_LinkControl_cycleMaster);
}
if (unlikely(event & OHCI1394_cycleInconsistent)) {
/*
* We need to clear this event bit in order to make
* cycleMatch isochronous I/O work. In theory we should
* stop active cycleMatch iso contexts now and restart
* them at least two cycles later. (FIXME?)
*/
if (printk_ratelimit())
fw_notify("isochronous cycle inconsistent\n");
}
if (event & OHCI1394_cycle64Seconds) {
cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
if ((cycle_time & 0x80000000) == 0)
@ -1528,6 +1541,7 @@ static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
OHCI1394_isochRx | OHCI1394_isochTx |
OHCI1394_postedWriteErr | OHCI1394_cycleTooLong |
OHCI1394_cycleInconsistent |
OHCI1394_cycle64Seconds | OHCI1394_regAccessFail |
OHCI1394_masterIntEnable);
if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
@ -1890,15 +1904,30 @@ static int handle_it_packet(struct context *context,
{
struct iso_context *ctx =
container_of(context, struct iso_context, context);
int i;
struct descriptor *pd;
if (last->transfer_status == 0)
/* This descriptor isn't done yet, stop iteration. */
for (pd = d; pd <= last; pd++)
if (pd->transfer_status)
break;
if (pd > last)
/* Descriptor(s) not done yet, stop iteration */
return 0;
if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
i = ctx->header_length;
if (i + 4 < PAGE_SIZE) {
/* Present this value as big-endian to match the receive code */
*(__be32 *)(ctx->header + i) = cpu_to_be32(
((u32)le16_to_cpu(pd->transfer_status) << 16) |
le16_to_cpu(pd->res_count));
ctx->header_length += 4;
}
if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
0, NULL, ctx->base.callback_data);
ctx->header_length, ctx->header,
ctx->base.callback_data);
ctx->header_length = 0;
}
return 1;
}

View File

@ -92,6 +92,7 @@ config DRM_I830
config DRM_I915
tristate "i915 driver"
depends on AGP_INTEL
select SHMEM
select DRM_KMS_HELPER
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA

View File

@ -662,6 +662,12 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
return NULL;
}
/* Some EDIDs have bogus h/vtotal values */
if (mode->hsync_end > mode->htotal)
mode->htotal = mode->hsync_end + 1;
if (mode->vsync_end > mode->vtotal)
mode->vtotal = mode->vsync_end + 1;
drm_mode_set_name(mode);
if (pt->misc & DRM_EDID_PT_INTERLACED)

View File

@ -599,7 +599,7 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
struct drm_framebuffer *fb = fb_helper->fb;
int depth;
if (var->pixclock == -1 || !var->pixclock)
if (var->pixclock != 0)
return -EINVAL;
/* Need to resize the fb object !!! */
@ -691,7 +691,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
int ret;
int i;
if (var->pixclock != -1) {
if (var->pixclock != 0) {
DRM_ERROR("PIXEL CLCOK SET\n");
return -EINVAL;
}
@ -904,7 +904,7 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev,
fb_helper->fb = fb;
if (new_fb) {
info->var.pixclock = -1;
info->var.pixclock = 0;
if (register_framebuffer(info) < 0)
return -EINVAL;
} else {

View File

@ -552,7 +552,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
vma->vm_ops = obj->dev->driver->gem_vm_ops;
vma->vm_private_data = map->handle;
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
/* Take a ref for this mapping of the object, so that the fault
* handler can dereference the mmap offset's pointer to the object.

View File

@ -103,6 +103,11 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
return child;
}
/* drm_mm_pre_get() - pre allocate drm_mm_node structure
* drm_mm: memory manager struct we are pre-allocating for
*
* Returns 0 on success or -ENOMEM if allocation fails.
*/
int drm_mm_pre_get(struct drm_mm *mm)
{
struct drm_mm_node *node;
@ -253,12 +258,14 @@ void drm_mm_put_block(struct drm_mm_node *cur)
prev_node->size += next_node->size;
list_del(&next_node->ml_entry);
list_del(&next_node->fl_entry);
spin_lock(&mm->unused_lock);
if (mm->num_unused < MM_UNUSED_TARGET) {
list_add(&next_node->fl_entry,
&mm->unused_nodes);
++mm->num_unused;
} else
kfree(next_node);
spin_unlock(&mm->unused_lock);
} else {
next_node->size += cur->size;
next_node->start = cur->start;
@ -271,11 +278,13 @@ void drm_mm_put_block(struct drm_mm_node *cur)
list_add(&cur->fl_entry, &mm->fl_entry);
} else {
list_del(&cur->ml_entry);
spin_lock(&mm->unused_lock);
if (mm->num_unused < MM_UNUSED_TARGET) {
list_add(&cur->fl_entry, &mm->unused_nodes);
++mm->num_unused;
} else
kfree(cur);
spin_unlock(&mm->unused_lock);
}
}
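
drm_mm_put_block() above now wraps the unused-node cache manipulation in mm->unused_lock, so concurrent frees cannot race on the list that feeds later atomic allocations, and the cache stays capped at MM_UNUSED_TARGET. A stand-alone sketch of a capped, lock-protected free cache, with a pthread mutex standing in for the kernel spinlock and an invented cap value:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define UNUSED_TARGET 4			/* stand-in for MM_UNUSED_TARGET */

struct node {
	struct node *next;
};

static pthread_mutex_t unused_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *unused_nodes;	/* cached, ready-to-reuse nodes */
static int num_unused;

/* Free path: keep a few nodes around for later use, free the rest. */
static void put_node(struct node *n)
{
	pthread_mutex_lock(&unused_lock);
	if (num_unused < UNUSED_TARGET) {
		n->next = unused_nodes;
		unused_nodes = n;
		num_unused++;
		pthread_mutex_unlock(&unused_lock);
		return;
	}
	pthread_mutex_unlock(&unused_lock);
	free(n);
}

int main(void)
{
	int i;

	for (i = 0; i < 6; i++)
		put_node(malloc(sizeof(struct node)));

	printf("nodes kept in cache: %d\n", num_unused);	/* prints 4 */
	return 0;	/* cached nodes are left for the OS to reclaim */
}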

View File

@ -267,10 +267,10 @@ static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_co
uint32_t *mem;
for (page = 0; page < page_count; page++) {
mem = kmap(pages[page]);
mem = kmap_atomic(pages[page], KM_USER0);
for (i = 0; i < PAGE_SIZE; i += 4)
seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
kunmap(pages[page]);
kunmap_atomic(pages[page], KM_USER0);
}
}

View File

@ -296,6 +296,7 @@ typedef struct drm_i915_private {
u32 saveVBLANK_A;
u32 saveVSYNC_A;
u32 saveBCLRPAT_A;
u32 saveTRANSACONF;
u32 saveTRANS_HTOTAL_A;
u32 saveTRANS_HBLANK_A;
u32 saveTRANS_HSYNC_A;
@ -326,6 +327,7 @@ typedef struct drm_i915_private {
u32 saveVBLANK_B;
u32 saveVSYNC_B;
u32 saveBCLRPAT_B;
u32 saveTRANSBCONF;
u32 saveTRANS_HTOTAL_B;
u32 saveTRANS_HBLANK_B;
u32 saveTRANS_HSYNC_B;
@ -414,6 +416,16 @@ typedef struct drm_i915_private {
u32 savePFB_WIN_SZ;
u32 savePFA_WIN_POS;
u32 savePFB_WIN_POS;
u32 savePCH_DREF_CONTROL;
u32 saveDISP_ARB_CTL;
u32 savePIPEA_DATA_M1;
u32 savePIPEA_DATA_N1;
u32 savePIPEA_LINK_M1;
u32 savePIPEA_LINK_N1;
u32 savePIPEB_DATA_M1;
u32 savePIPEB_DATA_N1;
u32 savePIPEB_LINK_M1;
u32 savePIPEB_LINK_N1;
struct {
struct drm_mm gtt_space;

View File

@ -254,10 +254,15 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE;
u32 de_iir, gt_iir;
u32 de_iir, gt_iir, de_ier;
u32 new_de_iir, new_gt_iir;
struct drm_i915_master_private *master_priv;
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
(void)I915_READ(DEIER);
de_iir = I915_READ(DEIIR);
gt_iir = I915_READ(GTIIR);
@ -290,6 +295,9 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
gt_iir = new_gt_iir;
}
I915_WRITE(DEIER, de_ier);
(void)I915_READ(DEIER);
return ret;
}
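
igdng_irq_handler() above now saves DEIER, clears the master-interrupt-control bit before touching DEIIR/GTIIR, and writes the saved value back at the end, with posting reads to flush each write. A register-level sketch of that mask/handle/restore sequence against a fake MMIO word; the bit position and register contents are invented:

#include <stdio.h>
#include <stdint.h>

#define MASTER_IRQ_CONTROL (1u << 31)	/* illustrative bit position */

static volatile uint32_t fake_deier = MASTER_IRQ_CONTROL | 0x7;

static uint32_t read_reg(volatile uint32_t *r)		{ return *r; }
static void write_reg(volatile uint32_t *r, uint32_t v)	{ *r = v; }

int main(void)
{
	/* 1. Save the enable register and mask the master interrupt. */
	uint32_t saved = read_reg(&fake_deier);

	write_reg(&fake_deier, saved & ~MASTER_IRQ_CONTROL);
	(void)read_reg(&fake_deier);	/* posting read: flush the write */

	/* 2. ... read and clear the interrupt identity registers here ... */
	printf("while handling: DEIER=%#x\n", (unsigned)read_reg(&fake_deier));

	/* 3. Restore the saved value so new interrupts can be raised again. */
	write_reg(&fake_deier, saved);
	(void)read_reg(&fake_deier);
	printf("after handling:  DEIER=%#x\n", (unsigned)read_reg(&fake_deier));
	return 0;
}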

View File

@ -239,6 +239,11 @@ static void i915_save_modeset_reg(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
if (IS_IGDNG(dev)) {
dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
}
/* Pipe & plane A info */
dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
@ -263,6 +268,11 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
if (IS_IGDNG(dev)) {
dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1);
dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1);
dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1);
dev_priv->savePIPEA_LINK_N1 = I915_READ(PIPEA_LINK_N1);
dev_priv->saveFDI_TXA_CTL = I915_READ(FDI_TXA_CTL);
dev_priv->saveFDI_RXA_CTL = I915_READ(FDI_RXA_CTL);
@ -270,6 +280,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->savePFA_WIN_SZ = I915_READ(PFA_WIN_SZ);
dev_priv->savePFA_WIN_POS = I915_READ(PFA_WIN_POS);
dev_priv->saveTRANSACONF = I915_READ(TRANSACONF);
dev_priv->saveTRANS_HTOTAL_A = I915_READ(TRANS_HTOTAL_A);
dev_priv->saveTRANS_HBLANK_A = I915_READ(TRANS_HBLANK_A);
dev_priv->saveTRANS_HSYNC_A = I915_READ(TRANS_HSYNC_A);
@ -314,6 +325,11 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
if (IS_IGDNG(dev)) {
dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1);
dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1);
dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1);
dev_priv->savePIPEB_LINK_N1 = I915_READ(PIPEB_LINK_N1);
dev_priv->saveFDI_TXB_CTL = I915_READ(FDI_TXB_CTL);
dev_priv->saveFDI_RXB_CTL = I915_READ(FDI_RXB_CTL);
@ -321,6 +337,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->savePFB_WIN_SZ = I915_READ(PFB_WIN_SZ);
dev_priv->savePFB_WIN_POS = I915_READ(PFB_WIN_POS);
dev_priv->saveTRANSBCONF = I915_READ(TRANSBCONF);
dev_priv->saveTRANS_HTOTAL_B = I915_READ(TRANS_HTOTAL_B);
dev_priv->saveTRANS_HBLANK_B = I915_READ(TRANS_HBLANK_B);
dev_priv->saveTRANS_HSYNC_B = I915_READ(TRANS_HSYNC_B);
@ -368,6 +385,11 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
fpb1_reg = FPB1;
}
if (IS_IGDNG(dev)) {
I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL);
I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL);
}
/* Pipe & plane A info */
/* Prime the clock */
if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
@ -395,6 +417,11 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
if (IS_IGDNG(dev)) {
I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
I915_WRITE(PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1);
I915_WRITE(FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL);
I915_WRITE(FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL);
@ -402,6 +429,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ);
I915_WRITE(PFA_WIN_POS, dev_priv->savePFA_WIN_POS);
I915_WRITE(TRANSACONF, dev_priv->saveTRANSACONF);
I915_WRITE(TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A);
I915_WRITE(TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A);
I915_WRITE(TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A);
@ -439,7 +467,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
/* Actually enable it */
I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
DRM_UDELAY(150);
if (IS_I965G(dev))
if (IS_I965G(dev) && !IS_IGDNG(dev))
I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
DRM_UDELAY(150);
@ -454,6 +482,11 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
if (IS_IGDNG(dev)) {
I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
I915_WRITE(PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1);
I915_WRITE(FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL);
I915_WRITE(FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL);
@ -461,6 +494,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ);
I915_WRITE(PFB_WIN_POS, dev_priv->savePFB_WIN_POS);
I915_WRITE(TRANSBCONF, dev_priv->saveTRANSBCONF);
I915_WRITE(TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B);
I915_WRITE(TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B);
I915_WRITE(TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B);

View File

@ -262,8 +262,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
} while (time_after(timeout, jiffies));
}
if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) ==
CRT_HOTPLUG_MONITOR_COLOR)
if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) !=
CRT_HOTPLUG_MONITOR_NONE)
return true;
return false;

View File

@ -863,10 +863,8 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
intel_clock_t clock;
int max_n;
bool found;
int err_most = 47;
found = false;
int err_min = 10000;
/* eDP has only 2 clock choice, no n/m/p setting */
if (HAS_eDP)
@ -890,10 +888,9 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
}
memset(best_clock, 0, sizeof(*best_clock));
max_n = limit->n.max;
for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
/* based on hardware requriment prefer smaller n to precision */
for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) {
/* based on hardware requirement prefer larger m1,m2 */
for (clock.m1 = limit->m1.max;
clock.m1 >= limit->m1.min; clock.m1--) {
@ -907,18 +904,18 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
this_err = abs((10000 - (target*10000/clock.dot)));
if (this_err < err_most) {
*best_clock = clock;
err_most = this_err;
max_n = clock.n;
found = true;
/* found on first matching */
goto out;
} else if (this_err < err_min) {
*best_clock = clock;
err_min = this_err;
}
}
}
}
}
out:
return found;
return true;
}
/* DisplayPort has only two frequencies, 162MHz and 270MHz */

View File

@ -77,16 +77,34 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
u32 temp;
if (mode != DRM_MODE_DPMS_ON) {
temp = I915_READ(hdmi_priv->sdvox_reg);
/* HW workaround, need to toggle enable bit off and on for 12bpc, but
* we do this anyway as it proves more stable in testing.
*/
if (IS_IGDNG(dev)) {
I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE);
} else {
temp = I915_READ(hdmi_priv->sdvox_reg);
I915_WRITE(hdmi_priv->sdvox_reg, temp | SDVO_ENABLE);
}
POSTING_READ(hdmi_priv->sdvox_reg);
}
if (mode != DRM_MODE_DPMS_ON) {
temp &= ~SDVO_ENABLE;
} else {
temp |= SDVO_ENABLE;
}
I915_WRITE(hdmi_priv->sdvox_reg, temp);
POSTING_READ(hdmi_priv->sdvox_reg);
/* HW workaround, need to write this twice for issue that may result
* in first write getting masked.
*/
if (IS_IGDNG(dev)) {
I915_WRITE(hdmi_priv->sdvox_reg, temp);
POSTING_READ(hdmi_priv->sdvox_reg);
}
}
static void intel_hdmi_save(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;

View File

@ -107,6 +107,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
base += 3;
break;
case ATOM_IIO_WRITE:
(void)ctx->card->reg_read(ctx->card, CU16(base + 1));
ctx->card->reg_write(ctx->card, CU16(base + 1), temp);
base += 3;
break;

View File

@ -519,6 +519,7 @@ typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
* AGP
*/
int radeon_agp_init(struct radeon_device *rdev);
void radeon_agp_resume(struct radeon_device *rdev);
void radeon_agp_fini(struct radeon_device *rdev);

View File

@ -237,6 +237,18 @@ int radeon_agp_init(struct radeon_device *rdev)
#endif
}
void radeon_agp_resume(struct radeon_device *rdev)
{
#if __OS_HAS_AGP
int r;
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
if (r)
dev_warn(rdev->dev, "radeon AGP reinit failed\n");
}
#endif
}
void radeon_agp_fini(struct radeon_device *rdev)
{
#if __OS_HAS_AGP

Some files were not shown because too many files have changed in this diff.