Merge branch 'upstream-fixes' into upstream
Conflicts: drivers/scsi/sata_sil24.c
commit fec69a9748
@@ -19,6 +19,7 @@ Contents:
- Control dependencies.
- SMP barrier pairing.
- Examples of memory barrier sequences.
- Read memory barriers vs load speculation.

(*) Explicit kernel barriers.

@@ -248,7 +249,7 @@ And there are a number of things that _must_ or _must_not_ be assumed:
we may get either of:

STORE *A = X; Y = LOAD *A;
STORE *A = Y;
STORE *A = Y = X;

=========================
@@ -344,9 +345,12 @@ Memory barriers come in four basic varieties:

(4) General memory barriers.

A general memory barrier is a combination of both a read memory barrier
and a write memory barrier. It is a partial ordering over both loads and
stores.
A general memory barrier gives a guarantee that all the LOAD and STORE
operations specified before the barrier will appear to happen before all
the LOAD and STORE operations specified after the barrier with respect to
the other components of the system.

A general memory barrier is a partial ordering over both loads and stores.

General memory barriers imply both read and write memory barriers, and so
can substitute for either.
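
As an illustrative sketch (smp_mb() is the kernel's real general-barrier
primitive; the variables and values are invented for the example), a general
barrier can stand in where one side really needs only a write barrier and the
other only a read barrier:

	CPU 1			CPU 2
	===============		===============
	a = 1;
	smp_mb();		x = b;
	b = 2;			smp_mb();
				y = a;

If CPU 2 sees x == 2, it is then guaranteed to see y == 1, even though a
plain write-barrier/read-barrier pairing would also have sufficed here.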
@@ -546,9 +550,9 @@ write barrier, though, again, a general barrier is viable:
=============== ===============
a = 1;
<write barrier>
b = 2; x = a;
b = 2; x = b;
<read barrier>
y = b;
y = a;

Or:

@@ -563,6 +567,18 @@ Or:
Basically, the read barrier always has to be there, even though it can be of
the "weaker" type.

[!] Note that the stores before the write barrier would normally be expected to
match the loads after the read barrier or data dependency barrier, and vice
versa:

CPU 1 CPU 2
=============== ===============
a = 1; }---- --->{ v = c
b = 2; } \ / { w = d
<write barrier> \ <read barrier>
c = 3; } / \ { x = a;
d = 4; }---- --->{ y = b;

EXAMPLES OF MEMORY BARRIER SEQUENCES
------------------------------------
@@ -600,8 +616,8 @@ STORE B, STORE C } all occuring before the unordered set of { STORE D, STORE E
| | +------+
+-------+ : :
|
| Sequence in which stores committed to memory system
| by CPU 1
| Sequence in which stores are committed to the
| memory system by CPU 1
V

@@ -683,14 +699,12 @@ then the following will occur:
| : : | |
| : : | CPU 2 |
| +-------+ | |
\ | X->9 |------>| |
\ +-------+ | |
----->| B->2 | | |
+-------+ | |
Makes sure all effects ---> ddddddddddddddddd | |
prior to the store of C +-------+ | |
are perceptible to | B->2 |------>| |
successive loads +-------+ | |
| | X->9 |------>| |
| +-------+ | |
Makes sure all effects ---> \ ddddddddddddddddd | |
prior to the store of C \ +-------+ | |
are perceptible to ----->| B->2 |------>| |
subsequent loads +-------+ | |
: : +-------+

@@ -699,73 +713,239 @@ following sequence of events:

CPU 1 CPU 2
======================= =======================
{ A = 0, B = 9 }
STORE A=1
STORE B=2
STORE C=3
<write barrier>
STORE D=4
STORE E=5
LOAD A
STORE B=2
LOAD B
LOAD C
LOAD D
LOAD E
LOAD A

Without intervention, CPU 2 may then choose to perceive the events on CPU 1 in
some effectively random order, despite the write barrier issued by CPU 1:

+-------+ : :
| | +------+
| |------>| C=3 | }
| | : +------+ }
| | : | A=1 | }
| | : +------+ }
| CPU 1 | : | B=2 | }---
| | +------+ } \
| | wwwwwwwwwwwww} \
| | +------+ } \ : : +-------+
| | : | E=5 | } \ +-------+ | |
| | : +------+ } \ { | C->3 |------>| |
| |------>| D=4 | } \ { +-------+ : | |
| | +------+ \ { | E->5 | : | |
+-------+ : : \ { +-------+ : | |
Transfer -->{ | A->1 | : | CPU 2 |
from CPU 1 { +-------+ : | |
to CPU 2 { | D->4 | : | |
{ +-------+ : | |
{ | B->2 |------>| |
+-------+ | |
: : +-------+
+-------+ : : : :
| | +------+ +-------+
| |------>| A=1 |------ --->| A->0 |
| | +------+ \ +-------+
| CPU 1 | wwwwwwwwwwwwwwww \ --->| B->9 |
| | +------+ | +-------+
| |------>| B=2 |--- | : :
| | +------+ \ | : : +-------+
+-------+ : : \ | +-------+ | |
---------->| B->2 |------>| |
| +-------+ | CPU 2 |
| | A->0 |------>| |
| +-------+ | |
| : : +-------+
\ : :
\ +-------+
---->| A->1 |
+-------+
: :

If, however, a read barrier were to be placed between the load of C and the
load of D on CPU 2, then the partial ordering imposed by CPU 1 will be
perceived correctly by CPU 2.
If, however, a read barrier were to be placed between the load of E and the
load of A on CPU 2:

+-------+ : :
| | +------+
| |------>| C=3 | }
| | : +------+ }
| | : | A=1 | }---
| | : +------+ } \
| CPU 1 | : | B=2 | } \
| | +------+ \
| | wwwwwwwwwwwwwwww \
| | +------+ \ : : +-------+
| | : | E=5 | } \ +-------+ | |
| | : +------+ }--- \ { | C->3 |------>| |
| |------>| D=4 | } \ \ { +-------+ : | |
| | +------+ \ -->{ | B->2 | : | |
+-------+ : : \ { +-------+ : | |
\ { | A->1 | : | CPU 2 |
\ +-------+ | |
At this point the read ----> \ rrrrrrrrrrrrrrrrr | |
barrier causes all effects \ +-------+ | |
prior to the storage of C \ { | E->5 | : | |
to be perceptible to CPU 2 -->{ +-------+ : | |
{ | D->4 |------>| |
+-------+ | |
: : +-------+
CPU 1 CPU 2
======================= =======================
{ A = 0, B = 9 }
STORE A=1
<write barrier>
STORE B=2
LOAD B
<read barrier>
LOAD A

then the partial ordering imposed by CPU 1 will be perceived correctly by CPU
2:

+-------+ : : : :
| | +------+ +-------+
| |------>| A=1 |------ --->| A->0 |
| | +------+ \ +-------+
| CPU 1 | wwwwwwwwwwwwwwww \ --->| B->9 |
| | +------+ | +-------+
| |------>| B=2 |--- | : :
| | +------+ \ | : : +-------+
+-------+ : : \ | +-------+ | |
---------->| B->2 |------>| |
| +-------+ | CPU 2 |
| : : | |
| : : | |
At this point the read ----> \ rrrrrrrrrrrrrrrrr | |
barrier causes all effects \ +-------+ | |
prior to the storage of B ---->| A->1 |------>| |
to be perceptible to CPU 2 +-------+ | |
: : +-------+

To illustrate this more completely, consider what could happen if the code
contained a load of A either side of the read barrier:

CPU 1 CPU 2
======================= =======================
{ A = 0, B = 9 }
STORE A=1
<write barrier>
STORE B=2
LOAD B
LOAD A [first load of A]
<read barrier>
LOAD A [second load of A]

Even though the two loads of A both occur after the load of B, they may both
come up with different values:

+-------+ : : : :
| | +------+ +-------+
| |------>| A=1 |------ --->| A->0 |
| | +------+ \ +-------+
| CPU 1 | wwwwwwwwwwwwwwww \ --->| B->9 |
| | +------+ | +-------+
| |------>| B=2 |--- | : :
| | +------+ \ | : : +-------+
+-------+ : : \ | +-------+ | |
---------->| B->2 |------>| |
| +-------+ | CPU 2 |
| : : | |
| : : | |
| +-------+ | |
| | A->0 |------>| 1st |
| +-------+ | |
At this point the read ----> \ rrrrrrrrrrrrrrrrr | |
barrier causes all effects \ +-------+ | |
prior to the storage of B ---->| A->1 |------>| 2nd |
to be perceptible to CPU 2 +-------+ | |
: : +-------+

But it may be that the update to A from CPU 1 becomes perceptible to CPU 2
before the read barrier completes anyway:

+-------+ : : : :
| | +------+ +-------+
| |------>| A=1 |------ --->| A->0 |
| | +------+ \ +-------+
| CPU 1 | wwwwwwwwwwwwwwww \ --->| B->9 |
| | +------+ | +-------+
| |------>| B=2 |--- | : :
| | +------+ \ | : : +-------+
+-------+ : : \ | +-------+ | |
---------->| B->2 |------>| |
| +-------+ | CPU 2 |
| : : | |
\ : : | |
\ +-------+ | |
---->| A->1 |------>| 1st |
+-------+ | |
rrrrrrrrrrrrrrrrr | |
+-------+ | |
| A->1 |------>| 2nd |
+-------+ | |
: : +-------+

The guarantee is that the second load will always come up with A == 1 if the
load of B came up with B == 2. No such guarantee exists for the first load of
A; that may come up with either A == 0 or A == 1.
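
Expressed as a sketch in kernel C (smp_rmb() is the real read-barrier
primitive; the variables are those of the example above):

	q = b;			/* suppose this came up with q == 2 */
	first = a;		/* may see either 0 or 1 */
	smp_rmb();
	second = a;		/* guaranteed to see 1 if q == 2 */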

READ MEMORY BARRIERS VS LOAD SPECULATION
----------------------------------------

Many CPUs speculate with loads: that is they see that they will need to load an
item from memory, and they find a time where they're not using the bus for any
other loads, and so do the load in advance - even though they haven't actually
got to that point in the instruction execution flow yet. This permits the
actual load instruction to potentially complete immediately because the CPU
already has the value to hand.

It may turn out that the CPU didn't actually need the value - perhaps because a
branch circumvented the load - in which case it can discard the value or just
cache it for later use.

Consider:

CPU 1 CPU 2
======================= =======================
LOAD B
DIVIDE } Divide instructions generally
DIVIDE } take a long time to perform
LOAD A

Which might appear as this:

: : +-------+
+-------+ | |
--->| B->2 |------>| |
+-------+ | CPU 2 |
: :DIVIDE | |
+-------+ | |
The CPU being busy doing a ---> --->| A->0 |~~~~ | |
division speculates on the +-------+ ~ | |
LOAD of A : : ~ | |
: :DIVIDE | |
: : ~ | |
Once the divisions are complete --> : : ~-->| |
the CPU can then perform the : : | |
LOAD with immediate effect : : +-------+

Placing a read barrier or a data dependency barrier just before the second
load:

CPU 1 CPU 2
======================= =======================
LOAD B
DIVIDE
DIVIDE
<read barrier>
LOAD A

will force any value speculatively obtained to be reconsidered to an extent
dependent on the type of barrier used. If there was no change made to the
speculated memory location, then the speculated value will just be used:

: : +-------+
+-------+ | |
--->| B->2 |------>| |
+-------+ | CPU 2 |
: :DIVIDE | |
+-------+ | |
The CPU being busy doing a ---> --->| A->0 |~~~~ | |
division speculates on the +-------+ ~ | |
LOAD of A : : ~ | |
: :DIVIDE | |
: : ~ | |
: : ~ | |
rrrrrrrrrrrrrrrr~ | |
: : ~ | |
: : ~-->| |
: : | |
: : +-------+

but if there was an update or an invalidation from another CPU pending, then
the speculation will be cancelled and the value reloaded:

: : +-------+
+-------+ | |
--->| B->2 |------>| |
+-------+ | CPU 2 |
: :DIVIDE | |
+-------+ | |
The CPU being busy doing a ---> --->| A->0 |~~~~ | |
division speculates on the +-------+ ~ | |
LOAD of A : : ~ | |
: :DIVIDE | |
: : ~ | |
: : ~ | |
rrrrrrrrrrrrrrrrr | |
+-------+ | |
The speculation is discarded ---> --->| A->1 |------>| |
and an updated value is +-------+ | |
retrieved : : +-------+

========================
@@ -901,7 +1081,7 @@ IMPLICIT KERNEL MEMORY BARRIERS
===============================

Some of the other functions in the linux kernel imply memory barriers, amongst
which are locking, scheduling and memory allocation functions.
which are locking and scheduling functions.

This specification is a _minimum_ guarantee; any particular architecture may
provide more substantial guarantees, but these may not be relied upon outside
@@ -966,6 +1146,20 @@ equivalent to a full barrier, but a LOCK followed by an UNLOCK is not.
barriers is that the effects of instructions outside of a critical section may
seep into the inside of the critical section.

A LOCK followed by an UNLOCK may not be assumed to be a full memory barrier
because it is possible for an access preceding the LOCK to happen after the
LOCK, and an access following the UNLOCK to happen before the UNLOCK, and the
two accesses can themselves then cross:

*A = a;
LOCK
UNLOCK
*B = b;

may occur as:

LOCK, STORE *B, STORE *A, UNLOCK
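
So where the ordering of the two stores matters to other CPUs, an explicit
barrier is still required; a minimal sketch using the real spin_lock(),
spin_unlock() and smp_mb() primitives (the lock and variables are invented
for the example):

	*A = a;
	spin_lock(&my_lock);
	spin_unlock(&my_lock);
	smp_mb();	/* order the store to *A before the store to *B */
	*B = b;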

Locks and semaphores may not provide any guarantee of ordering on UP compiled
systems, and so cannot be counted on in such a situation to actually achieve
anything at all - especially with respect to I/O accesses - unless combined
@@ -1016,8 +1210,6 @@ Other functions that imply barriers:

(*) schedule() and similar imply full memory barriers.

(*) Memory allocation and release functions imply full memory barriers.

=================================
INTER-CPU LOCKING BARRIER EFFECTS
@@ -214,12 +214,13 @@ hardware.
The interaction of the iflag bits is as follows (parity error
given as an example):
	Parity error	INPCK	IGNPAR
	None		n/a	n/a	character received
	Yes		n/a	0	character discarded
	Yes		0	1	character received, marked as
	n/a		0	n/a	character received, marked as
					TTY_NORMAL
	Yes		1	1	character received, marked as
	None		1	n/a	character received, marked as
					TTY_NORMAL
	Yes		1	0	character received, marked as
					TTY_PARITY
	Yes		1	1	character discarded

Other flags may be used (eg, xon/xoff characters) if your
hardware supports hardware "soft" flow control.
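
As a sketch of the receive-path logic the table above implies (I_INPCK(),
I_IGNPAR(), TTY_NORMAL, TTY_PARITY and tty_insert_flip_char() are the real
kernel interfaces; parity_error and ch are invented names for the example):

	if (parity_error && I_INPCK(tty)) {
		if (I_IGNPAR(tty))
			return;			/* discard the character */
		flag = TTY_PARITY;		/* received, but marked */
	} else {
		flag = TTY_NORMAL;		/* received as normal */
	}
	tty_insert_flip_char(tty, ch, flag);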
MAINTAINERS | 17
@@ -568,6 +568,18 @@ L: linuxppc-dev@ozlabs.org
W: http://www.penguinppc.org/ppc64/
S: Supported

BROADCOM BNX2 GIGABIT ETHERNET DRIVER
P: Michael Chan
M: mchan@broadcom.com
L: netdev@vger.kernel.org
S: Supported

BROADCOM TG3 GIGABIT ETHERNET DRIVER
P: Michael Chan
M: mchan@broadcom.com
L: netdev@vger.kernel.org
S: Supported

BTTV VIDEO4LINUX DRIVER
P: Mauro Carvalho Chehab
M: mchehab@infradead.org
@@ -1877,6 +1889,11 @@ L: linux-kernel@vger.kernel.org
W: http://www.atnf.csiro.au/~rgooch/linux/kernel-patches.html
S: Maintained

MULTIMEDIA CARD SUBSYSTEM
P: Russell King
M: rmk+mmc@arm.linux.org.uk
S: Maintained

MULTISOUND SOUND DRIVER
P: Andrew Veliath
M: andrewtv@usa.net
Makefile | 4
@@ -1,8 +1,8 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 17
EXTRAVERSION =-rc5
NAME=Lordi Rules
EXTRAVERSION =-rc6
NAME=Crazed Snow-Weasel

# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
@@ -182,7 +182,6 @@ EXPORT_SYMBOL(smp_num_cpus);
EXPORT_SYMBOL(smp_call_function);
EXPORT_SYMBOL(smp_call_function_on_cpu);
EXPORT_SYMBOL(_atomic_dec_and_lock);
EXPORT_SYMBOL(cpu_present_mask);
#endif /* CONFIG_SMP */

/*
@@ -94,7 +94,7 @@ common_shutdown_1(void *generic_ptr)
if (cpuid != boot_cpuid) {
flags |= 0x00040000UL; /* "remain halted" */
*pflags = flags;
clear_bit(cpuid, &cpu_present_mask);
cpu_clear(cpuid, cpu_present_map);
halt();
}
#endif
@@ -120,8 +120,8 @@ common_shutdown_1(void *generic_ptr)

#ifdef CONFIG_SMP
/* Wait for the secondaries to halt. */
cpu_clear(boot_cpuid, cpu_possible_map);
while (cpus_weight(cpu_possible_map))
cpu_clear(boot_cpuid, cpu_present_map);
while (cpus_weight(cpu_present_map))
barrier();
#endif

@@ -68,7 +68,6 @@ enum ipi_message_type {
static int smp_secondary_alive __initdata = 0;

/* Which cpus ids came online. */
cpumask_t cpu_present_mask;
cpumask_t cpu_online_map;

EXPORT_SYMBOL(cpu_online_map);
@@ -439,7 +438,7 @@ setup_smp(void)
if ((cpu->flags & 0x1cc) == 0x1cc) {
smp_num_probed++;
/* Assume here that "whami" == index */
cpu_set(i, cpu_present_mask);
cpu_set(i, cpu_present_map);
cpu->pal_revision = boot_cpu_palrev;
}

@@ -450,11 +449,10 @@ setup_smp(void)
}
} else {
smp_num_probed = 1;
cpu_set(boot_cpuid, cpu_present_mask);
}

printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
smp_num_probed, cpu_possible_map.bits[0]);
printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_map = %lx\n",
smp_num_probed, cpu_present_map.bits[0]);
}

/*
@@ -473,7 +471,7 @@ smp_prepare_cpus(unsigned int max_cpus)

/* Nothing to do on a UP box, or when told not to. */
if (smp_num_probed == 1 || max_cpus == 0) {
cpu_present_mask = cpumask_of_cpu(boot_cpuid);
cpu_present_map = cpumask_of_cpu(boot_cpuid);
printk(KERN_INFO "SMP mode deactivated.\n");
return;
}
@@ -486,10 +484,6 @@ smp_prepare_cpus(unsigned int max_cpus)
void __devinit
smp_prepare_boot_cpu(void)
{
/*
* Mark the boot cpu (current cpu) as online
*/
cpu_set(smp_processor_id(), cpu_online_map);
}

int __devinit
@@ -66,7 +66,7 @@ titan_update_irq_hw(unsigned long mask)
register int bcpu = boot_cpuid;

#ifdef CONFIG_SMP
cpumask_t cpm = cpu_present_mask;
cpumask_t cpm = cpu_present_map;
volatile unsigned long *dim0, *dim1, *dim2, *dim3;
unsigned long mask0, mask1, mask2, mask3, dummy;

@@ -101,7 +101,7 @@ config DEBUG_S3C2410_UART
help
Choice for UART for kernel low-level using S3C2410 UARTS,
should be between zero and two. The port must have been
initalised by the boot-loader before use.
initialised by the boot-loader before use.

The uncompressor code port configuration is now handled
by CONFIG_S3C2410_LOWLEVEL_UART_PORT.
@@ -111,21 +111,21 @@ static void __init ts72xx_map_io(void)
}
}

static unsigned char ts72xx_rtc_readb(unsigned long addr)
static unsigned char ts72xx_rtc_readbyte(unsigned long addr)
{
__raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE);
return __raw_readb(TS72XX_RTC_DATA_VIRT_BASE);
}

static void ts72xx_rtc_writeb(unsigned char value, unsigned long addr)
static void ts72xx_rtc_writebyte(unsigned char value, unsigned long addr)
{
__raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE);
__raw_writeb(value, TS72XX_RTC_DATA_VIRT_BASE);
}

static struct m48t86_ops ts72xx_rtc_ops = {
.readb = ts72xx_rtc_readb,
.writeb = ts72xx_rtc_writeb,
.readbyte = ts72xx_rtc_readbyte,
.writebyte = ts72xx_rtc_writebyte,
};

static struct platform_device ts72xx_rtc_device = {
@@ -178,8 +178,12 @@ static int ixp23xx_irq_set_type(unsigned int irq, unsigned int type)

static void ixp23xx_irq_mask(unsigned int irq)
{
volatile unsigned long *intr_reg = IXP23XX_INTR_EN1 + (irq / 32);
volatile unsigned long *intr_reg;

if (irq >= 56)
irq += 8;

intr_reg = IXP23XX_INTR_EN1 + (irq / 32);
*intr_reg &= ~(1 << (irq % 32));
}

@@ -199,17 +203,25 @@ static void ixp23xx_irq_ack(unsigned int irq)
*/
static void ixp23xx_irq_level_unmask(unsigned int irq)
{
volatile unsigned long *intr_reg = IXP23XX_INTR_EN1 + (irq / 32);
volatile unsigned long *intr_reg;

ixp23xx_irq_ack(irq);

if (irq >= 56)
irq += 8;

intr_reg = IXP23XX_INTR_EN1 + (irq / 32);
*intr_reg |= (1 << (irq % 32));
}

static void ixp23xx_irq_edge_unmask(unsigned int irq)
{
volatile unsigned long *intr_reg = IXP23XX_INTR_EN1 + (irq / 32);
volatile unsigned long *intr_reg;

if (irq >= 56)
irq += 8;

intr_reg = IXP23XX_INTR_EN1 + (irq / 32);
*intr_reg |= (1 << (irq % 32));
}

@@ -141,7 +141,7 @@ config IXP4XX_INDIRECT_PCI
2) If > 64MB of memory space is required, the IXP4xx can be
configured to use indirect registers to access PCI This allows
for up to 128MB (0x48000000 to 0x4fffffff) of memory on the bus.
The disadvantadge of this is that every PCI access requires
The disadvantage of this is that every PCI access requires
three local register accesses plus a spinlock, but in some
cases the performance hit is acceptable. In addition, you cannot
mmap() PCI devices in this case due to the indirect nature
@@ -493,6 +493,7 @@ static void __init mainstone_map_io(void)
MACHINE_START(MAINSTONE, "Intel HCDDBBVA0 Development Platform (aka Mainstone)")
/* Maintainer: MontaVista Software Inc. */
.phys_io = 0x40000000,
.boot_params = 0xa0000100, /* BLOB boot parameter setting */
.io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc,
.map_io = mainstone_map_io,
.init_irq = mainstone_init_irq,
@@ -170,7 +170,7 @@ config S3C2410_PM_DEBUG
depends on ARCH_S3C2410 && PM
help
Say Y here if you want verbose debugging from the PM Suspend and
Resume code. See `Documentation/arm/Samsing-S3C24XX/Suspend.txt`
Resume code. See <file:Documentation/arm/Samsung-S3C24XX/Suspend.txt>
for more information.

config S3C2410_PM_CHECK
@@ -376,7 +376,7 @@ void __init build_mem_type_table(void)
ecc_mask = 0;
}

if (cpu_arch <= CPU_ARCH_ARMv5TEJ) {
if (cpu_arch <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) {
for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
if (mem_types[i].prot_l1)
mem_types[i].prot_l1 |= PMD_BIT4;
@@ -631,7 +631,7 @@ void setup_mm_for_reboot(char mode)
pgd = init_mm.pgd;

base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ)
if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
base_pmdval |= PMD_BIT4;

for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
@@ -427,12 +427,13 @@ __xsc3_setup:
#endif
mcr p15, 0, r0, c1, c0, 1 @ set auxiliary control reg
mrc p15, 0, r0, c1, c0, 0 @ get control register
bic r0, r0, #0x0200 @ .... ..R. .... ....
bic r0, r0, #0x0002 @ .... .... .... ..A.
orr r0, r0, #0x0005 @ .... .... .... .C.M
#if BTB_ENABLE
bic r0, r0, #0x0200 @ .... ..R. .... ....
orr r0, r0, #0x3900 @ ..VI Z..S .... ....
#else
bic r0, r0, #0x0a00 @ .... Z.R. .... ....
orr r0, r0, #0x3100 @ ..VI ...S .... ....
#endif
#if L2_CACHE_ENABLE
@@ -1066,14 +1066,6 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
},
},
{
.callback = disable_acpi_pci,
.ident = "HP xw9300",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP xw9300 Workstation"),
},
},
{}
};

@@ -5,17 +5,34 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/acpi.h>

#include <asm/pci-direct.h>
#include <asm/acpi.h>
#include <asm/apic.h>

#ifdef CONFIG_ACPI

static int nvidia_hpet_detected __initdata;

static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
{
nvidia_hpet_detected = 1;
return 0;
}
#endif

static int __init check_bridge(int vendor, int device)
{
#ifdef CONFIG_ACPI
/* According to Nvidia all timer overrides are bogus. Just ignore
them all. */
/* According to Nvidia all timer overrides are bogus unless HPET
is enabled. */
if (vendor == PCI_VENDOR_ID_NVIDIA) {
acpi_skip_timer_override = 1;
nvidia_hpet_detected = 0;
acpi_table_parse(ACPI_HPET, nvidia_hpet_check);
if (nvidia_hpet_detected == 0) {
acpi_skip_timer_override = 1;
}
}
#endif
if (vendor == PCI_VENDOR_ID_ATI && timer_over_8254 == 1) {
@@ -1547,15 +1547,18 @@ void __init setup_arch(char **cmdline_p)
if (efi_enabled)
efi_map_memmap();

#ifdef CONFIG_X86_IO_APIC
check_acpi_pci(); /* Checks more than just ACPI actually */
#endif

#ifdef CONFIG_ACPI
/*
* Parse the ACPI tables for possible boot-time SMP configuration.
*/
acpi_boot_table_init();
#endif

#ifdef CONFIG_X86_IO_APIC
check_acpi_pci(); /* Checks more than just ACPI actually */
#endif

#ifdef CONFIG_ACPI
acpi_boot_init();

#if defined(CONFIG_SMP) && defined(CONFIG_X86_PC)
@@ -93,9 +93,11 @@ int __init mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid
int i;
for (i = 0; apic_probe[i]; ++i) {
if (apic_probe[i]->mps_oem_check(mpc,oem,productid)) {
genapic = apic_probe[i];
printk(KERN_INFO "Switched to APIC driver `%s'.\n",
genapic->name);
if (!cmdline_apic) {
genapic = apic_probe[i];
printk(KERN_INFO "Switched to APIC driver `%s'.\n",
genapic->name);
}
return 1;
}
}
@@ -107,9 +109,11 @@ int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
int i;
for (i = 0; apic_probe[i]; ++i) {
if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) {
genapic = apic_probe[i];
printk(KERN_INFO "Switched to APIC driver `%s'.\n",
genapic->name);
if (!cmdline_apic) {
genapic = apic_probe[i];
printk(KERN_INFO "Switched to APIC driver `%s'.\n",
genapic->name);
}
return 1;
}
}
@@ -13,7 +13,7 @@ choice
default SGI_IP22

config MIPS_MTX1
bool "Support for 4G Systems MTX-1 board"
bool "4G Systems MTX-1 board"
select DMA_NONCOHERENT
select HW_HAS_PCI
select SOC_AU1500
@@ -120,7 +120,7 @@ config MIPS_MIRAGE
select SYS_SUPPORTS_LITTLE_ENDIAN

config MIPS_COBALT
bool "Support for Cobalt Server"
bool "Cobalt Server"
select DMA_NONCOHERENT
select HW_HAS_PCI
select I8259
@@ -132,7 +132,7 @@ config MIPS_COBALT
select SYS_SUPPORTS_LITTLE_ENDIAN

config MACH_DECSTATION
bool "Support for DECstations"
bool "DECstations"
select BOOT_ELF32
select DMA_NONCOHERENT
select EARLY_PRINTK
@@ -158,7 +158,7 @@ config MACH_DECSTATION
otherwise choose R3000.

config MIPS_EV64120
bool "Support for Galileo EV64120 Evaluation board (EXPERIMENTAL)"
bool "Galileo EV64120 Evaluation board (EXPERIMENTAL)"
depends on EXPERIMENTAL
select DMA_NONCOHERENT
select HW_HAS_PCI
@@ -175,7 +175,7 @@ config MIPS_EV64120
kernel for this platform.

config MIPS_EV96100
bool "Support for Galileo EV96100 Evaluation board (EXPERIMENTAL)"
bool "Galileo EV96100 Evaluation board (EXPERIMENTAL)"
depends on EXPERIMENTAL
select DMA_NONCOHERENT
select HW_HAS_PCI
@@ -195,7 +195,7 @@ config MIPS_EV96100
here if you wish to build a kernel for this platform.

config MIPS_IVR
bool "Support for Globespan IVR board"
bool "Globespan IVR board"
select DMA_NONCOHERENT
select HW_HAS_PCI
select ITE_BOARD_GEN
@@ -211,7 +211,7 @@ config MIPS_IVR
build a kernel for this platform.

config MIPS_ITE8172
bool "Support for ITE 8172G board"
bool "ITE 8172G board"
select DMA_NONCOHERENT
select HW_HAS_PCI
select ITE_BOARD_GEN
@@ -228,7 +228,7 @@ config MIPS_ITE8172
a kernel for this platform.

config MACH_JAZZ
bool "Support for the Jazz family of machines"
bool "Jazz family of machines"
select ARC
select ARC32
select ARCH_MAY_HAVE_PC_FDC
@@ -246,7 +246,7 @@ config MACH_JAZZ
Olivetti M700-10 workstations.

config LASAT
bool "Support for LASAT Networks platforms"
bool "LASAT Networks platforms"
select DMA_NONCOHERENT
select HW_HAS_PCI
select MIPS_GT64120
@@ -258,7 +258,7 @@ config LASAT
select SYS_SUPPORTS_LITTLE_ENDIAN

config MIPS_ATLAS
bool "Support for MIPS Atlas board"
bool "MIPS Atlas board"
select BOOT_ELF32
select DMA_NONCOHERENT
select IRQ_CPU
@@ -283,7 +283,7 @@ config MIPS_ATLAS
board.

config MIPS_MALTA
bool "Support for MIPS Malta board"
bool "MIPS Malta board"
select ARCH_MAY_HAVE_PC_FDC
select BOOT_ELF32
select HAVE_STD_PC_SERIAL_PORT
@@ -311,7 +311,7 @@ config MIPS_MALTA
board.

config MIPS_SEAD
bool "Support for MIPS SEAD board (EXPERIMENTAL)"
bool "MIPS SEAD board (EXPERIMENTAL)"
depends on EXPERIMENTAL
select IRQ_CPU
select DMA_NONCOHERENT
@@ -328,7 +328,7 @@ config MIPS_SEAD
board.

config MIPS_SIM
bool 'Support for MIPS simulator (MIPSsim)'
bool 'MIPS simulator (MIPSsim)'
select DMA_NONCOHERENT
select IRQ_CPU
select SYS_HAS_CPU_MIPS32_R1
@@ -341,7 +341,7 @@ config MIPS_SIM
emulator.

config MOMENCO_JAGUAR_ATX
bool "Support for Momentum Jaguar board"
bool "Momentum Jaguar board"
select BOOT_ELF32
select DMA_NONCOHERENT
select HW_HAS_PCI
@@ -361,7 +361,7 @@ config MOMENCO_JAGUAR_ATX
Momentum Computer <http://www.momenco.com/>.

config MOMENCO_OCELOT
bool "Support for Momentum Ocelot board"
bool "Momentum Ocelot board"
select DMA_NONCOHERENT
select HW_HAS_PCI
select IRQ_CPU
@@ -378,7 +378,7 @@ config MOMENCO_OCELOT
Momentum Computer <http://www.momenco.com/>.

config MOMENCO_OCELOT_3
bool "Support for Momentum Ocelot-3 board"
bool "Momentum Ocelot-3 board"
select BOOT_ELF32
select DMA_NONCOHERENT
select HW_HAS_PCI
@@ -397,7 +397,7 @@ config MOMENCO_OCELOT_3
PMC-Sierra Rm79000 core.

config MOMENCO_OCELOT_C
bool "Support for Momentum Ocelot-C board"
bool "Momentum Ocelot-C board"
select DMA_NONCOHERENT
select HW_HAS_PCI
select IRQ_CPU
@@ -414,7 +414,7 @@ config MOMENCO_OCELOT_C
Momentum Computer <http://www.momenco.com/>.

config MOMENCO_OCELOT_G
bool "Support for Momentum Ocelot-G board"
bool "Momentum Ocelot-G board"
select DMA_NONCOHERENT
select HW_HAS_PCI
select IRQ_CPU
@@ -431,23 +431,23 @@ config MOMENCO_OCELOT_G
Momentum Computer <http://www.momenco.com/>.

config MIPS_XXS1500
bool "Support for MyCable XXS1500 board"
bool "MyCable XXS1500 board"
select DMA_NONCOHERENT
select SOC_AU1500
select SYS_SUPPORTS_LITTLE_ENDIAN

config PNX8550_V2PCI
bool "Support for Philips PNX8550 based Viper2-PCI board"
bool "Philips PNX8550 based Viper2-PCI board"
select PNX8550
select SYS_SUPPORTS_LITTLE_ENDIAN

config PNX8550_JBS
bool "Support for Philips PNX8550 based JBS board"
bool "Philips PNX8550 based JBS board"
select PNX8550
select SYS_SUPPORTS_LITTLE_ENDIAN

config DDB5074
bool "Support for NEC DDB Vrc-5074 (EXPERIMENTAL)"
bool "NEC DDB Vrc-5074 (EXPERIMENTAL)"
depends on EXPERIMENTAL
select DDB5XXX_COMMON
select DMA_NONCOHERENT
@@ -465,7 +465,7 @@ config DDB5074
evaluation board.

config DDB5476
bool "Support for NEC DDB Vrc-5476"
bool "NEC DDB Vrc-5476"
select DDB5XXX_COMMON
select DMA_NONCOHERENT
select HAVE_STD_PC_SERIAL_PORT
@@ -486,7 +486,7 @@ config DDB5476
IDE controller, PS2 keyboard, PS2 mouse, etc.

config DDB5477
bool "Support for NEC DDB Vrc-5477"
bool "NEC DDB Vrc-5477"
select DDB5XXX_COMMON
select DMA_NONCOHERENT
select HW_HAS_PCI
@@ -504,13 +504,13 @@ config DDB5477
ether port USB, AC97, PCI, etc.

config MACH_VR41XX
bool "Support for NEC VR4100 series based machines"
bool "NEC VR41XX-based machines"
select SYS_HAS_CPU_VR41XX
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_64BIT_KERNEL if EXPERIMENTAL

config PMC_YOSEMITE
bool "Support for PMC-Sierra Yosemite eval board"
bool "PMC-Sierra Yosemite eval board"
select DMA_COHERENT
select HW_HAS_PCI
select IRQ_CPU
@@ -527,7 +527,7 @@ config PMC_YOSEMITE
manufactured by PMC-Sierra.

config QEMU
bool "Support for Qemu"
bool "Qemu"
select DMA_COHERENT
select GENERIC_ISA_DMA
select HAVE_STD_PC_SERIAL_PORT
@@ -547,7 +547,7 @@ config QEMU
can be found at http://www.linux-mips.org/wiki/Qemu.

config SGI_IP22
bool "Support for SGI IP22 (Indy/Indigo2)"
bool "SGI IP22 (Indy/Indigo2)"
select ARC
select ARC32
select BOOT_ELF32
@@ -567,7 +567,7 @@ config SGI_IP22
that runs on these, say Y here.

config SGI_IP27
bool "Support for SGI IP27 (Origin200/2000)"
bool "SGI IP27 (Origin200/2000)"
select ARC
select ARC64
select BOOT_ELF64
@@ -583,7 +583,7 @@ config SGI_IP27
here.

config SGI_IP32
bool "Support for SGI IP32 (O2) (EXPERIMENTAL)"
bool "SGI IP32 (O2) (EXPERIMENTAL)"
depends on EXPERIMENTAL
select ARC
select ARC32
@@ -604,7 +604,7 @@ config SGI_IP32
If you want this kernel to run on SGI O2 workstation, say Y here.

config SIBYTE_BIGSUR
bool "Support for Sibyte BCM91480B-BigSur"
bool "Sibyte BCM91480B-BigSur"
select BOOT_ELF32
select DMA_COHERENT
select PCI_DOMAINS
@@ -615,7 +615,7 @@ config SIBYTE_BIGSUR
select SYS_SUPPORTS_LITTLE_ENDIAN

config SIBYTE_SWARM
bool "Support for Sibyte BCM91250A-SWARM"
bool "Sibyte BCM91250A-SWARM"
select BOOT_ELF32
select DMA_COHERENT
select SIBYTE_SB1250
@@ -626,7 +626,7 @@ config SIBYTE_SWARM
select SYS_SUPPORTS_LITTLE_ENDIAN

config SIBYTE_SENTOSA
bool "Support for Sibyte BCM91250E-Sentosa"
bool "Sibyte BCM91250E-Sentosa"
depends on EXPERIMENTAL
select BOOT_ELF32
select DMA_COHERENT
@@ -637,7 +637,7 @@ config SIBYTE_SENTOSA
select SYS_SUPPORTS_LITTLE_ENDIAN

config SIBYTE_RHONE
bool "Support for Sibyte BCM91125E-Rhone"
bool "Sibyte BCM91125E-Rhone"
depends on EXPERIMENTAL
select BOOT_ELF32
select DMA_COHERENT
@@ -648,7 +648,7 @@ config SIBYTE_RHONE
select SYS_SUPPORTS_LITTLE_ENDIAN

config SIBYTE_CARMEL
bool "Support for Sibyte BCM91120x-Carmel"
bool "Sibyte BCM91120x-Carmel"
depends on EXPERIMENTAL
select BOOT_ELF32
select DMA_COHERENT
@@ -659,7 +659,7 @@ config SIBYTE_CARMEL
select SYS_SUPPORTS_LITTLE_ENDIAN

config SIBYTE_PTSWARM
bool "Support for Sibyte BCM91250PT-PTSWARM"
bool "Sibyte BCM91250PT-PTSWARM"
depends on EXPERIMENTAL
select BOOT_ELF32
select DMA_COHERENT
@@ -671,7 +671,7 @@ config SIBYTE_PTSWARM
select SYS_SUPPORTS_LITTLE_ENDIAN

config SIBYTE_LITTLESUR
bool "Support for Sibyte BCM91250C2-LittleSur"
bool "Sibyte BCM91250C2-LittleSur"
depends on EXPERIMENTAL
select BOOT_ELF32
select DMA_COHERENT
@@ -683,7 +683,7 @@ config SIBYTE_LITTLESUR
select SYS_SUPPORTS_LITTLE_ENDIAN

config SIBYTE_CRHINE
bool "Support for Sibyte BCM91120C-CRhine"
bool "Sibyte BCM91120C-CRhine"
depends on EXPERIMENTAL
select BOOT_ELF32
select DMA_COHERENT
@@ -694,7 +694,7 @@ config SIBYTE_CRHINE
select SYS_SUPPORTS_LITTLE_ENDIAN

config SIBYTE_CRHONE
bool "Support for Sibyte BCM91125C-CRhone"
bool "Sibyte BCM91125C-CRhone"
depends on EXPERIMENTAL
select BOOT_ELF32
select DMA_COHERENT
@@ -706,7 +706,7 @@ config SIBYTE_CRHONE
select SYS_SUPPORTS_LITTLE_ENDIAN

config SNI_RM200_PCI
bool "Support for SNI RM200 PCI"
bool "SNI RM200 PCI"
select ARC
select ARC32
select ARCH_MAY_HAVE_PC_FDC
@@ -732,7 +732,7 @@ config SNI_RM200_PCI
support this machine type.

config TOSHIBA_JMR3927
bool "Support for Toshiba JMR-TX3927 board"
bool "Toshiba JMR-TX3927 board"
select DMA_NONCOHERENT
select HW_HAS_PCI
select MIPS_TX3927
@@ -743,7 +743,7 @@ config TOSHIBA_JMR3927
select TOSHIBA_BOARDS

config TOSHIBA_RBTX4927
bool "Support for Toshiba TBTX49[23]7 board"
bool "Toshiba TBTX49[23]7 board"
select DMA_NONCOHERENT
select HAS_TXX9_SERIAL
select HW_HAS_PCI
@@ -760,7 +760,7 @@ config TOSHIBA_RBTX4927
support this machine type

config TOSHIBA_RBTX4938
bool "Support for Toshiba RBTX4938 board"
bool "Toshiba RBTX4938 board"
select HAVE_STD_PC_SERIAL_PORT
select DMA_NONCOHERENT
select GENERIC_ISA_DMA
@@ -1411,13 +1411,12 @@ config PAGE_SIZE_8KB

config PAGE_SIZE_16KB
bool "16kB"
depends on EXPERIMENTAL && !CPU_R3000 && !CPU_TX39XX
depends on !CPU_R3000 && !CPU_TX39XX
help
Using 16kB page size will result in higher performance kernel at
the price of higher memory consumption. This option is available on
all non-R3000 family processor. Not that at the time of this
writing this option is still high experimental; there are also
issues with compatibility of user applications.
all non-R3000 family processors. Note that you will need a suitable
Linux distribution to support this.

config PAGE_SIZE_64KB
bool "64kB"
@@ -1426,8 +1425,7 @@ config PAGE_SIZE_64KB
Using 64kB page size will result in higher performance kernel at
the price of higher memory consumption. This option is available on
all non-R3000 family processor. Not that at the time of this
writing this option is still high experimental; there are also
issues with compatibility of user applications.
writing this option is still high experimental.

endchoice

@@ -68,6 +68,7 @@

extern void set_debug_traps(void);
extern irq_cpustat_t irq_stat [NR_CPUS];
extern void mips_timer_interrupt(struct pt_regs *regs);

static void setup_local_irq(unsigned int irq, int type, int int_req);
static unsigned int startup_irq(unsigned int irq);
@@ -1,10 +1,9 @@
/*
*
* BRIEF MODULE DESCRIPTION
* PROM library initialisation code, assuming a version of
* pmon is the boot code.
* PROM library initialisation code, assuming YAMON is the boot loader.
*
* Copyright 2000,2001 MontaVista Software Inc.
* Copyright 2000, 2001, 2006 MontaVista Software Inc.
* Author: MontaVista Software, Inc.
* ppopov@mvista.com or source@mvista.com
*
@@ -49,9 +48,9 @@ extern char **prom_argv, **prom_envp;

typedef struct
{
char *name;
/* char *val; */
}t_env_var;
char *name;
char *val;
} t_env_var;

char * prom_getcmdline(void)
@@ -85,21 +84,16 @@ char *prom_getenv(char *envname)
{
/*
* Return a pointer to the given environment variable.
* Environment variables are stored in the form of "memsize=64".
*/

t_env_var *env = (t_env_var *)prom_envp;
int i;

i = strlen(envname);

while(env->name) {
if(strncmp(envname, env->name, i) == 0) {
return(env->name + strlen(envname) + 1);
}
while (env->name) {
if (strcmp(envname, env->name) == 0)
return env->val;
env++;
}
return(NULL);
return NULL;
}

inline unsigned char str2hexnum(unsigned char c)
@@ -112,6 +112,11 @@ sdsleep:
mtc0 k0, CP0_PAGEMASK
lw k0, 0x14(sp)
mtc0 k0, CP0_CONFIG

/* We need to catch the early Alchemy SOCs with
* the write-only Config[OD] bit and set it back to one...
*/
jal au1x00_fixup_config_od
lw $1, PT_R1(sp)
lw $2, PT_R2(sp)
lw $3, PT_R3(sp)
@@ -116,6 +116,7 @@ void mips_timer_interrupt(struct pt_regs *regs)

null:
ack_r4ktimer(0);
irq_exit();
}

#ifdef CONFIG_PM
@@ -86,7 +86,7 @@ void debugInit(uint32 baud, uint8 data, uint8 parity, uint8 stop)
/* disable interrupts */
UART16550_WRITE(OFS_INTR_ENABLE, 0);

/* set up buad rate */
/* set up baud rate */
{
uint32 divisor;

@@ -86,7 +86,7 @@ void debugInit(uint32 baud, uint8 data, uint8 parity, uint8 stop)
/* disable interrupts */
UART16550_WRITE(OFS_INTR_ENABLE, 0);

/* set up buad rate */
/* set up baud rate */
{
uint32 divisor;

@@ -149,7 +149,7 @@ void serial_set(int channel, unsigned long baud)
#else
/*
* Note: Set baud rate, hardcoded here for rate of 115200
* since became unsure of above "buad rate" algorithm (??).
* since became unsure of above "baud rate" algorithm (??).
*/
outreg(channel, LCR, 0x83);
outreg(channel, DLM, 0x00); // See note above
@@ -73,7 +73,7 @@ void debugInit(uint32 baud, uint8 data, uint8 parity, uint8 stop)
/* disable interrupts */
UART16550_WRITE(OFS_INTR_ENABLE, 0);

/* set up buad rate */
/* set up baud rate */
{
uint32 divisor;

@@ -72,7 +72,7 @@ void debugInit(uint32 baud, uint8 data, uint8 parity, uint8 stop)
/* disable interrupts */
UART16550_WRITE(OFS_INTR_ENABLE, 0);

/* set up buad rate */
/* set up baud rate */
{
uint32 divisor;

@@ -272,8 +272,8 @@ void output_sc_defines(void)
text("/* Linux sigcontext offsets. */");
offset("#define SC_REGS ", struct sigcontext, sc_regs);
offset("#define SC_FPREGS ", struct sigcontext, sc_fpregs);
offset("#define SC_MDHI ", struct sigcontext, sc_hi);
offset("#define SC_MDLO ", struct sigcontext, sc_lo);
offset("#define SC_MDHI ", struct sigcontext, sc_mdhi);
offset("#define SC_MDLO ", struct sigcontext, sc_mdlo);
offset("#define SC_PC ", struct sigcontext, sc_pc);
offset("#define SC_FPC_CSR ", struct sigcontext, sc_fpc_csr);
linefeed;
@@ -206,7 +206,7 @@ static inline void check_daddi(void)
"daddi %0, %1, %3\n\t"
".set pop"
: "=r" (v), "=&r" (tmp)
: "I" (0xffffffffffffdb9a), "I" (0x1234));
: "I" (0xffffffffffffdb9aUL), "I" (0x1234));
set_except_vector(12, handler);
local_irq_restore(flags);

@@ -224,7 +224,7 @@ static inline void check_daddi(void)
"dsrl %1, %1, 1\n\t"
"daddi %0, %1, %3"
: "=r" (v), "=&r" (tmp)
: "I" (0xffffffffffffdb9a), "I" (0x1234));
: "I" (0xffffffffffffdb9aUL), "I" (0x1234));
set_except_vector(12, handler);
local_irq_restore(flags);

@@ -280,7 +280,7 @@ static inline void check_daddiu(void)
"daddu %1, %2\n\t"
".set pop"
: "=&r" (v), "=&r" (w), "=&r" (tmp)
: "I" (0xffffffffffffdb9a), "I" (0x1234));
: "I" (0xffffffffffffdb9aUL), "I" (0x1234));

if (v == w) {
printk("no.\n");
@@ -296,7 +296,7 @@ static inline void check_daddiu(void)
"addiu %1, $0, %4\n\t"
"daddu %1, %2"
: "=&r" (v), "=&r" (w), "=&r" (tmp)
: "I" (0xffffffffffffdb9a), "I" (0x1234));
: "I" (0xffffffffffffdb9aUL), "I" (0x1234));

if (v == w) {
printk("yes.\n");
@@ -121,6 +121,7 @@ static inline void check_wait(void)
case CPU_24K:
case CPU_25KF:
case CPU_34K:
case CPU_74K:
case CPU_PR4450:
cpu_wait = r4k_wait;
printk(" available.\n");
@@ -432,6 +433,15 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c)
MIPS_CPU_LLSC;
c->tlbsize = 64;
break;
case PRID_IMP_R14000:
c->cputype = CPU_R14000;
c->isa_level = MIPS_CPU_ISA_IV;
c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
MIPS_CPU_LLSC;
c->tlbsize = 64;
break;
}
}

@@ -593,6 +603,9 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c)
case PRID_IMP_34K:
c->cputype = CPU_34K;
break;
case PRID_IMP_74K:
c->cputype = CPU_74K;
break;
}
}

@@ -642,7 +655,7 @@ static inline void cpu_probe_sibyte(struct cpuinfo_mips *c)
case PRID_IMP_SB1:
c->cputype = CPU_SB1;
/* FPU in pass1 is known to have issues. */
if ((c->processor_id & 0xff) < 0x20)
if ((c->processor_id & 0xff) < 0x02)
c->options &= ~(MIPS_CPU_FPU | MIPS_CPU_32FPR);
break;
case PRID_IMP_SB1A:
@@ -101,7 +101,7 @@ FEXPORT(restore_all) # restore full frame
EMT
1:
mfc0 v1, CP0_TCSTATUS
/* We set IXMT above, XOR should cler it here */
/* We set IXMT above, XOR should clear it here */
xori v1, v1, TCSTATUS_IXMT
or v1, v0, v1
mtc0 v1, CP0_TCSTATUS
@@ -54,9 +54,11 @@
*/
mfc0 k0, CP0_CAUSE
andi k0, k0, 0x7c
add k1, k1, k0
PTR_L k0, saved_vectors(k1)
jr k0
#ifdef CONFIG_64BIT
dsll k0, k0, 1
#endif
PTR_L k1, saved_vectors(k0)
jr k1
nop
1:
move k0, sp
@@ -288,6 +288,9 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
sym = (Elf_Sym *)sechdrs[symindex].sh_addr
+ ELF_MIPS_R_SYM(rel[i]);
if (!sym->st_value) {
/* Ignore unresolved weak symbol */
if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
continue;
printk(KERN_WARNING "%s: Unknown symbol %s\n",
me->name, strtab + sym->st_name);
return -ENOENT;
@@ -325,6 +328,9 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
sym = (Elf_Sym *)sechdrs[symindex].sh_addr
+ ELF_MIPS_R_SYM(rel[i]);
if (!sym->st_value) {
/* Ignore unresolved weak symbol */
if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
continue;
printk(KERN_WARNING "%s: Unknown symbol %s\n",
me->name, strtab + sym->st_name);
return -ENOENT;
@@ -42,6 +42,7 @@ static const char *cpu_name[] = {
[CPU_R8000] = "R8000",
[CPU_R10000] = "R10000",
[CPU_R12000] = "R12000",
[CPU_R14000] = "R14000",
[CPU_R4300] = "R4300",
[CPU_R4650] = "R4650",
[CPU_R4700] = "R4700",
@@ -74,6 +75,7 @@ static const char *cpu_name[] = {
[CPU_24K] = "MIPS 24K",
[CPU_25KF] = "MIPS 25Kf",
[CPU_34K] = "MIPS 34K",
[CPU_74K] = "MIPS 74K",
[CPU_VR4111] = "NEC VR4111",
[CPU_VR4121] = "NEC VR4121",
[CPU_VR4122] = "NEC VR4122",
@@ -209,7 +209,7 @@ sys_call_table:
PTR sys_fork
PTR sys_read
PTR sys_write
PTR sys_open /* 4005 */
PTR compat_sys_open /* 4005 */
PTR sys_close
PTR sys_waitpid
PTR sys_creat
@@ -246,7 +246,7 @@ static inline int parse_rd_cmdline(unsigned long* rd_start, unsigned long* rd_en
#ifdef CONFIG_64BIT
/* HACK: Guess if the sign extension was forgotten */
if (start > 0x0000000080000000 && start < 0x00000000ffffffff)
start |= 0xffffffff00000000;
start |= 0xffffffff00000000UL;
#endif

end = start + size;
@@ -355,8 +355,6 @@ static inline void bootmem_init(void)
}
#endif

memory_present(0, first_usable_pfn, max_low_pfn);

/* Initialize the boot-time allocator with low memory only. */
bootmap_size = init_bootmem(first_usable_pfn, max_low_pfn);

@@ -410,6 +408,7 @@ static inline void bootmem_init(void)

/* Register lowmem ranges */
free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
memory_present(0, curr_pfn, curr_pfn + size - 1);
}

/* Reserve the bootmap memory. */
@@ -419,17 +418,20 @@ static inline void bootmem_init(void)
#ifdef CONFIG_BLK_DEV_INITRD
initrd_below_start_ok = 1;
if (initrd_start) {
unsigned long initrd_size = ((unsigned char *)initrd_end) - ((unsigned char *)initrd_start);
unsigned long initrd_size = ((unsigned char *)initrd_end) -
((unsigned char *)initrd_start);
const int width = sizeof(long) * 2;

printk("Initial ramdisk at: 0x%p (%lu bytes)\n",
(void *)initrd_start, initrd_size);

if (CPHYSADDR(initrd_end) > PFN_PHYS(max_low_pfn)) {
printk("initrd extends beyond end of memory "
"(0x%0*Lx > 0x%0*Lx)\ndisabling initrd\n",
sizeof(long) * 2,
(unsigned long long)CPHYSADDR(initrd_end),
sizeof(long) * 2,
(unsigned long long)PFN_PHYS(max_low_pfn));
width,
(unsigned long long) CPHYSADDR(initrd_end),
width,
(unsigned long long) PFN_PHYS(max_low_pfn));
initrd_start = initrd_end = 0;
initrd_reserve_bootmem = 0;
}
@@ -31,7 +31,6 @@ setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
save_gp_reg(31);
#undef save_gp_reg

#ifdef CONFIG_32BIT
err |= __put_user(regs->hi, &sc->sc_mdhi);
err |= __put_user(regs->lo, &sc->sc_mdlo);
if (cpu_has_dsp) {
@@ -43,20 +42,6 @@ setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
err |= __put_user(mflo3(), &sc->sc_lo3);
err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
}
#endif
#ifdef CONFIG_64BIT
err |= __put_user(regs->hi, &sc->sc_hi[0]);
err |= __put_user(regs->lo, &sc->sc_lo[0]);
if (cpu_has_dsp) {
err |= __put_user(mfhi1(), &sc->sc_hi[1]);
err |= __put_user(mflo1(), &sc->sc_lo[1]);
err |= __put_user(mfhi2(), &sc->sc_hi[2]);
err |= __put_user(mflo2(), &sc->sc_lo[2]);
err |= __put_user(mfhi3(), &sc->sc_hi[3]);
err |= __put_user(mflo3(), &sc->sc_lo[3]);
err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
}
#endif

err |= __put_user(!!used_math(), &sc->sc_used_math);

@@ -92,7 +77,6 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
current_thread_info()->restart_block.fn = do_no_restart_syscall;

err |= __get_user(regs->cp0_epc, &sc->sc_pc);
#ifdef CONFIG_32BIT
err |= __get_user(regs->hi, &sc->sc_mdhi);
err |= __get_user(regs->lo, &sc->sc_mdlo);
if (cpu_has_dsp) {
@@ -104,20 +88,6 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
}
#endif
#ifdef CONFIG_64BIT
err |= __get_user(regs->hi, &sc->sc_hi[0]);
err |= __get_user(regs->lo, &sc->sc_lo[0]);
if (cpu_has_dsp) {
err |= __get_user(treg, &sc->sc_hi[1]); mthi1(treg);
err |= __get_user(treg, &sc->sc_lo[1]); mthi1(treg);
err |= __get_user(treg, &sc->sc_hi[2]); mthi2(treg);
err |= __get_user(treg, &sc->sc_lo[2]); mthi2(treg);
err |= __get_user(treg, &sc->sc_hi[3]); mthi3(treg);
err |= __get_user(treg, &sc->sc_lo[3]); mthi3(treg);
err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
}
#endif

#define restore_gp_reg(i) do { \
err |= __get_user(regs->regs[i], &sc->sc_regs[i]); \
@ -247,6 +247,9 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
|
||||
current_thread_info()->cpu = 0;
|
||||
smp_tune_scheduling();
|
||||
plat_prepare_cpus(max_cpus);
|
||||
#ifndef CONFIG_HOTPLUG_CPU
|
||||
cpu_present_map = cpu_possible_map;
|
||||
#endif
|
||||
}
|
||||
|
||||
/* preload SMP state for boot cpu */
|
||||
@ -442,7 +445,7 @@ static int __init topology_init(void)
|
||||
int cpu;
|
||||
int ret;
|
||||
|
||||
for_each_cpu(cpu) {
|
||||
for_each_present_cpu(cpu) {
|
||||
ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
|
||||
if (ret)
|
||||
printk(KERN_WARNING "topology_init: register_cpu %d "
|
||||
|
@ -276,31 +276,9 @@ void sys_set_thread_area(unsigned long addr)
|
||||
|
||||
asmlinkage int _sys_sysmips(int cmd, long arg1, int arg2, int arg3)
|
||||
{
|
||||
int tmp, len;
|
||||
char __user *name;
|
||||
int tmp;
|
||||
|
||||
switch(cmd) {
|
||||
case SETNAME: {
|
||||
char nodename[__NEW_UTS_LEN + 1];
|
||||
|
||||
if (!capable(CAP_SYS_ADMIN))
|
||||
return -EPERM;
|
||||
|
||||
name = (char __user *) arg1;
|
||||
|
||||
len = strncpy_from_user(nodename, name, __NEW_UTS_LEN);
|
||||
if (len < 0)
|
||||
return -EFAULT;
|
||||
|
||||
down_write(&uts_sem);
|
||||
strncpy(system_utsname.nodename, nodename, len);
|
||||
nodename[__NEW_UTS_LEN] = '\0';
|
||||
strlcpy(system_utsname.nodename, nodename,
|
||||
sizeof(system_utsname.nodename));
|
||||
up_write(&uts_sem);
|
||||
return 0;
|
||||
}
|
||||
|
||||
case MIPS_ATOMIC_SET:
|
||||
printk(KERN_CRIT "How did I get here?\n");
|
||||
return -EINVAL;
|
||||
@ -313,9 +291,6 @@ asmlinkage int _sys_sysmips(int cmd, long arg1, int arg2, int arg3)
|
||||
case FLUSH_CACHE:
|
||||
__flush_cache_all();
|
||||
return 0;
|
||||
|
||||
case MIPS_RDNVRAM:
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
return -EINVAL;
|
||||
|
@ -819,15 +819,30 @@ asmlinkage void do_watch(struct pt_regs *regs)

asmlinkage void do_mcheck(struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
int multi_match = regs->cp0_status & ST0_TS;

show_regs(regs);
dump_tlb_all();

if (multi_match) {
printk("Index : %0x\n", read_c0_index());
printk("Pagemask: %0x\n", read_c0_pagemask());
printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
printk("\n");
dump_tlb_all();
}

show_code((unsigned int *) regs->cp0_epc);

/*
 * Some chips may have other causes of machine check (e.g. SB1
 * graduation timer)
 */
panic("Caught Machine Check exception - %scaused by multiple "
"matching entries in the TLB.",
(regs->cp0_status & ST0_TS) ? "" : "not ");
(multi_match) ? "" : "not ");
}

asmlinkage void do_mt(struct pt_regs *regs)

@ -902,6 +917,7 @@ static inline void parity_protection_init(void)
{
switch (current_cpu_data.cputype) {
case CPU_24K:
case CPU_34K:
case CPU_5KC:
write_c0_ecc(0x80000000);
back_to_back_c0_hazard();
@ -151,23 +151,13 @@ SECTIONS

/* This is the MIPS specific mdebug section. */
.mdebug : { *(.mdebug) }
/* These are needed for ELF backends which have not yet been
converted to the new style linker. */
.stab 0 : { *(.stab) }
.stabstr 0 : { *(.stabstr) }
/* DWARF debug sections.
Symbols in the .debug DWARF section are relative to the beginning of the
section so we begin .debug at 0. It's not clear yet what needs to happen
for the others. */
.debug 0 : { *(.debug) }
.debug_srcinfo 0 : { *(.debug_srcinfo) }
.debug_aranges 0 : { *(.debug_aranges) }
.debug_pubnames 0 : { *(.debug_pubnames) }
.debug_sfnames 0 : { *(.debug_sfnames) }
.line 0 : { *(.line) }

STABS_DEBUG

DWARF_DEBUG

/* These must appear regardless of . */
.gptab.sdata : { *(.gptab.data) *(.gptab.sdata) }
.gptab.sbss : { *(.gptab.bss) *(.gptab.sbss) }
.comment : { *(.comment) }
.note : { *(.note) }
}
@ -29,7 +29,9 @@

ieee754dp ieee754dp_fint(int x)
{
COMPXDP;
u64 xm;
int xe;
int xs;

CLEARCX;

@ -29,7 +29,9 @@

ieee754dp ieee754dp_flong(s64 x)
{
COMPXDP;
u64 xm;
int xe;
int xs;

CLEARCX;

@ -29,7 +29,9 @@

ieee754sp ieee754sp_fint(int x)
{
COMPXSP;
unsigned xm;
int xe;
int xs;

CLEARCX;

@ -29,7 +29,9 @@

ieee754sp ieee754sp_flong(s64 x)
{
COMPXDP; /* <--- need 64-bit mantissa temp */
u64 xm; /* <--- need 64-bit mantissa temp */
int xe;
int xs;

CLEARCX;
@ -29,6 +29,27 @@
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */

/*
 * Special Variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
                                   int retry, int wait)
{
preempt_disable();

#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
smp_call_function(func, info, retry, wait);
#endif
func(info);
preempt_enable();
}
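[!] Editor's sketch, not part of the patch: the calling convention
r4k_on_each_cpu() expects. local_flush_worker is a hypothetical per-CPU
callback invented for illustration; the retry/wait arguments of 1, 1
match the converted call sites below.

    /* Runs on every CPU owning a primary cache; on UP kernels and
     * single-shared-cache systems it collapses to one local call. */
    static void local_flush_worker(void *info)
    {
            /* touch only this CPU's caches here */
    }

    static void flush_everywhere(void)
    {
            r4k_on_each_cpu(local_flush_worker, NULL, 1, 1);
    }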
/*
 * Must die.
 */

@ -299,7 +320,7 @@ static void r4k_flush_cache_all(void)
if (!cpu_has_dc_aliases)
return;

on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1);
r4k_on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1);
}

static inline void local_r4k___flush_cache_all(void * args)

@ -314,13 +335,14 @@ static inline void local_r4k___flush_cache_all(void * args)
case CPU_R4400MC:
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
r4k_blast_scache();
}
}

static void r4k___flush_cache_all(void)
{
on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
}

static inline void local_r4k_flush_cache_range(void * args)

@ -341,7 +363,7 @@ static inline void local_r4k_flush_cache_range(void * args)
static void r4k_flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
}

static inline void local_r4k_flush_cache_mm(void * args)

@ -370,7 +392,7 @@ static void r4k_flush_cache_mm(struct mm_struct *mm)
if (!cpu_has_dc_aliases)
return;

on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
}

struct flush_cache_page_args {

@ -461,7 +483,7 @@ static void r4k_flush_cache_page(struct vm_area_struct *vma,
args.addr = addr;
args.pfn = pfn;

on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
}

static inline void local_r4k_flush_data_cache_page(void * addr)

@ -471,7 +493,7 @@ static inline void local_r4k_flush_data_cache_page(void * addr)

static void r4k_flush_data_cache_page(unsigned long addr)
{
on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1);
r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1);
}

struct flush_icache_range_args {

@ -514,7 +536,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
args.start = start;
args.end = end;

on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
instruction_hazard();
}

@ -590,7 +612,7 @@ static void r4k_flush_icache_page(struct vm_area_struct *vma,
args.vma = vma;
args.page = page;

on_each_cpu(local_r4k_flush_icache_page, &args, 1, 1);
r4k_on_each_cpu(local_r4k_flush_icache_page, &args, 1, 1);
}

@ -689,7 +711,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg)

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
}

static void r4k_flush_icache_all(void)

@ -812,6 +834,7 @@ static void __init probe_pcache(void)

case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
c->icache.linesz = 64;
c->icache.ways = 2;

@ -965,9 +988,11 @@ static void __init probe_pcache(void)
c->dcache.flags |= MIPS_CACHE_PINDEX;
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
case CPU_SB1:
break;
case CPU_24K:
case CPU_34K:
if (!(read_c0_config7() & (1 << 16)))
default:
if (c->dcache.waysize > PAGE_SIZE)

@ -1091,6 +1116,7 @@ static void __init setup_scache(void)

case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
c->scache.linesz = 64 << ((config >> 13) & 1);
c->scache.ways = 2;

@ -1135,6 +1161,31 @@ static void __init setup_scache(void)
c->options |= MIPS_CPU_SUBSET_CACHES;
}
void au1x00_fixup_config_od(void)
{
/*
 * c0_config.od (bit 19) was write only (and read as 0)
 * on the early revisions of Alchemy SOCs.  It disables the bus
 * transaction overlapping and needs to be set to fix various errata.
 */
switch (read_c0_prid()) {
case 0x00030100: /* Au1000 DA */
case 0x00030201: /* Au1000 HA */
case 0x00030202: /* Au1000 HB */
case 0x01030200: /* Au1500 AB */
/*
 * Au1100 errata actually keeps silence about this bit, so we set it
 * just in case for those revisions that require it to be set according
 * to arch/mips/au1000/common/cputable.c
 */
case 0x02030200: /* Au1100 AB */
case 0x02030201: /* Au1100 BA */
case 0x02030202: /* Au1100 BC */
set_c0_config(1 << 19);
break;
}
}
static inline void coherency_setup(void)
{
change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);

@ -1155,6 +1206,15 @@ static inline void coherency_setup(void)
case CPU_R4400MC:
clear_c0_config(CONF_CU);
break;
/*
 * We need to catch the early Alchemy SOCs with
 * the write-only c0_config.od bit and set it back to one...
 */
case CPU_AU1000: /* rev. DA, HA, HB */
case CPU_AU1100: /* rev. AB, BA, BC ?? */
case CPU_AU1500: /* rev. AB */
au1x00_fixup_config_od();
break;
}
}
@ -227,7 +227,7 @@ void __init mem_init(void)
for (tmp = 0; tmp < max_low_pfn; tmp++)
if (page_is_ram(tmp)) {
ram++;
if (PageReserved(mem_map+tmp))
if (PageReserved(pfn_to_page(tmp)))
reservedpages++;
}

@ -357,6 +357,7 @@ void __init build_clear_page(void)

case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
pref_src_mode = Pref_LoadStreamed;
pref_dst_mode = Pref_StoreStreamed;
break;

@ -875,6 +875,7 @@ static __init void build_tlb_write_entry(u32 **p, struct label **l,

case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
case CPU_4KC:
case CPU_SB1:
case CPU_SB1A:

@ -906,6 +907,7 @@ static __init void build_tlb_write_entry(u32 **p, struct label **l,
case CPU_4KEC:
case CPU_24K:
case CPU_34K:
case CPU_74K:
i_ehb(p);
tlbw(p);
break;

@ -73,7 +73,7 @@ void debugInit(uint32 baud, uint8 data, uint8 parity, uint8 stop)
/* disable interrupts */
UART16550_WRITE(OFS_INTR_ENABLE, 0);

/* set up buad rate */
/* set up baud rate */
{
uint32 divisor;

@ -73,7 +73,7 @@ void debugInit(uint32 baud, uint8 data, uint8 parity, uint8 stop)
/* disable interrupts */
UART16550_WRITE(OFS_INTR_ENABLE, 0);

/* set up buad rate */
/* set up baud rate */
{
uint32 divisor;

@ -73,7 +73,7 @@ void debugInit(uint32 baud, uint8 data, uint8 parity, uint8 stop)
/* disable interrupts */
UART16550_WRITE(OFS_INTR_ENABLE, 0);

/* set up buad rate */
/* set up baud rate */
{
uint32 divisor;
@ -14,8 +14,8 @@

#include "op_impl.h"

extern struct op_mips_model op_model_mipsxx __attribute__((weak));
extern struct op_mips_model op_model_rm9000 __attribute__((weak));
extern struct op_mips_model op_model_mipsxx_ops __attribute__((weak));
extern struct op_mips_model op_model_rm9000_ops __attribute__((weak));

static struct op_mips_model *model;

@ -80,13 +80,14 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
case CPU_24K:
case CPU_25KF:
case CPU_34K:
case CPU_74K:
case CPU_SB1:
case CPU_SB1A:
lmodel = &op_model_mipsxx;
lmodel = &op_model_mipsxx_ops;
break;

case CPU_RM9000:
lmodel = &op_model_rm9000;
lmodel = &op_model_rm9000_ops;
break;
};

@ -23,7 +23,7 @@

#define M_COUNTER_OVERFLOW (1UL << 31)

struct op_mips_model op_model_mipsxx;
struct op_mips_model op_model_mipsxx_ops;

static struct mipsxx_register_config {
unsigned int control[4];

@ -34,7 +34,7 @@ static struct mipsxx_register_config {

static void mipsxx_reg_setup(struct op_counter_config *ctr)
{
unsigned int counters = op_model_mipsxx.num_counters;
unsigned int counters = op_model_mipsxx_ops.num_counters;
int i;

/* Compute the performance counter control word. */

@ -62,7 +62,7 @@ static void mipsxx_reg_setup(struct op_counter_config *ctr)

static void mipsxx_cpu_setup (void *args)
{
unsigned int counters = op_model_mipsxx.num_counters;
unsigned int counters = op_model_mipsxx_ops.num_counters;

switch (counters) {
case 4:

@ -83,7 +83,7 @@ static void mipsxx_cpu_setup (void *args)
/* Start all counters on current CPU */
static void mipsxx_cpu_start(void *args)
{
unsigned int counters = op_model_mipsxx.num_counters;
unsigned int counters = op_model_mipsxx_ops.num_counters;

switch (counters) {
case 4:

@ -100,7 +100,7 @@ static void mipsxx_cpu_start(void *args)
/* Stop all counters on current CPU */
static void mipsxx_cpu_stop(void *args)
{
unsigned int counters = op_model_mipsxx.num_counters;
unsigned int counters = op_model_mipsxx_ops.num_counters;

switch (counters) {
case 4:

@ -116,7 +116,7 @@ static void mipsxx_cpu_stop(void *args)

static int mipsxx_perfcount_handler(struct pt_regs *regs)
{
unsigned int counters = op_model_mipsxx.num_counters;
unsigned int counters = op_model_mipsxx_ops.num_counters;
unsigned int control;
unsigned int counter;
int handled = 0;

@ -187,33 +187,37 @@ static int __init mipsxx_init(void)

reset_counters(counters);

op_model_mipsxx.num_counters = counters;
op_model_mipsxx_ops.num_counters = counters;
switch (current_cpu_data.cputype) {
case CPU_20KC:
op_model_mipsxx.cpu_type = "mips/20K";
op_model_mipsxx_ops.cpu_type = "mips/20K";
break;

case CPU_24K:
op_model_mipsxx.cpu_type = "mips/24K";
op_model_mipsxx_ops.cpu_type = "mips/24K";
break;

case CPU_25KF:
op_model_mipsxx.cpu_type = "mips/25K";
op_model_mipsxx_ops.cpu_type = "mips/25K";
break;

#ifndef CONFIG_SMP
case CPU_34K:
op_model_mipsxx.cpu_type = "mips/34K";
op_model_mipsxx_ops.cpu_type = "mips/34K";
break;

case CPU_74K:
op_model_mipsxx_ops.cpu_type = "mips/74K";
break;
#endif

case CPU_5KC:
op_model_mipsxx.cpu_type = "mips/5K";
op_model_mipsxx_ops.cpu_type = "mips/5K";
break;

case CPU_SB1:
case CPU_SB1A:
op_model_mipsxx.cpu_type = "mips/sb1";
op_model_mipsxx_ops.cpu_type = "mips/sb1";
break;

default:

@ -229,12 +233,12 @@ static int __init mipsxx_init(void)

static void mipsxx_exit(void)
{
reset_counters(op_model_mipsxx.num_counters);
reset_counters(op_model_mipsxx_ops.num_counters);

perf_irq = null_perf_irq;
}

struct op_mips_model op_model_mipsxx = {
struct op_mips_model op_model_mipsxx_ops = {
.reg_setup = mipsxx_reg_setup,
.cpu_setup = mipsxx_cpu_setup,
.init = mipsxx_init,

@ -126,7 +126,7 @@ static void rm9000_exit(void)
free_irq(rm9000_perfcount_irq, NULL);
}

struct op_mips_model op_model_rm9000 = {
struct op_mips_model op_model_rm9000_ops = {
.reg_setup = rm9000_reg_setup,
.cpu_setup = rm9000_cpu_setup,
.init = rm9000_init,
@ -31,12 +31,12 @@
/* issue a PIO read to make sure no PIO writes are pending */
static void inline flush_crime_bus(void)
{
volatile unsigned long junk = crime->control;
crime->control;
}

static void inline flush_mace_bus(void)
{
volatile unsigned long junk = mace->perif.ctrl.misc;
mace->perif.ctrl.misc;
}
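[!] Editor's note, not from the patch: the rewrite works assuming
crime->control and mace->perif.ctrl.misc are reads through
volatile-qualified register mappings, as the IP32 code declares them.
The bare access is then itself the flushing PIO read, and the dummy
`junk` locals (plus their unused-variable warnings) were never needed:

    volatile struct crime_regs *crime;  /* hypothetical declaration */

    crime->control;   /* volatile read happens even with no lvalue use */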
#undef DEBUG_IRQ
@ -822,6 +822,7 @@ static void __init prom_send_capabilities(void)
/* try calling the ibm,client-architecture-support method */
if (call_prom_ret("call-method", 3, 2, &ret,
ADDR("ibm,client-architecture-support"),
root,
ADDR(ibm_architecture_vec)) == 0) {
/* the call exists... */
if (ret)

@ -1622,6 +1623,15 @@ static int __init prom_find_machine_type(void)
if (strstr(p, RELOC("Power Macintosh")) ||
strstr(p, RELOC("MacRISC")))
return PLATFORM_POWERMAC;
#ifdef CONFIG_PPC64
/* We must make sure we don't detect the IBM Cell
 * blades as pSeries due to some firmware issues,
 * so we do it here.
 */
if (strstr(p, RELOC("IBM,CBEA")) ||
strstr(p, RELOC("IBM,CPBW-1.0")))
return PLATFORM_GENERIC;
#endif /* CONFIG_PPC64 */
i += sl + 1;
}
}

@ -803,10 +803,13 @@ static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int
if (__get_user(cmcp, &ucp->uc_regs))
return -EFAULT;
mcp = (struct mcontext __user *)(u64)cmcp;
/* no need to check access_ok(mcp), since mcp < 4GB */
}
#else
if (__get_user(mcp, &ucp->uc_regs))
return -EFAULT;
if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
return -EFAULT;
#endif
restore_sigmask(&set);
if (restore_user_regs(regs, mcp, sig))

@ -908,13 +911,14 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
{
struct sig_dbg_op op;
int i;
unsigned char tmp;
unsigned long new_msr = regs->msr;
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
unsigned long new_dbcr0 = current->thread.dbcr0;
#endif

for (i=0; i<ndbg; i++) {
if (__copy_from_user(&op, dbg, sizeof(op)))
if (copy_from_user(&op, dbg + i, sizeof(op)))
return -EFAULT;
switch (op.dbg_type) {
case SIG_DBG_SINGLE_STEPPING:

@ -959,6 +963,11 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
current->thread.dbcr0 = new_dbcr0;
#endif

if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))
|| __get_user(tmp, (u8 __user *) ctx)
|| __get_user(tmp, (u8 __user *) (ctx + 1) - 1))
return -EFAULT;

/*
 * If we get a fault copying the context into the kernel's
 * image of the user's registers, we can't just return -EFAULT

@ -182,6 +182,8 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
if (err)
return err;
if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128)))
return -EFAULT;
/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
if (v_regs != 0 && (msr & MSR_VEC) != 0)
err |= __copy_from_user(current->thread.vr, v_regs,

@ -125,14 +125,13 @@ static void __init cell_init_early(void)

static int __init cell_probe(void)
{
/* XXX This is temporary, the Cell maintainer will come up with
 * more appropriate detection logic
 */
unsigned long root = of_get_flat_dt_root();
if (!of_flat_dt_is_compatible(root, "IBM,CPBW-1.0"))
return 0;

return 1;
if (of_flat_dt_is_compatible(root, "IBM,CBEA") ||
of_flat_dt_is_compatible(root, "IBM,CPBW-1.0"))
return 1;

return 0;
}

/*

@ -1157,6 +1157,7 @@ EXPORT_SYMBOL_GPL(pmac_i2c_xfer);
/* some quirks for platform function decoding */
enum {
pmac_i2c_quirk_invmask = 0x00000001u,
pmac_i2c_quirk_skip = 0x00000002u,
};

static void pmac_i2c_devscan(void (*callback)(struct device_node *dev,
@ -1172,6 +1173,15 @@ static void pmac_i2c_devscan(void (*callback)(struct device_node *dev,
/* XXX Study device-trees & Apple drivers to get the quirks
 * right!
 */
/* Workaround: It seems that running the clockspreading
 * properties on the eMac will cause lockups during boot.
 * The machine seems to work fine without that. So for now,
 * let's make sure i2c-hwclock doesn't match about "imic"
 * clocks and we'll figure out if we really need to do
 * something special about those later.
 */
{ "i2c-hwclock", "imic5002", pmac_i2c_quirk_skip },
{ "i2c-hwclock", "imic5003", pmac_i2c_quirk_skip },
{ "i2c-hwclock", NULL, pmac_i2c_quirk_invmask },
{ "i2c-cpu-voltage", NULL, 0},
{ "temp-monitor", NULL, 0 },

@ -1198,6 +1208,8 @@ static void pmac_i2c_devscan(void (*callback)(struct device_node *dev,
if (p->compatible &&
!device_is_compatible(np, p->compatible))
continue;
if (p->quirks & pmac_i2c_quirk_skip)
break;
callback(np, p->quirks);
break;
}

@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/mutex.h>

#include <asm/semaphore.h>
#include <asm/prom.h>

@ -546,6 +547,7 @@ struct pmf_device {

static LIST_HEAD(pmf_devices);
static spinlock_t pmf_lock = SPIN_LOCK_UNLOCKED;
static DEFINE_MUTEX(pmf_irq_mutex);

static void pmf_release_device(struct kref *kref)
{

@ -864,15 +866,17 @@ int pmf_register_irq_client(struct device_node *target,

spin_lock_irqsave(&pmf_lock, flags);
func = __pmf_find_function(target, name, PMF_FLAGS_INT_GEN);
if (func == NULL) {
spin_unlock_irqrestore(&pmf_lock, flags);
if (func)
func = pmf_get_function(func);
spin_unlock_irqrestore(&pmf_lock, flags);
if (func == NULL)
return -ENODEV;
}
mutex_lock(&pmf_irq_mutex);
if (list_empty(&func->irq_clients))
func->dev->handlers->irq_enable(func);
list_add(&client->link, &func->irq_clients);
client->func = func;
spin_unlock_irqrestore(&pmf_lock, flags);
mutex_unlock(&pmf_irq_mutex);

return 0;
}

@ -881,16 +885,16 @@ EXPORT_SYMBOL_GPL(pmf_register_irq_client);
void pmf_unregister_irq_client(struct pmf_irq_client *client)
{
struct pmf_function *func = client->func;
unsigned long flags;

BUG_ON(func == NULL);

spin_lock_irqsave(&pmf_lock, flags);
mutex_lock(&pmf_irq_mutex);
client->func = NULL;
list_del(&client->link);
if (list_empty(&func->irq_clients))
func->dev->handlers->irq_disable(func);
spin_unlock_irqrestore(&pmf_lock, flags);
mutex_unlock(&pmf_irq_mutex);
pmf_put_function(func);
}
EXPORT_SYMBOL_GPL(pmf_unregister_irq_client);
@ -389,6 +389,7 @@ static int __init pSeries_probe_hypertas(unsigned long node,

static int __init pSeries_probe(void)
{
unsigned long root = of_get_flat_dt_root();
char *dtype = of_get_flat_dt_prop(of_get_flat_dt_root(),
"device_type", NULL);
if (dtype == NULL)

@ -396,6 +397,13 @@ static int __init pSeries_probe(void)
if (strcmp(dtype, "chrp"))
return 0;

/* Cell blades firmware claims to be chrp while it's not. Until this
 * is fixed, we need to avoid those here.
 */
if (of_flat_dt_is_compatible(root, "IBM,CPBW-1.0") ||
of_flat_dt_is_compatible(root, "IBM,CBEA"))
return 0;

DBG("pSeries detected, looking for LPAR capability...\n");

/* Now try to figure out if we are running on LPAR */

@ -69,6 +69,17 @@ void __init smp_store_cpu_info(int id)
"clock-frequency", 0);
cpu_data(id).prom_node = cpu_node;
cpu_data(id).mid = cpu_get_hwmid(cpu_node);

/* this is required to tune the scheduler correctly */
/* is it possible to have CPUs with different cache sizes? */
if (id == boot_cpu_id) {
int cache_line,cache_nlines;
cache_line = 0x20;
cache_line = prom_getintdefault(cpu_node, "ecache-line-size", cache_line);
cache_nlines = 0x8000;
cache_nlines = prom_getintdefault(cpu_node, "ecache-nlines", cache_nlines);
max_cache_size = cache_line * cache_nlines;
}
if (cpu_data(id).mid < 0)
panic("No MID found for CPU%d at node 0x%08d", id, cpu_node);
}
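[!] Editor's aside, worked from the fallback values above: if the PROM
reports neither property, the default is 0x20 bytes/line * 0x8000 lines
= 0x100000 bytes, i.e. a 1MB E-cache; either PROM property overrides its
factor before the product is taken.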
@ -10,6 +10,7 @@
#include <linux/config.h>
#include <linux/version.h>
#include <linux/errno.h>
#include <linux/threads.h>
#include <asm/thread_info.h>
#include <asm/asi.h>
#include <asm/pstate.h>

@ -493,6 +494,35 @@ tlb_fixup_done:
call prom_init
mov %l7, %o0 ! OpenPROM cif handler

/* Initialize current_thread_info()->cpu as early as possible.
 * In order to do that accurately we have to patch up the get_cpuid()
 * assembler sequences.  And that, in turn, requires that we know
 * if we are on a Starfire box or not.  While we're here, patch up
 * the sun4v sequences as well.
 */
call check_if_starfire
nop
call per_cpu_patch
nop
call sun4v_patch
nop

#ifdef CONFIG_SMP
call hard_smp_processor_id
nop
cmp %o0, NR_CPUS
blu,pt %xcc, 1f
nop
call boot_cpu_id_too_large
nop
/* Not reached... */

1:
#else
mov 0, %o0
#endif
stb %o0, [%g6 + TI_CPU]

/* Off we go.... */
call start_kernel
nop
@ -599,18 +599,128 @@ struct pci_iommu_ops pci_sun4v_iommu_ops = {

/* SUN4V PCI configuration space accessors. */

static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func)
struct pdev_entry {
struct pdev_entry *next;
u32 devhandle;
unsigned int bus;
unsigned int device;
unsigned int func;
};

#define PDEV_HTAB_SIZE 16
#define PDEV_HTAB_MASK (PDEV_HTAB_SIZE - 1)
static struct pdev_entry *pdev_htab[PDEV_HTAB_SIZE];

static inline unsigned int pdev_hashfn(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
{
if (bus == pbm->pci_first_busno) {
if (device == 0 && func == 0)
return 0;
return 1;
unsigned int val;

val = (devhandle ^ (devhandle >> 4));
val ^= bus;
val ^= device;
val ^= func;

return val & PDEV_HTAB_MASK;
}
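[!] Editor's illustration of the hash above, not from the patch: with
PDEV_HTAB_SIZE == 16 only the low four bits survive the mask. For
devhandle 0x100, bus 1, device 2, func 0:

    val = 0x100 ^ (0x100 >> 4);  /* 0x100 ^ 0x010 = 0x110 */
    val ^= 1;                    /* 0x111 */
    val ^= 2;                    /* 0x113 */
    val ^= 0;                    /* 0x113 */
    val & PDEV_HTAB_MASK;        /* 0x113 & 0xf = 3 -> slot 3 */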
static int pdev_htab_add(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
{
struct pdev_entry *p = kmalloc(sizeof(*p), GFP_KERNEL);
struct pdev_entry **slot;

if (!p)
return -ENOMEM;

slot = &pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
p->next = *slot;
*slot = p;

p->devhandle = devhandle;
p->bus = bus;
p->device = device;
p->func = func;

return 0;
}

/* Recursively descend into the OBP device tree, rooted at toplevel_node,
 * looking for a PCI device matching bus and devfn.
 */
static int obp_find(struct linux_prom_pci_registers *pregs, int toplevel_node, unsigned int bus, unsigned int devfn)
{
toplevel_node = prom_getchild(toplevel_node);

while (toplevel_node != 0) {
int ret = obp_find(pregs, toplevel_node, bus, devfn);

if (ret != 0)
return ret;

ret = prom_getproperty(toplevel_node, "reg", (char *) pregs,
sizeof(*pregs) * PROMREG_MAX);
if (ret == 0 || ret == -1)
goto next_sibling;

if (((pregs[0].phys_hi >> 16) & 0xff) == bus &&
((pregs[0].phys_hi >> 8) & 0xff) == devfn)
break;

next_sibling:
toplevel_node = prom_getsibling(toplevel_node);
}

return toplevel_node;
}

static int pdev_htab_populate(struct pci_pbm_info *pbm)
{
struct linux_prom_pci_registers pr[PROMREG_MAX];
u32 devhandle = pbm->devhandle;
unsigned int bus;

for (bus = pbm->pci_first_busno; bus <= pbm->pci_last_busno; bus++) {
unsigned int devfn;

for (devfn = 0; devfn < 256; devfn++) {
unsigned int device = PCI_SLOT(devfn);
unsigned int func = PCI_FUNC(devfn);

if (obp_find(pr, pbm->prom_node, bus, devfn)) {
int err = pdev_htab_add(devhandle, bus,
device, func);
if (err)
return err;
}
}
}

return 0;
}

static struct pdev_entry *pdev_find(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
{
struct pdev_entry *p;

p = pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
while (p) {
if (p->devhandle == devhandle &&
p->bus == bus &&
p->device == device &&
p->func == func)
break;

p = p->next;
}

return p;
}

static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func)
{
if (bus < pbm->pci_first_busno ||
bus > pbm->pci_last_busno)
return 1;
return 0;
return pdev_find(pbm->devhandle, bus, device, func) == NULL;
}

static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,

@ -1063,6 +1173,8 @@ static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node, u32

pci_sun4v_get_bus_range(pbm);
pci_sun4v_iommu_init(pbm);

pdev_htab_populate(pbm);
}

void sun4v_pci_init(int node, char *model_name)
@ -220,7 +220,7 @@ char reboot_command[COMMAND_LINE_SIZE];

static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };

static void __init per_cpu_patch(void)
void __init per_cpu_patch(void)
{
struct cpuid_patch_entry *p;
unsigned long ver;

@ -280,7 +280,7 @@ static void __init per_cpu_patch(void)
}
}

static void __init sun4v_patch(void)
void __init sun4v_patch(void)
{
struct sun4v_1insn_patch_entry *p1;
struct sun4v_2insn_patch_entry *p2;

@ -315,6 +315,15 @@ static void __init sun4v_patch(void)
}
}

#ifdef CONFIG_SMP
void __init boot_cpu_id_too_large(int cpu)
{
prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
cpu, NR_CPUS);
prom_halt();
}
#endif

void __init setup_arch(char **cmdline_p)
{
/* Initialize PROM console and command line. */

@ -332,16 +341,6 @@ void __init setup_arch(char **cmdline_p)
conswitchp = &prom_con;
#endif

/* Work out if we are starfire early on */
check_if_starfire();

/* Now we know enough to patch the get_cpuid sequences
 * used by trap code.
 */
per_cpu_patch();

sun4v_patch();

boot_flags_init(*cmdline_p);

idprom_init();

@ -1264,7 +1264,6 @@ void __init smp_tick_init(void)
boot_cpu_id = hard_smp_processor_id();
current_tick_offset = timer_tick_offset;

cpu_set(boot_cpu_id, cpu_online_map);
prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
}

@ -1288,6 +1287,40 @@ int setup_profiling_timer(unsigned int multiplier)
return 0;
}
static void __init smp_tune_scheduling(void)
{
int instance, node;
unsigned int def, smallest = ~0U;

def = ((tlb_type == hypervisor) ?
(3 * 1024 * 1024) :
(4 * 1024 * 1024));

instance = 0;
while (!cpu_find_by_instance(instance, &node, NULL)) {
unsigned int val;

val = prom_getintdefault(node, "ecache-size", def);
if (val < smallest)
smallest = val;

instance++;
}

/* Any value less than 256K is nonsense. */
if (smallest < (256U * 1024U))
smallest = 256 * 1024;

max_cache_size = smallest;

if (smallest < 1U * 1024U * 1024U)
printk(KERN_INFO "Using max_cache_size of %uKB\n",
smallest / 1024U);
else
printk(KERN_INFO "Using max_cache_size of %uMB\n",
smallest / 1024U / 1024U);
}
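[!] Editor's worked example of the scan above, not from the patch: the
loop keeps the smallest "ecache-size" any CPU reports, then clamps. Two
CPUs reporting 4MB and 512KB yield max_cache_size = 512KB; a bogus 64KB
report would be raised to the 256KB floor; and a CPU with no property at
all contributes the 3MB (hypervisor) or 4MB default instead.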
/* Constrain the number of cpus to max_cpus. */
void __init smp_prepare_cpus(unsigned int max_cpus)
{

@ -1323,6 +1356,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
}

smp_store_cpu_info(boot_cpu_id);
smp_tune_scheduling();
}

/* Set this up early so that things like the scheduler can init

@ -1345,18 +1379,6 @@ void __init smp_setup_cpu_possible_map(void)

void __devinit smp_prepare_boot_cpu(void)
{
int cpu = hard_smp_processor_id();

if (cpu >= NR_CPUS) {
prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
prom_halt();
}

current_thread_info()->cpu = cpu;
__local_per_cpu_offset = __per_cpu_offset(cpu);

cpu_set(smp_processor_id(), cpu_online_map);
cpu_set(smp_processor_id(), phys_cpu_present_map);
}

int __devinit __cpu_up(unsigned int cpu)

@ -1433,4 +1455,7 @@ void __init setup_per_cpu_areas(void)

for (i = 0; i < NR_CPUS; i++, ptr += size)
memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);

/* Setup %g5 for the boot cpu. */
__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
}
@ -1797,7 +1797,9 @@ static const char *sun4v_err_type_to_str(u32 type)
};
}

static void sun4v_log_error(struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
extern void __show_regs(struct pt_regs * regs);

static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
{
int cnt;

@ -1830,6 +1832,8 @@ static void sun4v_log_error(struct sun4v_error_entry *ent, int cpu, const char *
pfx,
ent->err_raddr, ent->err_size, ent->err_cpu);

__show_regs(regs);

if ((cnt = atomic_read(ocnt)) != 0) {
atomic_set(ocnt, 0);
wmb();

@ -1862,7 +1866,7 @@ void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)

put_cpu();

sun4v_log_error(&local_copy, cpu,
sun4v_log_error(regs, &local_copy, cpu,
KERN_ERR "RESUMABLE ERROR",
&sun4v_resum_oflow_cnt);
}

@ -1910,7 +1914,7 @@ void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
}
#endif

sun4v_log_error(&local_copy, cpu,
sun4v_log_error(regs, &local_copy, cpu,
KERN_EMERG "NON-RESUMABLE ERROR",
&sun4v_nonresum_oflow_cnt);

@ -2200,7 +2204,6 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
void die_if_kernel(char *str, struct pt_regs *regs)
{
static int die_counter;
extern void __show_regs(struct pt_regs * regs);
extern void smp_report_regs(void);
int count = 0;
@ -165,8 +165,9 @@ csum_partial_end_cruft:
sll %g1, 8, %g1
or %o5, %g1, %o4

1: add %o2, %o4, %o2
1: addcc %o2, %o4, %o2
addc %g0, %o2, %o2

csum_partial_finish:
retl
mov %o2, %o0
srl %o2, 0, %o0
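[!] Editor's sketch of the bug the addcc/addc pair fixes, in C rather
than sparc64 assembly; csum_fold_carry is an illustrative name, not a
kernel function. The Internet checksum is one's-complement, so a carry
out of the top bit must be folded back into the sum; the old plain
`add` silently dropped it.

    /* 32-bit illustration of end-around carry */
    static unsigned int csum_fold_carry(unsigned int a, unsigned int b)
    {
            unsigned long long sum = (unsigned long long)a + b; /* addcc */
            sum = (sum & 0xffffffffULL) + (sum >> 32);          /* addc  */
            return (unsigned int)sum;
    }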
@ -221,11 +221,12 @@ FUNC_NAME: /* %o0=src, %o1=dst, %o2=len, %o3=sum */
sll %g1, 8, %g1
or %o5, %g1, %o4

1: add %o3, %o4, %o3
1: addcc %o3, %o4, %o3
addc %g0, %o3, %o3

70:
retl
mov %o3, %o0
srl %o3, 0, %o0

95: mov 0, GLOBAL_SPARE
brlez,pn %o2, 4f

@ -33,5 +33,9 @@ include $(srctree)/arch/i386/Makefile.cpu
# prevent gcc from keeping the stack 16 byte aligned. Taken from i386.
cflags-y += $(call cc-option,-mpreferred-stack-boundary=2)

# Prevent sprintf in nfsd from being converted to strcpy and resulting in
# an unresolved reference.
cflags-y += -ffreestanding

CFLAGS += $(cflags-y)
USER_CFLAGS += $(cflags-y)
@ -120,20 +120,11 @@ extern int is_syscall(unsigned long addr);
extern void free_irq(unsigned int, void *);
extern int cpu(void);

extern void time_init_kern(void);

/* Are we disallowed to sleep? Used to choose between GFP_KERNEL and GFP_ATOMIC. */
extern int __cant_sleep(void);
extern void segv_handler(int sig, union uml_pt_regs *regs);
extern void sigio_handler(int sig, union uml_pt_regs *regs);

#endif

/*
 * Overrides for Emacs so that we follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only.  This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-file-style: "linux"
 * End:
 */

@ -84,6 +84,16 @@ void timer_irq(union uml_pt_regs *regs)
}
}

void time_init_kern(void)
{
unsigned long long nsecs;

nsecs = os_nsecs();
set_normalized_timespec(&wall_to_monotonic, -nsecs / BILLION,
-nsecs % BILLION);
}

void do_boot_timer_handler(struct sigcontext * sc)
{
struct pt_regs regs;
@ -59,7 +59,7 @@ static __init void do_uml_initcalls(void)
initcall_t *call;

call = &__uml_initcall_start;
while (call < &__uml_initcall_end){;
while (call < &__uml_initcall_end){
(*call)();
call++;
}

@ -81,20 +81,12 @@ void uml_idle_timer(void)
set_interval(ITIMER_REAL);
}

extern void ktime_get_ts(struct timespec *ts);
#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)

void time_init(void)
{
struct timespec now;

if(signal(SIGVTALRM, boot_timer_handler) == SIG_ERR)
panic("Couldn't set SIGVTALRM handler");
set_interval(ITIMER_VIRTUAL);

do_posix_clock_monotonic_gettime(&now);
wall_to_monotonic.tv_sec = -now.tv_sec;
wall_to_monotonic.tv_nsec = -now.tv_nsec;
time_init_kern();
}

unsigned long long os_nsecs(void)
@ -99,11 +99,12 @@ long sys_ipc (uint call, int first, int second,

switch (call) {
case SEMOP:
return sys_semtimedop(first, (struct sembuf *) ptr, second,
NULL);
return sys_semtimedop(first, (struct sembuf __user *) ptr,
second, NULL);
case SEMTIMEDOP:
return sys_semtimedop(first, (struct sembuf *) ptr, second,
(const struct timespec *) fifth);
return sys_semtimedop(first, (struct sembuf __user *) ptr,
second,
(const struct timespec __user *) fifth);
case SEMGET:
return sys_semget (first, second, third);
case SEMCTL: {

@ -21,7 +21,7 @@
#include "skas.h"

static int copy_sc_from_user_skas(struct pt_regs *regs,
struct sigcontext *from)
struct sigcontext __user *from)
{
int err = 0;

@ -54,7 +54,8 @@ static int copy_sc_from_user_skas(struct pt_regs *regs,
return(err);
}

int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate *to_fp,
int copy_sc_to_user_skas(struct sigcontext __user *to,
struct _fpstate __user *to_fp,
struct pt_regs *regs, unsigned long mask,
unsigned long sp)
{

@ -106,10 +107,11 @@ int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate *to_fp,
#endif

#ifdef CONFIG_MODE_TT
int copy_sc_from_user_tt(struct sigcontext *to, struct sigcontext *from,
int copy_sc_from_user_tt(struct sigcontext *to, struct sigcontext __user *from,
int fpsize)
{
struct _fpstate *to_fp, *from_fp;
struct _fpstate *to_fp;
struct _fpstate __user *from_fp;
unsigned long sigs;
int err;

@ -124,13 +126,14 @@ int copy_sc_from_user_tt(struct sigcontext *to, struct sigcontext __user *from,
return(err);
}

int copy_sc_to_user_tt(struct sigcontext *to, struct _fpstate *fp,
int copy_sc_to_user_tt(struct sigcontext __user *to, struct _fpstate __user *fp,
struct sigcontext *from, int fpsize, unsigned long sp)
{
struct _fpstate *to_fp, *from_fp;
struct _fpstate __user *to_fp;
struct _fpstate *from_fp;
int err;

to_fp = (fp ? fp : (struct _fpstate *) (to + 1));
to_fp = (fp ? fp : (struct _fpstate __user *) (to + 1));
from_fp = from->fpstate;
err = copy_to_user(to, from, sizeof(*to));
/* The SP in the sigcontext is the updated one for the signal

@ -158,7 +161,8 @@ static int copy_sc_from_user(struct pt_regs *to, void __user *from)
return(ret);
}

static int copy_sc_to_user(struct sigcontext *to, struct _fpstate *fp,
static int copy_sc_to_user(struct sigcontext __user *to,
struct _fpstate __user *fp,
struct pt_regs *from, unsigned long mask,
unsigned long sp)
{

@ -169,7 +173,7 @@ static int copy_sc_to_user(struct sigcontext *to, struct _fpstate *fp,

struct rt_sigframe
{
char *pretcode;
char __user *pretcode;
struct ucontext uc;
struct siginfo info;
};

@ -188,7 +192,7 @@ int setup_signal_stack_si(unsigned long stack_top, int sig,

frame = (struct rt_sigframe __user *)
round_down(stack_top - sizeof(struct rt_sigframe), 16) - 8;
frame = (struct rt_sigframe *) ((unsigned long) frame - 128);
frame = (struct rt_sigframe __user *) ((unsigned long) frame - 128);

if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate)))
goto out;
@ -45,7 +45,7 @@ static long arch_prctl_tt(int code, unsigned long addr)
case ARCH_GET_GS:
ret = arch_prctl(code, (unsigned long) &tmp);
if(!ret)
ret = put_user(tmp, &addr);
ret = put_user(tmp, (long __user *)addr);
break;
default:
ret = -EINVAL;

@ -339,7 +339,7 @@ int ia32_setup_arg_pages(struct linux_binprm *bprm, unsigned long stack_top,
struct mm_struct *mm = current->mm;
int i, ret;

stack_base = IA32_STACK_TOP - MAX_ARG_PAGES * PAGE_SIZE;
stack_base = stack_top - MAX_ARG_PAGES * PAGE_SIZE;
mm->arg_start = bprm->p + stack_base;

bprm->p += stack_base;

@ -357,7 +357,7 @@ int ia32_setup_arg_pages(struct linux_binprm *bprm, unsigned long stack_top,
{
mpnt->vm_mm = mm;
mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p;
mpnt->vm_end = IA32_STACK_TOP;
mpnt->vm_end = stack_top;
if (executable_stack == EXSTACK_ENABLE_X)
mpnt->vm_flags = VM_STACK_FLAGS | VM_EXEC;
else if (executable_stack == EXSTACK_DISABLE_X)
@ -149,7 +149,7 @@ unsigned long __init find_e820_area(unsigned long start, unsigned long end, unsi
addr = start;
if (addr > ei->addr + ei->size)
continue;
while (bad_addr(&addr, size) && addr+size < ei->addr + ei->size)
while (bad_addr(&addr, size) && addr+size <= ei->addr+ei->size)
;
last = addr + size;
if (last > ei->addr + ei->size)
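[!] Editor's note on the one-character fix above: the candidate area is
the half-open range [addr, addr+size), so it still fits the e820 entry
when its exclusive end equals ei->addr + ei->size. The old `<` stopped
the scan one byte early and rejected placements flush against the end
of the entry.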
@ -281,12 +281,7 @@ tracesys:
ja 1f
movq %r10,%rcx /* fixup for C */
call *sys_call_table(,%rax,8)
movq %rax,RAX-ARGOFFSET(%rsp)
1: SAVE_REST
movq %rsp,%rdi
call syscall_trace_leave
RESTORE_TOP_OF_STACK %rbx
RESTORE_REST
1: movq %rax,RAX-ARGOFFSET(%rsp)
/* Use IRET because user could have changed frame */
jmp int_ret_from_sys_call
CFI_ENDPROC
@ -271,6 +271,18 @@ __setup("enable_8254_timer", setup_enable_8254_timer);
#include <linux/pci_ids.h>
#include <linux/pci.h>

#ifdef CONFIG_ACPI

static int nvidia_hpet_detected __initdata;

static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
{
nvidia_hpet_detected = 1;
return 0;
}
#endif

/* Temporary Hack. Nvidia and VIA boards currently only work with IO-APIC
off. Check for an Nvidia or VIA PCI bridge and turn it off.
Use pci direct infrastructure because this runs before the PCI subsystem.

@ -317,11 +329,19 @@ void __init check_ioapic(void)
return;
case PCI_VENDOR_ID_NVIDIA:
#ifdef CONFIG_ACPI
/* All timer overrides on Nvidia
seem to be wrong. Skip them. */
acpi_skip_timer_override = 1;
printk(KERN_INFO
"Nvidia board detected. Ignoring ACPI timer override.\n");
/*
 * All timer overrides on Nvidia are
 * wrong unless HPET is enabled.
 */
nvidia_hpet_detected = 0;
acpi_table_parse(ACPI_HPET,
nvidia_hpet_check);
if (nvidia_hpet_detected == 0) {
acpi_skip_timer_override = 1;
printk(KERN_INFO "Nvidia board "
"detected. Ignoring ACPI "
"timer override.\n");
}
#endif
/* RED-PEN skip them on mptables too? */
return;
@ -54,6 +54,10 @@ dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
else
#endif
node = numa_node_id();

if (node < first_node(node_online_map))
node = first_node(node_online_map);

page = alloc_pages_node(node, gfp, order);
return page ? page_address(page) : NULL;
}
@ -631,10 +631,8 @@ static int __init pci_iommu_init(void)
printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
if (end_pfn > MAX_DMA32_PFN) {
printk(KERN_ERR "WARNING more than 4GB of memory "
"but IOMMU not compiled in.\n"
KERN_ERR "WARNING 32bit PCI may malfunction.\n"
KERN_ERR "You might want to enable "
"CONFIG_GART_IOMMU\n");
"but IOMMU not available.\n"
KERN_ERR "WARNING 32bit PCI may malfunction.\n");
}
return -1;
}
@ -68,7 +68,7 @@ int pmtimer_mark_offset(void)
offset_delay = delta % (USEC_PER_SEC / HZ);

rdtscll(tsc);
vxtime.last_tsc = tsc - offset_delay * cpu_khz;
vxtime.last_tsc = tsc - offset_delay * (u64)cpu_khz / 1000;

/* don't calculate delay for first run,
or if we've got less than a tick */
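[!] Editor's note on the unit fix above: offset_delay is in microseconds
while cpu_khz numerically equals cycles per millisecond, so the TSC
correction is usecs * cpu_khz / 1000. Worked example, assuming a 2GHz
CPU (cpu_khz = 2,000,000): a 500us delay is 500 * 2,000,000 / 1000 =
1,000,000 cycles, where the old expression produced 1,000,000,000, a
thousandfold overshoot.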
@ -1051,7 +1051,7 @@ static void srat_detect_node(void)
for now. */
node = apicid_to_node[hard_smp_processor_id()];
if (node == NUMA_NO_NODE)
node = 0;
node = first_node(node_online_map);
numa_set_node(cpu, node);

if (acpi_numa > 0)
@ -399,8 +399,10 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
/* First clean up the node list */
for (i = 0; i < MAX_NUMNODES; i++) {
cutoff_node(i, start, end);
if ((nodes[i].end - nodes[i].start) < NODE_MIN_SIZE)
if ((nodes[i].end - nodes[i].start) < NODE_MIN_SIZE) {
unparse_node(i);
node_set_offline(i);
}
}

if (acpi_numa <= 0)
@ -1648,17 +1648,17 @@ static void as_exit_queue(elevator_t *e)
 * initialize elevator private data (as_data), and alloc a arq for
 * each request on the free lists
 */
static int as_init_queue(request_queue_t *q, elevator_t *e)
static void *as_init_queue(request_queue_t *q, elevator_t *e)
{
struct as_data *ad;
int i;

if (!arq_pool)
return -ENOMEM;
return NULL;

ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node);
if (!ad)
return -ENOMEM;
return NULL;
memset(ad, 0, sizeof(*ad));

ad->q = q; /* Identify what queue the data belongs to */

@ -1667,7 +1667,7 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
GFP_KERNEL, q->node);
if (!ad->hash) {
kfree(ad);
return -ENOMEM;
return NULL;
}

ad->arq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,

@ -1675,7 +1675,7 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
if (!ad->arq_pool) {
kfree(ad->hash);
kfree(ad);
return -ENOMEM;
return NULL;
}

/* anticipatory scheduling helpers */

@ -1696,14 +1696,13 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
ad->antic_expire = default_antic_expire;
ad->batch_expire[REQ_SYNC] = default_read_batch_expire;
ad->batch_expire[REQ_ASYNC] = default_write_batch_expire;
e->elevator_data = ad;

ad->current_batch_expires = jiffies + ad->batch_expire[REQ_SYNC];
ad->write_batch_count = ad->batch_expire[REQ_ASYNC] / 10;
if (ad->write_batch_count < 2)
ad->write_batch_count = 2;

return 0;
return ad;
}
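[!] Editor's sketch of the interface change running through this and the
following two hunks: elevator init methods now return their private data
pointer (NULL on failure) instead of storing it in e->elevator_data and
returning an errno; the caller is then expected to store the result.
`struct my_data` is a hypothetical type invented for illustration.

    struct my_data { int dummy; };  /* hypothetical private state */

    static void *my_init_queue(request_queue_t *q, elevator_t *e)
    {
            struct my_data *d = kmalloc(sizeof(*d), GFP_KERNEL);

            if (!d)
                    return NULL;
            memset(d, 0, sizeof(*d));
            return d;  /* caller stores this in e->elevator_data */
    }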
/*
@ -33,7 +33,7 @@ static int cfq_slice_idle = HZ / 70;

#define CFQ_KEY_ASYNC (0)

static DEFINE_RWLOCK(cfq_exit_lock);
static DEFINE_SPINLOCK(cfq_exit_lock);

/*
 * for the hash of cfqq inside the cfqd

@ -133,6 +133,7 @@ struct cfq_data {
mempool_t *crq_pool;

int rq_in_driver;
int hw_tag;

/*
 * schedule slice state info

@ -500,10 +501,13 @@ static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)

/*
 * if queue was preempted, just add to front to be fair. busy_rr
 * isn't sorted.
 * isn't sorted, but insert at the back for fairness.
 */
if (preempted || list == &cfqd->busy_rr) {
list_add(&cfqq->cfq_list, list);
if (preempted)
list = list->prev;

list_add_tail(&cfqq->cfq_list, list);
return;
}

@ -664,6 +668,15 @@ static void cfq_activate_request(request_queue_t *q, struct request *rq)
struct cfq_data *cfqd = q->elevator->elevator_data;

cfqd->rq_in_driver++;

/*
 * If the depth is larger than 1, it really could be queueing. But lets
 * make the mark a little higher - idling could still be good for
 * low queueing, and a low queueing number could also just indicate
 * a SCSI mid layer like behaviour where limit+1 is often seen.
 */
if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
cfqd->hw_tag = 1;
}
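[!] Editor's note on the heuristic above, not from the patch:
rq_in_driver counts requests dispatched to the driver but not yet
completed. A non-queueing device keeps it at 0 or 1, so only
command-queueing (TCQ/NCQ-style) hardware pushes it past 4; once hw_tag
latches on, the later hunks use it to skip idle-window marking, since
idling gains little when the device itself reorders requests.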
|
||||
static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
|
||||
@ -878,6 +891,13 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
|
||||
if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1)
|
||||
cfqq = list_entry_cfqq(cfqd->cur_rr.next);
|
||||
|
||||
/*
|
||||
* If no new queues are available, check if the busy list has some
|
||||
* before falling back to idle io.
|
||||
*/
|
||||
if (!cfqq && !list_empty(&cfqd->busy_rr))
|
||||
cfqq = list_entry_cfqq(cfqd->busy_rr.next);
|
||||
|
||||
/*
|
||||
* if we have idle queues and no rt or be queues had pending
|
||||
* requests, either allow immediate service if the grace period
|
||||
@ -1284,7 +1304,7 @@ static void cfq_exit_io_context(struct io_context *ioc)
|
||||
/*
|
||||
* put the reference this task is holding to the various queues
|
||||
*/
|
||||
read_lock_irqsave(&cfq_exit_lock, flags);
|
||||
spin_lock_irqsave(&cfq_exit_lock, flags);
|
||||
|
||||
n = rb_first(&ioc->cic_root);
|
||||
while (n != NULL) {
|
||||
@ -1294,7 +1314,7 @@ static void cfq_exit_io_context(struct io_context *ioc)
|
||||
n = rb_next(n);
|
||||
}
|
||||
|
||||
read_unlock_irqrestore(&cfq_exit_lock, flags);
|
||||
spin_unlock_irqrestore(&cfq_exit_lock, flags);
|
||||
}
|
||||
|
||||
static struct cfq_io_context *
|
||||
@ -1400,17 +1420,17 @@ static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio)
|
||||
struct cfq_io_context *cic;
|
||||
struct rb_node *n;
|
||||
|
||||
write_lock(&cfq_exit_lock);
|
||||
spin_lock(&cfq_exit_lock);
|
||||
|
||||
n = rb_first(&ioc->cic_root);
|
||||
while (n != NULL) {
|
||||
cic = rb_entry(n, struct cfq_io_context, rb_node);
|
||||
|
||||
|
||||
changed_ioprio(cic);
|
||||
n = rb_next(n);
|
||||
}
|
||||
|
||||
write_unlock(&cfq_exit_lock);
|
||||
spin_unlock(&cfq_exit_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1458,7 +1478,8 @@ retry:
|
||||
* set ->slice_left to allow preemption for a new process
|
||||
*/
|
||||
cfqq->slice_left = 2 * cfqd->cfq_slice_idle;
|
||||
cfq_mark_cfqq_idle_window(cfqq);
|
||||
if (!cfqd->hw_tag)
|
||||
cfq_mark_cfqq_idle_window(cfqq);
|
||||
cfq_mark_cfqq_prio_changed(cfqq);
|
||||
cfq_init_prio_data(cfqq);
|
||||
}
|
||||
@ -1475,9 +1496,10 @@ out:
|
||||
static void
|
||||
cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
|
||||
{
|
||||
read_lock(&cfq_exit_lock);
|
||||
spin_lock(&cfq_exit_lock);
|
||||
rb_erase(&cic->rb_node, &ioc->cic_root);
|
||||
read_unlock(&cfq_exit_lock);
|
||||
list_del_init(&cic->queue_list);
|
||||
spin_unlock(&cfq_exit_lock);
|
||||
kmem_cache_free(cfq_ioc_pool, cic);
|
||||
atomic_dec(&ioc_count);
|
||||
}
|
||||
@ -1545,11 +1567,11 @@ restart:
|
||||
BUG();
|
||||
}
|
||||
|
||||
read_lock(&cfq_exit_lock);
|
||||
spin_lock(&cfq_exit_lock);
|
||||
rb_link_node(&cic->rb_node, parent, p);
|
||||
rb_insert_color(&cic->rb_node, &ioc->cic_root);
|
||||
list_add(&cic->queue_list, &cfqd->cic_list);
|
||||
read_unlock(&cfq_exit_lock);
|
||||
spin_unlock(&cfq_exit_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1648,7 +1670,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
|
||||
{
|
||||
int enable_idle = cfq_cfqq_idle_window(cfqq);
|
||||
|
||||
if (!cic->ioc->task || !cfqd->cfq_slice_idle)
|
||||
if (!cic->ioc->task || !cfqd->cfq_slice_idle || cfqd->hw_tag)
|
||||
enable_idle = 0;
|
||||
else if (sample_valid(cic->ttime_samples)) {
|
||||
if (cic->ttime_mean > cfqd->cfq_slice_idle)
|
||||
@@ -1739,14 +1761,24 @@ cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 
 	cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
 
+	cic = crq->io_context;
+
 	/*
 	 * we never wait for an async request and we don't allow preemption
 	 * of an async request. so just return early
 	 */
-	if (!cfq_crq_is_sync(crq))
+	if (!cfq_crq_is_sync(crq)) {
+		/*
+		 * sync process issued an async request, if it's waiting
+		 * then expire it and kick rq handling.
+		 */
+		if (cic == cfqd->active_cic &&
+		    del_timer(&cfqd->idle_slice_timer)) {
+			cfq_slice_expired(cfqd, 0);
+			cfq_start_queueing(cfqd, cfqq);
+		}
 		return;
-
-	cic = crq->io_context;
+	}
 
 	cfq_update_io_thinktime(cfqd, cic);
 	cfq_update_io_seektime(cfqd, cic, crq);
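
One subtlety in the async hunk above: del_timer() returns nonzero only when it cancels a still-pending timer, so the call doubles as the "was the queue actually idling?" test, and the slice is expired only when a real idle wait was interrupted. The idiom, sketched with a hypothetical timer:

    #include <linux/timer.h>

    static struct timer_list example_idle_timer;    /* hypothetical */

    static void example_kick_dispatch(void)
    {
            /*
             * del_timer() returns 1 iff the timer was still pending. Only
             * then was anyone idling, and only then does this path expire
             * the slice and restart dispatch; on 0 the handler already ran
             * (or the timer was never armed) and owns the follow-up work.
             */
            if (del_timer(&example_idle_timer)) {
                    /* ... expire the slice, kick request handling ... */
            }
    }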
@@ -2164,10 +2196,9 @@ static void cfq_idle_class_timer(unsigned long data)
 	 * race with a non-idle queue, reset timer
 	 */
 	end = cfqd->last_end_request + CFQ_IDLE_GRACE;
-	if (!time_after_eq(jiffies, end)) {
-		cfqd->idle_class_timer.expires = end;
-		add_timer(&cfqd->idle_class_timer);
-	} else
+	if (!time_after_eq(jiffies, end))
+		mod_timer(&cfqd->idle_class_timer, end);
+	else
 		cfq_schedule_dispatch(cfqd);
 
 	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
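
The timer hunk is a straight simplification: mod_timer(timer, expires) updates the expiry and (re)arms the timer in one call, and unlike the open-coded pair it stays correct if the timer is already pending. Roughly, as a sketch rather than the kernel's actual implementation:

    #include <linux/timer.h>

    /*
     * What mod_timer(t, expires) replaces, minus the timer-base locking
     * that makes the real helper safe against a concurrently pending or
     * firing timer.
     */
    static inline void open_coded_mod_timer(struct timer_list *t,
                                            unsigned long expires)
    {
            del_timer(t);
            t->expires = expires;
            add_timer(t);   /* would be a bug if t were still pending */
    }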
@@ -2187,7 +2218,7 @@ static void cfq_exit_queue(elevator_t *e)
 
 	cfq_shutdown_timer_wq(cfqd);
 
-	write_lock(&cfq_exit_lock);
+	spin_lock(&cfq_exit_lock);
 	spin_lock_irq(q->queue_lock);
 
 	if (cfqd->active_queue)
@@ -2210,7 +2241,7 @@ static void cfq_exit_queue(elevator_t *e)
 	}
 
 	spin_unlock_irq(q->queue_lock);
-	write_unlock(&cfq_exit_lock);
+	spin_unlock(&cfq_exit_lock);
 
 	cfq_shutdown_timer_wq(cfqd);
 
@@ -2220,14 +2251,14 @@ static void cfq_exit_queue(elevator_t *e)
 	kfree(cfqd);
 }
 
-static int cfq_init_queue(request_queue_t *q, elevator_t *e)
+static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
 {
 	struct cfq_data *cfqd;
 	int i;
 
 	cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL);
 	if (!cfqd)
-		return -ENOMEM;
+		return NULL;
 
 	memset(cfqd, 0, sizeof(*cfqd));
 
@@ -2257,8 +2288,6 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
 	for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
 		INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
 
-	e->elevator_data = cfqd;
-
 	cfqd->queue = q;
 
 	cfqd->max_queued = q->nr_requests / 4;
@@ -2285,14 +2314,14 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
 	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
 	cfqd->cfq_slice_idle = cfq_slice_idle;
 
-	return 0;
+	return cfqd;
 out_crqpool:
 	kfree(cfqd->cfq_hash);
 out_cfqhash:
 	kfree(cfqd->crq_hash);
 out_crqhash:
 	kfree(cfqd);
-	return -ENOMEM;
+	return NULL;
 }
 
 static void cfq_slab_kill(void)
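
cfq_init_queue() moving from int to void * is the interface change this merge threads through every I/O scheduler (deadline and noop follow below): the init hook now returns its private data, with NULL meaning failure, instead of returning an errno and storing into e->elevator_data itself. Reconstructed from the call sites in this diff rather than quoted from the elevator header, the ops hook plausibly changes like so:

    /*
     * Hedged reconstruction, inferred from the init functions in this
     * diff, not copied verbatim from the header.
     *
     * Before: int   (*elevator_init_fn)(request_queue_t *, elevator_t *);
     * After:  void *(*elevator_init_fn)(request_queue_t *, elevator_t *);
     *
     * Returning the pointer lets the core allocate a new scheduler's
     * private state before detaching the old one during a switch.
     */
    typedef void *(elevator_init_fn) (request_queue_t *, elevator_t *);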
@@ -613,24 +613,24 @@ static void deadline_exit_queue(elevator_t *e)
  * initialize elevator private data (deadline_data), and alloc a drq for
  * each request on the free lists
  */
-static int deadline_init_queue(request_queue_t *q, elevator_t *e)
+static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
 {
 	struct deadline_data *dd;
 	int i;
 
 	if (!drq_pool)
-		return -ENOMEM;
+		return NULL;
 
 	dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
 	if (!dd)
-		return -ENOMEM;
+		return NULL;
 	memset(dd, 0, sizeof(*dd));
 
 	dd->hash = kmalloc_node(sizeof(struct list_head)*DL_HASH_ENTRIES,
 				GFP_KERNEL, q->node);
 	if (!dd->hash) {
 		kfree(dd);
-		return -ENOMEM;
+		return NULL;
 	}
 
 	dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
@@ -638,7 +638,7 @@ static int deadline_init_queue(request_queue_t *q, elevator_t *e)
 	if (!dd->drq_pool) {
 		kfree(dd->hash);
 		kfree(dd);
-		return -ENOMEM;
+		return NULL;
 	}
 
 	for (i = 0; i < DL_HASH_ENTRIES; i++)
@@ -653,8 +653,7 @@ static int deadline_init_queue(request_queue_t *q, elevator_t *e)
 	dd->writes_starved = writes_starved;
 	dd->front_merges = 1;
 	dd->fifo_batch = fifo_batch;
-	e->elevator_data = dd;
-	return 0;
+	return dd;
 }
 
 static void deadline_put_request(request_queue_t *q, struct request *rq)
@@ -121,16 +121,16 @@ static struct elevator_type *elevator_get(const char *name)
 	return e;
 }
 
-static int elevator_attach(request_queue_t *q, struct elevator_queue *eq)
+static void *elevator_init_queue(request_queue_t *q, struct elevator_queue *eq)
+{
+	return eq->ops->elevator_init_fn(q, eq);
+}
+
+static void elevator_attach(request_queue_t *q, struct elevator_queue *eq,
+			    void *data)
 {
-	int ret = 0;
-
 	q->elevator = eq;
-
-	if (eq->ops->elevator_init_fn)
-		ret = eq->ops->elevator_init_fn(q, eq);
-
-	return ret;
+	eq->elevator_data = data;
 }
 
 static char chosen_elevator[16];
@@ -181,6 +181,7 @@ int elevator_init(request_queue_t *q, char *name)
 	struct elevator_type *e = NULL;
 	struct elevator_queue *eq;
 	int ret = 0;
+	void *data;
 
 	INIT_LIST_HEAD(&q->queue_head);
 	q->last_merge = NULL;
@@ -202,10 +203,13 @@ int elevator_init(request_queue_t *q, char *name)
 	if (!eq)
 		return -ENOMEM;
 
-	ret = elevator_attach(q, eq);
-	if (ret)
+	data = elevator_init_queue(q, eq);
+	if (!data) {
 		kobject_put(&eq->kobj);
+		return -ENOMEM;
+	}
 
+	elevator_attach(q, eq, data);
 	return ret;
 }
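
elevator_init() is now split into a fallible phase (allocate the scheduler's private data) and an infallible one (attach it to the queue), and it fails wholesale before anything is published. A sketch of the shape, with all names hypothetical:

    #include <linux/errno.h>

    struct ex_queue;            /* hypothetical stand-ins */
    struct ex_elevator;

    void *ex_init_queue(struct ex_queue *q, struct ex_elevator *e);
    void ex_attach(struct ex_queue *q, struct ex_elevator *e, void *data);

    int ex_setup(struct ex_queue *q, struct ex_elevator *e)
    {
            /* Phase 1: the only step that can fail. */
            void *data = ex_init_queue(q, e);

            if (!data)
                    return -ENOMEM; /* nothing was attached yet */

            /* Phase 2: cannot fail, so no unwind path is needed. */
            ex_attach(q, e, data);
            return 0;
    }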
@@ -722,13 +726,16 @@ int elv_register_queue(struct request_queue *q)
 	return error;
 }
 
+static void __elv_unregister_queue(elevator_t *e)
+{
+	kobject_uevent(&e->kobj, KOBJ_REMOVE);
+	kobject_del(&e->kobj);
+}
+
 void elv_unregister_queue(struct request_queue *q)
 {
-	if (q) {
-		elevator_t *e = q->elevator;
-		kobject_uevent(&e->kobj, KOBJ_REMOVE);
-		kobject_del(&e->kobj);
-	}
+	if (q)
+		__elv_unregister_queue(q->elevator);
 }
 
 int elv_register(struct elevator_type *e)
@@ -780,6 +787,7 @@ EXPORT_SYMBOL_GPL(elv_unregister);
 static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
 {
 	elevator_t *old_elevator, *e;
+	void *data;
 
 	/*
 	 * Allocate new elevator
@@ -788,6 +796,12 @@ static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
 	if (!e)
 		return 0;
 
+	data = elevator_init_queue(q, e);
+	if (!data) {
+		kobject_put(&e->kobj);
+		return 0;
+	}
+
 	/*
 	 * Turn on BYPASS and drain all requests w/ elevator private data
 	 */
@@ -806,19 +820,19 @@ static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
 		elv_drain_elevator(q);
 	}
 
-	spin_unlock_irq(q->queue_lock);
-
 	/*
-	 * unregister old elevator data
+	 * Remember old elevator.
 	 */
-	elv_unregister_queue(q);
 	old_elevator = q->elevator;
 
 	/*
 	 * attach and start new elevator
 	 */
-	if (elevator_attach(q, e))
-		goto fail;
+	elevator_attach(q, e, data);
+
+	spin_unlock_irq(q->queue_lock);
+
+	__elv_unregister_queue(old_elevator);
 
 	if (elv_register_queue(q))
 		goto fail_register;
@@ -837,7 +851,6 @@ fail_register:
 	 */
 	elevator_exit(e);
-	e = NULL;
-fail:
 	q->elevator = old_elevator;
 	elv_register_queue(q);
 	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
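
The elevator_switch() reordering is the payoff of the void * init hook: the new scheduler's data is allocated before the queue is drained, the attach itself can no longer fail, and the old elevator is torn down only after the new one is live, which is why the goto fail unwind (and with it the fail: label) disappears. In outline, with hypothetical ex_* names as in the sketch above:

    struct ex_queue;            /* hypothetical stand-ins */
    struct ex_elevator;
    void *ex_init_queue(struct ex_queue *q, struct ex_elevator *e);
    void ex_attach(struct ex_queue *q, struct ex_elevator *e, void *data);
    void ex_lock(struct ex_queue *q);
    void ex_unlock(struct ex_queue *q);
    void ex_unregister_old(struct ex_queue *q);

    int ex_switch(struct ex_queue *q, struct ex_elevator *new_e)
    {
            void *data = ex_init_queue(q, new_e);   /* only fallible step */

            if (!data)
                    return 0;       /* old elevator never touched */

            ex_lock(q);
            ex_attach(q, new_e, data);      /* publish the new elevator */
            ex_unlock(q);

            ex_unregister_old(q);   /* tear down the old one afterwards */
            return 1;
    }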
@@ -65,16 +65,15 @@ noop_latter_request(request_queue_t *q, struct request *rq)
 	return list_entry(rq->queuelist.next, struct request, queuelist);
 }
 
-static int noop_init_queue(request_queue_t *q, elevator_t *e)
+static void *noop_init_queue(request_queue_t *q, elevator_t *e)
 {
 	struct noop_data *nd;
 
 	nd = kmalloc(sizeof(*nd), GFP_KERNEL);
 	if (!nd)
-		return -ENOMEM;
+		return NULL;
 	INIT_LIST_HEAD(&nd->queue);
-	e->elevator_data = nd;
-	return 0;
+	return nd;
 }
 
 static void noop_exit_queue(elevator_t *e)
@@ -577,6 +577,8 @@ acpi_processor_register_performance(struct acpi_processor_performance
 		return_VALUE(-EBUSY);
 	}
 
+	WARN_ON(!performance);
+
 	pr->performance = performance;
 
 	if (acpi_processor_get_performance_info(pr)) {
@@ -609,7 +611,8 @@ acpi_processor_unregister_performance(struct acpi_processor_performance
 		return_VOID;
 	}
 
-	kfree(pr->performance->states);
+	if (pr->performance)
+		kfree(pr->performance->states);
 	pr->performance = NULL;
 
 	acpi_cpufreq_remove_file(pr);
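
The unregister hunk guards a dereference, not the free: kfree(NULL) is explicitly harmless, but pr->performance->states oopses when pr->performance is NULL, so the pointer walk itself needs the check. The pattern, with a hypothetical stand-in struct:

    #include <linux/slab.h>

    struct perf_stub { void *states; }; /* hypothetical stand-in */

    static void free_states(struct perf_stub *perf)
    {
            if (perf)                       /* guard the dereference ...     */
                    kfree(perf->states);    /* ... kfree(NULL) would be fine */
    }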
@@ -8,7 +8,6 @@
  *
  */
 
-#include <linux/vt_kern.h>
 #include <linux/device.h>
 #include <linux/kallsyms.h>
 #include <linux/pm.h>
@@ -66,6 +65,7 @@ int suspend_device(struct device * dev, pm_message_t state)
 	return error;
 }
 
+
 /**
  * device_suspend - Save state and stop all devices in system.
  * @state: Power state to put each device in.
@@ -85,9 +85,6 @@ int device_suspend(pm_message_t state)
 {
 	int error = 0;
 
-	if (!is_console_suspend_safe())
-		return -EINVAL;
-
 	down(&dpm_sem);
 	down(&dpm_list_sem);
 	while (!list_empty(&dpm_active) && error == 0) {
@@ -41,9 +41,9 @@ obj-$(CONFIG_N_HDLC) += n_hdlc.o
 obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
 obj-$(CONFIG_SX) += sx.o generic_serial.o
 obj-$(CONFIG_RIO) += rio/ generic_serial.o
+obj-$(CONFIG_HVC_DRIVER) += hvc_console.o
 obj-$(CONFIG_HVC_CONSOLE) += hvc_vio.o hvsi.o
 obj-$(CONFIG_HVC_RTAS) += hvc_rtas.o
-obj-$(CONFIG_HVC_DRIVER) += hvc_console.o
 obj-$(CONFIG_RAW_DRIVER) += raw.o
 obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o
 obj-$(CONFIG_MMTIMER) += mmtimer.o
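
The Makefile hunk looks cosmetic but is presumably about link order: built-in initcalls at the same level run in the order their objects are linked, so moving hvc_console.o (the HVC core that the vio/rtas backends register with) above its users makes the core initialize first. An illustration of the rule, with both initcalls hypothetical:

    #include <linux/init.h>

    static int __init fake_hvc_core_init(void)
    {
            return 0;       /* linked earlier, therefore runs first */
    }

    static int __init fake_hvc_backend_init(void)
    {
            return 0;       /* linked later, may rely on the core */
    }

    /* Same initcall level: execution order equals Makefile link order. */
    device_initcall(fake_hvc_core_init);
    device_initcall(fake_hvc_backend_init);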
@@ -86,7 +86,7 @@ config AGP_NVIDIA
 
 config AGP_SIS
 	tristate "SiS chipset support"
-	depends on AGP && X86_32
+	depends on AGP
 	help
 	  This option gives you AGP support for the GLX component of
 	  X on Silicon Integrated Systems [SiS] chipsets.
Some files were not shown because too many files have changed in this diff.