/*
 *	Low-Level PCI Support for PC
 *
 *	(c) 1999--2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/dmi.h>

#include <asm/acpi.h>
#include <asm/segment.h>
#include <asm/io.h>
#include <asm/smp.h>

#include "pci.h"

unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 |
				PCI_PROBE_MMCONF;

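/*
 * Whether to sort the device lists breadth-first; 0 keeps the default
 * depth-first discovery order.  Set by the DMI quirks below, or forced
 * either way with "pci=bfsort"/"pci=nobfsort" (see pcibios_setup()).
 */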
static int pci_bf_sort;
int pci_routeirq;
int pcibios_last_bus = -1;
unsigned long pirq_table_addr;
struct pci_bus *pci_root_bus;
struct pci_raw_ops *raw_pci_ops;
struct pci_raw_ops *raw_pci_ext_ops;

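/*
 * Low-level config space accessors: registers below 256 go through the
 * legacy mechanism (raw_pci_ops) when it is available; everything else
 * (extended config space, or systems without legacy ops) goes through
 * the extended ops (raw_pci_ext_ops, e.g. MMCONFIG).
 */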
int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
						int reg, int len, u32 *val)
{
	if (reg < 256 && raw_pci_ops)
		return raw_pci_ops->read(domain, bus, devfn, reg, len, val);
	if (raw_pci_ext_ops)
		return raw_pci_ext_ops->read(domain, bus, devfn, reg, len, val);
	return -EINVAL;
}

int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
						int reg, int len, u32 val)
{
	if (reg < 256 && raw_pci_ops)
		return raw_pci_ops->write(domain, bus, devfn, reg, len, val);
	if (raw_pci_ext_ops)
		return raw_pci_ext_ops->write(domain, bus, devfn, reg, len, val);
	return -EINVAL;
}

static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value)
{
	return raw_pci_read(pci_domain_nr(bus), bus->number,
				devfn, where, size, value);
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value)
{
	return raw_pci_write(pci_domain_nr(bus), bus->number,
				devfn, where, size, value);
}

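/*
 * These ops are handed to the generic PCI core by the bus scans below;
 * a config access such as pci_bus_read_config_dword(bus, devfn,
 * PCI_VENDOR_ID, &v) (a generic-core helper) ultimately lands in
 * pci_read()/raw_pci_read() above.
 */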
struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};

/*
 * legacy, numa, and acpi all want to call pcibios_scan_root
 * from their initcalls. This flag prevents that.
 */
int pcibios_scanned;

/*
 * This interrupt-safe spinlock protects all accesses to PCI
 * configuration space.
 */
DEFINE_SPINLOCK(pci_config_lock);

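/*
 * DMI quirk with the same effect as booting with "pci=skip_isa_align"
 * (handled in pcibios_setup() below).
 */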
static int __devinit can_skip_ioresource_align(const struct dmi_system_id *d)
{
	pci_probe |= PCI_CAN_SKIP_ISA_ALIGN;
	printk(KERN_INFO "PCI: %s detected, can skip ISA alignment\n", d->ident);
	return 0;
}

static struct dmi_system_id can_skip_pciprobe_dmi_table[] __devinitdata = {
/*
 * Systems where PCI IO resource ISA alignment can be skipped
 * when the ISA enable bit in the bridge control is not set
 */
	{
		.callback = can_skip_ioresource_align,
		.ident = "IBM System x3800",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
			DMI_MATCH(DMI_PRODUCT_NAME, "x3800"),
		},
	},
	{
		.callback = can_skip_ioresource_align,
		.ident = "IBM System x3850",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
			DMI_MATCH(DMI_PRODUCT_NAME, "x3850"),
		},
	},
	{
		.callback = can_skip_ioresource_align,
		.ident = "IBM System x3950",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
			DMI_MATCH(DMI_PRODUCT_NAME, "x3950"),
		},
	},
	{}
};

void __init dmi_check_skip_isa_align(void)
{
	dmi_check_system(can_skip_pciprobe_dmi_table);
}

/*
 * Called after each bus is probed, but before its children
 * are examined.
 */
void __devinit pcibios_fixup_bus(struct pci_bus *b)
{
	pci_read_bridge_bases(b);
}

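/*
 * Breadth-first sorting keeps embedded devices (e.g. the onboard NICs
 * of various Dell PowerEdge and HP ProLiant servers) in the bus order
 * that 2.4 kernels used, instead of the depth-first order of the
 * 2.6 device klist, so such NICs are named the way users expect.
 */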
/*
 * Only use DMI information to set this if nothing was passed
 * on the kernel command line (which was parsed earlier).
 */
static int __devinit set_bf_sort(const struct dmi_system_id *d)
{
	if (pci_bf_sort == pci_bf_sort_default) {
		pci_bf_sort = pci_dmi_bf;
		printk(KERN_INFO "PCI: %s detected, enabling pci=bfsort.\n", d->ident);
	}
	return 0;
}

/*
 * Enable renumbering of PCI bus# ranges to reach all PCI busses (Cardbus)
 */
#ifdef __i386__
static int __devinit assign_all_busses(const struct dmi_system_id *d)
{
	pci_probe |= PCI_ASSIGN_ALL_BUSSES;
	printk(KERN_INFO "%s detected: enabling PCI bus# renumbering"
			" (pci=assign-busses)\n", d->ident);
	return 0;
}
#endif

static struct dmi_system_id __devinitdata pciprobe_dmi_table[] = {
#ifdef __i386__
/*
 * Laptops which need pci=assign-busses to see Cardbus cards
 */
	{
		.callback = assign_all_busses,
		.ident = "Samsung X20 Laptop",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"),
			DMI_MATCH(DMI_PRODUCT_NAME, "SX20S"),
		},
	},
#endif		/* __i386__ */
	{
		.callback = set_bf_sort,
		.ident = "Dell PowerEdge 1950",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1950"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "Dell PowerEdge 1955",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1955"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "Dell PowerEdge 2900",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2900"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "Dell PowerEdge 2950",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2950"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "Dell PowerEdge R900",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R900"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL20p G3",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL20p G3"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL20p G4",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL20p G4"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL30p G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL30p G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL25p G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL25p G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL35p G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL35p G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL45p G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL45p G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL45p G2",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL45p G2"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL460c G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL460c G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL465c G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL465c G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL480c G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL480c G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL685c G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL685c G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant DL360",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL360"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant DL380",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL380"),
		},
	},
#ifdef __i386__
	{
		.callback = assign_all_busses,
		.ident = "Compaq EVO N800c",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
			DMI_MATCH(DMI_PRODUCT_NAME, "EVO N800c"),
		},
	},
#endif
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant DL385 G2",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL385 G2"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant DL585 G2",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL585 G2"),
		},
	},
	{}
};

void __init dmi_check_pciprobe(void)
{
	dmi_check_system(pciprobe_dmi_table);
}

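/*
 * Scan a root bus unless it has already been scanned.  Fresh sysdata is
 * allocated per root bus, and its NUMA node comes from
 * get_mp_bus_to_node(); child buses inherit this sysdata during the
 * scan, so the devices below end up with the right dev_to_node() value.
 */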
struct pci_bus * __devinit pcibios_scan_root(int busnum)
{
	struct pci_bus *bus = NULL;
	struct pci_sysdata *sd;

	while ((bus = pci_find_next_bus(bus)) != NULL) {
		if (bus->number == busnum) {
			/* Already scanned */
			return bus;
		}
	}

	/* Allocate per-root-bus (not per bus) arch-specific data.
	 * TODO: leak; this memory is never freed.
	 * It's arguable whether it's worth the trouble to care.
	 */
	sd = kzalloc(sizeof(*sd), GFP_KERNEL);
	if (!sd) {
		printk(KERN_ERR "PCI: OOM, not probing PCI bus %02x\n", busnum);
		return NULL;
	}

	sd->node = get_mp_bus_to_node(busnum);

	printk(KERN_DEBUG "PCI: Probing PCI hardware (bus %02x)\n", busnum);
	bus = pci_scan_bus_parented(NULL, busnum, &pci_root_ops, sd);
	if (!bus)
		kfree(sd);

	return bus;
}

extern u8 pci_cache_line_size;

static int __init pcibios_init(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (!raw_pci_ops) {
		printk(KERN_WARNING "PCI: System does not support PCI\n");
		return 0;
	}

	/*
	 * Assume PCI cacheline size of 32 bytes for all x86s except K7/K8
	 * and P4. It's also good for 386/486s (which actually have 16)
	 * as quite a few PCI devices do not support smaller values.
	 */
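	/* The PCI cacheline size register counts 32-bit words, hence ">> 2". */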
	pci_cache_line_size = 32 >> 2;
	if (c->x86 >= 6 && c->x86_vendor == X86_VENDOR_AMD)
		pci_cache_line_size = 64 >> 2;	/* K7 & K8 */
	else if (c->x86 > 6 && c->x86_vendor == X86_VENDOR_INTEL)
		pci_cache_line_size = 128 >> 2;	/* P4 */

	pcibios_resource_survey();

	if (pci_bf_sort >= pci_force_bf)
		pci_sort_breadthfirst();

	return 0;
}

subsys_initcall(pcibios_init);

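/*
 * Parse "pci=" options from the kernel command line.  Returns NULL once
 * an option has been consumed here, or the original string so that the
 * generic code can try to interpret it.
 */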
char * __devinit pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		pci_probe = 0;
		return NULL;
	} else if (!strcmp(str, "bfsort")) {
		pci_bf_sort = pci_force_bf;
		return NULL;
	} else if (!strcmp(str, "nobfsort")) {
		pci_bf_sort = pci_force_nobf;
		return NULL;
	}
#ifdef CONFIG_PCI_BIOS
	else if (!strcmp(str, "bios")) {
		pci_probe = PCI_PROBE_BIOS;
		return NULL;
	} else if (!strcmp(str, "nobios")) {
		pci_probe &= ~PCI_PROBE_BIOS;
		return NULL;
	} else if (!strcmp(str, "biosirq")) {
		pci_probe |= PCI_BIOS_IRQ_SCAN;
		return NULL;
	} else if (!strncmp(str, "pirqaddr=", 9)) {
		pirq_table_addr = simple_strtoul(str+9, NULL, 0);
		return NULL;
	}
#endif
#ifdef CONFIG_PCI_DIRECT
	else if (!strcmp(str, "conf1")) {
		pci_probe = PCI_PROBE_CONF1 | PCI_NO_CHECKS;
		return NULL;
	}
	else if (!strcmp(str, "conf2")) {
		pci_probe = PCI_PROBE_CONF2 | PCI_NO_CHECKS;
		return NULL;
	}
#endif
#ifdef CONFIG_PCI_MMCONFIG
	else if (!strcmp(str, "nommconf")) {
		pci_probe &= ~PCI_PROBE_MMCONF;
		return NULL;
	}
	else if (!strcmp(str, "check_enable_amd_mmconf")) {
		pci_probe |= PCI_CHECK_ENABLE_AMD_MMCONF;
		return NULL;
	}
#endif
	else if (!strcmp(str, "noacpi")) {
		acpi_noirq_set();
		return NULL;
	}
	else if (!strcmp(str, "noearly")) {
		pci_probe |= PCI_PROBE_NOEARLY;
		return NULL;
	}
#ifndef CONFIG_X86_VISWS
	else if (!strcmp(str, "usepirqmask")) {
		pci_probe |= PCI_USE_PIRQ_MASK;
		return NULL;
	} else if (!strncmp(str, "irqmask=", 8)) {
		pcibios_irq_mask = simple_strtol(str+8, NULL, 0);
		return NULL;
	} else if (!strncmp(str, "lastbus=", 8)) {
		pcibios_last_bus = simple_strtol(str+8, NULL, 0);
		return NULL;
	}
#endif
	else if (!strcmp(str, "rom")) {
		pci_probe |= PCI_ASSIGN_ROMS;
		return NULL;
	} else if (!strcmp(str, "assign-busses")) {
		pci_probe |= PCI_ASSIGN_ALL_BUSSES;
		return NULL;
	} else if (!strcmp(str, "use_crs")) {
		pci_probe |= PCI_USE__CRS;
		return NULL;
	} else if (!strcmp(str, "routeirq")) {
		pci_routeirq = 1;
		return NULL;
	} else if (!strcmp(str, "skip_isa_align")) {
		pci_probe |= PCI_CAN_SKIP_ISA_ALIGN;
		return NULL;
	}
	return str;
}

unsigned int pcibios_assign_all_busses(void)
{
	return (pci_probe & PCI_ASSIGN_ALL_BUSSES) ? 1 : 0;
}

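/*
 * Enable the device's resources and, unless it is using MSI, route its
 * legacy IRQ; the disable path below mirrors this.
 */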
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	int err;

	if ((err = pci_enable_resources(dev, mask)) < 0)
		return err;

	if (!dev->msi_enabled)
		return pcibios_enable_irq(dev);
	return 0;
}

void pcibios_disable_device (struct pci_dev *dev)
{
	if (!dev->msi_enabled && pcibios_disable_irq)
		pcibios_disable_irq(dev);
}

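/*
 * Like pcibios_scan_root(), but with an explicit NUMA node for the
 * freshly allocated sysdata.  Child buses inherit the parent's sysdata
 * during the scan, so every device below reports the given node.
 */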
struct pci_bus * __devinit pci_scan_bus_on_node(int busno, struct pci_ops *ops, int node)
{
	struct pci_bus *bus = NULL;
	struct pci_sysdata *sd;

	/*
	 * Allocate per-root-bus (not per bus) arch-specific data.
	 * TODO: leak; this memory is never freed.
	 * It's arguable whether it's worth the trouble to care.
	 */
	sd = kzalloc(sizeof(*sd), GFP_KERNEL);
	if (!sd) {
		printk(KERN_ERR "PCI: OOM, skipping PCI bus %02x\n", busno);
		return NULL;
	}
	sd->node = node;
	bus = pci_scan_bus(busno, ops, sd);
	if (!bus)
		kfree(sd);

	return bus;
}

struct pci_bus * __devinit pci_scan_bus_with_sysdata(int busno)
{
	return pci_scan_bus_on_node(busno, &pci_root_ops, -1);
}