mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-11-20 00:26:39 +08:00
Merge with master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
This commit is contained in:
commit
bfd4bda097
@ -4,6 +4,16 @@ The EtherDrive (R) HOWTO for users of 2.6 kernels is found at ...
|
||||
|
||||
It has many tips and hints!
|
||||
|
||||
The aoetools are userland programs that are designed to work with this
|
||||
driver. The aoetools are on sourceforge.
|
||||
|
||||
http://aoetools.sourceforge.net/
|
||||
|
||||
The scripts in this Documentation/aoe directory are intended to
|
||||
document the use of the driver and are not necessary if you install
|
||||
the aoetools.
|
||||
|
||||
|
||||
CREATING DEVICE NODES
|
||||
|
||||
Users of udev should find the block device nodes created
|
||||
@ -35,14 +45,15 @@ USING DEVICE NODES
|
||||
|
||||
"echo eth2 eth4 > /dev/etherd/interfaces" tells the aoe driver to
|
||||
limit ATA over Ethernet traffic to eth2 and eth4. AoE traffic from
|
||||
untrusted networks should be ignored as a matter of security.
|
||||
untrusted networks should be ignored as a matter of security. See
|
||||
also the aoe_iflist driver option described below.
|
||||
|
||||
"echo > /dev/etherd/discover" tells the driver to find out what AoE
|
||||
devices are available.
|
||||
|
||||
These character devices may disappear and be replaced by sysfs
|
||||
counterparts, so distribution maintainers are encouraged to create
|
||||
scripts that use these devices.
|
||||
counterparts. Using the commands in aoetools insulates users from
|
||||
these implementation details.
|
||||
|
||||
The block devices are named like this:
|
||||
|
||||
@ -66,7 +77,8 @@ USING SYSFS
|
||||
through which we are communicating with the remote AoE device.
|
||||
|
||||
There is a script in this directory that formats this information
|
||||
in a convenient way.
|
||||
in a convenient way. Users with aoetools can use the aoe-stat
|
||||
command.
|
||||
|
||||
root@makki root# sh Documentation/aoe/status.sh
|
||||
e10.0 eth3 up
|
||||
@ -89,3 +101,23 @@ USING SYSFS
|
||||
e4.7 eth1 up
|
||||
e4.8 eth1 up
|
||||
e4.9 eth1 up
|
||||
|
||||
Use /sys/module/aoe/parameters/aoe_iflist (or better, the driver
|
||||
option discussed below) instead of /dev/etherd/interfaces to limit
|
||||
AoE traffic to the network interfaces in the given
|
||||
whitespace-separated list. Unlike the old character device, the
|
||||
sysfs entry can be read from as well as written to.
|
||||
|
||||
It's helpful to trigger discovery after setting the list of allowed
|
||||
interfaces. The aoetools package provides an aoe-discover script
|
||||
for this purpose. You can also directly use the
|
||||
/dev/etherd/discover special file described above.
|
||||
|
||||
DRIVER OPTIONS
|
||||
|
||||
There is a boot option for the built-in aoe driver and a
|
||||
corresponding module parameter, aoe_iflist. Without this option,
|
||||
all network interfaces may be used for ATA over Ethernet. Here is a
|
||||
usage example for the module parameter.
|
||||
|
||||
modprobe aoe_iflist="eth1 eth3"
|
||||
|
@ -14,10 +14,6 @@ test ! -d "$sysd/block" && {
|
||||
echo "$me Error: sysfs is not mounted" 1>&2
|
||||
exit 1
|
||||
}
|
||||
test -z "`lsmod | grep '^aoe'`" && {
|
||||
echo "$me Error: aoe module is not loaded" 1>&2
|
||||
exit 1
|
||||
}
|
||||
|
||||
for d in `ls -d $sysd/block/etherd* 2>/dev/null | grep -v p` end; do
|
||||
# maybe ls comes up empty, so we use "end"
|
||||
|
@ -279,6 +279,7 @@ pci_for_each_dev_reverse() Superseded by pci_find_device_reverse()
|
||||
pci_for_each_bus() Superseded by pci_find_next_bus()
|
||||
pci_find_device() Superseded by pci_get_device()
|
||||
pci_find_subsys() Superseded by pci_get_subsys()
|
||||
pci_find_slot() Superseded by pci_get_slot()
|
||||
pcibios_find_class() Superseded by pci_get_class()
|
||||
pci_find_class() Superseded by pci_get_class()
|
||||
pci_(read|write)_*_nodev() Superseded by pci_bus_(read|write)_*()
|
||||
|
@ -165,40 +165,9 @@ Description:
|
||||
These functions are intended for use by individual drivers, and are defined in
|
||||
struct pci_driver:
|
||||
|
||||
int (*save_state) (struct pci_dev *dev, u32 state);
|
||||
int (*suspend) (struct pci_dev *dev, u32 state);
|
||||
int (*suspend) (struct pci_dev *dev, pm_message_t state);
|
||||
int (*resume) (struct pci_dev *dev);
|
||||
int (*enable_wake) (struct pci_dev *dev, u32 state, int enable);
|
||||
|
||||
|
||||
save_state
|
||||
----------
|
||||
|
||||
Usage:
|
||||
|
||||
if (dev->driver && dev->driver->save_state)
|
||||
dev->driver->save_state(dev,state);
|
||||
|
||||
The driver should use this callback to save device state. It should take into
|
||||
account the current state of the device and the requested state in order to
|
||||
avoid any unnecessary operations.
|
||||
|
||||
For example, a video card that supports all 4 states (D0-D3), all controller
|
||||
context is preserved when entering D1, but the screen is placed into a low power
|
||||
state (blanked).
|
||||
|
||||
The driver can also interpret this function as a notification that it may be
|
||||
entering a sleep state in the near future. If it knows that the device cannot
|
||||
enter the requested state, either because of lack of support for it, or because
|
||||
the device is middle of some critical operation, then it should fail.
|
||||
|
||||
This function should not be used to set any state in the device or the driver
|
||||
because the device may not actually enter the sleep state (e.g. another driver
|
||||
later causes causes a global state transition to fail).
|
||||
|
||||
Note that in intermediate low power states, a device's I/O and memory spaces may
|
||||
be disabled and may not be available in subsequent transitions to lower power
|
||||
states.
|
||||
int (*enable_wake) (struct pci_dev *dev, pci_power_t state, int enable);
|
||||
|
||||
|
||||
suspend
|
||||
|
@ -280,6 +280,10 @@ config ISA
|
||||
(MCA) or VESA. ISA is an older system, now being displaced by PCI;
|
||||
newer boards don't support it. If you have ISA, say Y, otherwise N.
|
||||
|
||||
config ISA_DMA_API
|
||||
bool
|
||||
default y
|
||||
|
||||
config PCI
|
||||
bool
|
||||
depends on !ALPHA_JENSEN
|
||||
|
@ -266,6 +266,10 @@ config ISA_DMA
|
||||
depends on FOOTBRIDGE_HOST || ARCH_SHARK
|
||||
default y
|
||||
|
||||
config ISA_DMA_API
|
||||
bool
|
||||
default y
|
||||
|
||||
config PCI
|
||||
bool "PCI support" if ARCH_INTEGRATOR_AP
|
||||
default y if ARCH_SHARK || FOOTBRIDGE_HOST || ARCH_IOP3XX || ARCH_IXP4XX || ARCH_IXP2000
|
||||
|
@ -18,48 +18,30 @@
|
||||
* Please select one of the following when turning on debugging.
|
||||
*/
|
||||
#ifdef DEBUG
|
||||
#if defined(CONFIG_DEBUG_DC21285_PORT)
|
||||
.macro loadsp, rb
|
||||
mov \rb, #0x42000000
|
||||
.endm
|
||||
.macro writeb, rb
|
||||
str \rb, [r3, #0x160]
|
||||
.endm
|
||||
#elif defined(CONFIG_DEBUG_ICEDCC)
|
||||
|
||||
#include <asm/arch/debug-macro.S>
|
||||
|
||||
#if defined(CONFIG_DEBUG_ICEDCC)
|
||||
.macro loadsp, rb
|
||||
.endm
|
||||
.macro writeb, rb
|
||||
mcr p14, 0, \rb, c0, c1, 0
|
||||
.macro writeb, ch, rb
|
||||
mcr p14, 0, \ch, c0, c1, 0
|
||||
.endm
|
||||
#elif defined(CONFIG_FOOTBRIDGE)
|
||||
#else
|
||||
.macro writeb, ch, rb
|
||||
senduart \ch, \rb
|
||||
.endm
|
||||
|
||||
#if defined(CONFIG_FOOTBRIDGE) || \
|
||||
defined(CONFIG_ARCH_RPC) || \
|
||||
defined(CONFIG_ARCH_INTEGRATOR) || \
|
||||
defined(CONFIG_ARCH_PXA) || \
|
||||
defined(CONFIG_ARCH_IXP4XX) || \
|
||||
defined(CONFIG_ARCH_IXP2000) || \
|
||||
defined(CONFIG_ARCH_LH7A40X) || \
|
||||
defined(CONFIG_ARCH_OMAP)
|
||||
.macro loadsp, rb
|
||||
mov \rb, #0x7c000000
|
||||
.endm
|
||||
.macro writeb, rb
|
||||
strb \rb, [r3, #0x3f8]
|
||||
.endm
|
||||
#elif defined(CONFIG_ARCH_RPC)
|
||||
.macro loadsp, rb
|
||||
mov \rb, #0x03000000
|
||||
orr \rb, \rb, #0x00010000
|
||||
.endm
|
||||
.macro writeb, rb
|
||||
strb \rb, [r3, #0x3f8 << 2]
|
||||
.endm
|
||||
#elif defined(CONFIG_ARCH_INTEGRATOR)
|
||||
.macro loadsp, rb
|
||||
mov \rb, #0x16000000
|
||||
.endm
|
||||
.macro writeb, rb
|
||||
strb \rb, [r3, #0]
|
||||
.endm
|
||||
#elif defined(CONFIG_ARCH_PXA) /* Xscale-type */
|
||||
.macro loadsp, rb
|
||||
mov \rb, #0x40000000
|
||||
orr \rb, \rb, #0x00100000
|
||||
.endm
|
||||
.macro writeb, rb
|
||||
strb \rb, [r3, #0]
|
||||
addruart \rb
|
||||
.endm
|
||||
#elif defined(CONFIG_ARCH_SA1100)
|
||||
.macro loadsp, rb
|
||||
@ -70,64 +52,21 @@
|
||||
add \rb, \rb, #0x00010000 @ Ser1
|
||||
# endif
|
||||
.endm
|
||||
.macro writeb, rb
|
||||
str \rb, [r3, #0x14] @ UTDR
|
||||
.endm
|
||||
#elif defined(CONFIG_ARCH_IXP4XX)
|
||||
.macro loadsp, rb
|
||||
mov \rb, #0xc8000000
|
||||
.endm
|
||||
.macro writeb, rb
|
||||
str \rb, [r3, #0]
|
||||
#elif defined(CONFIG_ARCH_IXP2000)
|
||||
.macro loadsp, rb
|
||||
mov \rb, #0xc0000000
|
||||
orr \rb, \rb, #0x00030000
|
||||
.endm
|
||||
.macro writeb, rb
|
||||
str \rb, [r3, #0]
|
||||
.endm
|
||||
#elif defined(CONFIG_ARCH_LH7A40X)
|
||||
.macro loadsp, rb
|
||||
ldr \rb, =0x80000700 @ UART2 UARTBASE
|
||||
.endm
|
||||
.macro writeb, rb
|
||||
strb \rb, [r3, #0]
|
||||
.endm
|
||||
#elif defined(CONFIG_ARCH_OMAP)
|
||||
.macro loadsp, rb
|
||||
mov \rb, #0xff000000 @ physical base address
|
||||
add \rb, \rb, #0x00fb0000
|
||||
#if defined(CONFIG_OMAP_LL_DEBUG_UART2) || defined(CONFIG_OMAP_LL_DEBUG_UART3)
|
||||
add \rb, \rb, #0x00000800
|
||||
#endif
|
||||
#ifdef CONFIG_OMAP_LL_DEBUG_UART3
|
||||
add \rb, \rb, #0x00009000
|
||||
#endif
|
||||
.endm
|
||||
.macro writeb, rb
|
||||
strb \rb, [r3]
|
||||
.endm
|
||||
#elif defined(CONFIG_ARCH_IOP331)
|
||||
.macro loadsp, rb
|
||||
mov \rb, #0xff000000
|
||||
orr \rb, \rb, #0x00ff0000
|
||||
orr \rb, \rb, #0x0000f700 @ location of the UART
|
||||
.endm
|
||||
.macro writeb, rb
|
||||
str \rb, [r3, #0]
|
||||
.endm
|
||||
#elif defined(CONFIG_ARCH_S3C2410)
|
||||
.macro loadsp, rb
|
||||
.macro loadsp, rb
|
||||
mov \rb, #0x50000000
|
||||
add \rb, \rb, #0x4000 * CONFIG_S3C2410_LOWLEVEL_UART_PORT
|
||||
.endm
|
||||
.macro writeb, rb
|
||||
strb \rb, [r3, #0x20]
|
||||
.endm
|
||||
#else
|
||||
#error no serial architecture defined
|
||||
#endif
|
||||
#endif
|
||||
#endif
|
||||
|
||||
.macro kputc,val
|
||||
@ -734,7 +673,7 @@ puts: loadsp r3
|
||||
1: ldrb r2, [r0], #1
|
||||
teq r2, #0
|
||||
moveq pc, lr
|
||||
2: writeb r2
|
||||
2: writeb r2, r3
|
||||
mov r1, #0x00020000
|
||||
3: subs r1, r1, #1
|
||||
bne 3b
|
||||
|
@ -26,6 +26,7 @@
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <asm/arch/imxfb.h>
|
||||
#include <asm/hardware.h>
|
||||
|
||||
#include <asm/mach/map.h>
|
||||
@ -228,6 +229,14 @@ static struct platform_device imx_uart2_device = {
|
||||
.resource = imx_uart2_resources,
|
||||
};
|
||||
|
||||
static struct imxfb_mach_info imx_fb_info;
|
||||
|
||||
void __init set_imx_fb_info(struct imxfb_mach_info *hard_imx_fb_info)
|
||||
{
|
||||
memcpy(&imx_fb_info,hard_imx_fb_info,sizeof(struct imxfb_mach_info));
|
||||
}
|
||||
EXPORT_SYMBOL(set_imx_fb_info);
|
||||
|
||||
static struct resource imxfb_resources[] = {
|
||||
[0] = {
|
||||
.start = 0x00205000,
|
||||
@ -241,9 +250,16 @@ static struct resource imxfb_resources[] = {
|
||||
},
|
||||
};
|
||||
|
||||
static u64 fb_dma_mask = ~(u64)0;
|
||||
|
||||
static struct platform_device imxfb_device = {
|
||||
.name = "imx-fb",
|
||||
.id = 0,
|
||||
.dev = {
|
||||
.platform_data = &imx_fb_info,
|
||||
.dma_mask = &fb_dma_mask,
|
||||
.coherent_dma_mask = 0xffffffff,
|
||||
},
|
||||
.num_resources = ARRAY_SIZE(imxfb_resources),
|
||||
.resource = imxfb_resources,
|
||||
};
|
||||
|
@ -216,7 +216,9 @@ integrator_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
|
||||
|
||||
write_seqlock(&xtime_lock);
|
||||
|
||||
// ...clear the interrupt
|
||||
/*
|
||||
* clear the interrupt
|
||||
*/
|
||||
timer1->TimerClear = 1;
|
||||
|
||||
timer_tick(regs);
|
||||
|
@ -501,15 +501,6 @@ pci_set_dma_mask(struct pci_dev *dev, u64 mask)
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
int
|
||||
pci_dac_set_dma_mask(struct pci_dev *dev, u64 mask)
|
||||
{
|
||||
if (mask >= SZ_64M - 1 )
|
||||
return 0;
|
||||
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
int
|
||||
pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
|
||||
{
|
||||
@ -520,7 +511,6 @@ pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(pci_set_dma_mask);
|
||||
EXPORT_SYMBOL(pci_dac_set_dma_mask);
|
||||
EXPORT_SYMBOL(pci_set_consistent_dma_mask);
|
||||
EXPORT_SYMBOL(ixp4xx_pci_read);
|
||||
EXPORT_SYMBOL(ixp4xx_pci_write);
|
||||
|
@ -413,6 +413,7 @@ config CPU_BPREDICT_DISABLE
|
||||
config HAS_TLS_REG
|
||||
bool
|
||||
depends on CPU_32v6 && !CPU_32v5 && !CPU_32v4 && !CPU_32v3
|
||||
default y
|
||||
help
|
||||
This selects support for the CP15 thread register.
|
||||
It is defined to be available on ARMv6 or later. However
|
||||
|
@ -89,6 +89,10 @@ config PAGESIZE_16
|
||||
machine with 4MB of memory.
|
||||
endmenu
|
||||
|
||||
config ISA_DMA_API
|
||||
bool
|
||||
default y
|
||||
|
||||
menu "General setup"
|
||||
|
||||
# Compressed boot loader in ROM. Yes, we really want to ask about
|
||||
|
@ -1173,6 +1173,10 @@ source "drivers/pci/pcie/Kconfig"
|
||||
|
||||
source "drivers/pci/Kconfig"
|
||||
|
||||
config ISA_DMA_API
|
||||
bool
|
||||
default y
|
||||
|
||||
config ISA
|
||||
bool "ISA support"
|
||||
depends on !(X86_VOYAGER || X86_VISWS)
|
||||
|
@ -217,6 +217,16 @@ config IA64_SGI_SN_SIM
|
||||
If you are compiling a kernel that will run under SGI's IA-64
|
||||
simulator (Medusa) then say Y, otherwise say N.
|
||||
|
||||
config IA64_SGI_SN_XP
|
||||
tristate "Support communication between SGI SSIs"
|
||||
depends on MSPEC
|
||||
help
|
||||
An SGI machine can be divided into multiple Single System
|
||||
Images which act independently of each other and have
|
||||
hardware based memory protection from the others. Enabling
|
||||
this feature will allow for direct communication between SSIs
|
||||
based on a network adapter and DMA messaging.
|
||||
|
||||
config FORCE_MAX_ZONEORDER
|
||||
int
|
||||
default "18"
|
||||
@ -261,6 +271,15 @@ config HOTPLUG_CPU
|
||||
can be controlled through /sys/devices/system/cpu/cpu#.
|
||||
Say N if you want to disable CPU hotplug.
|
||||
|
||||
config SCHED_SMT
|
||||
bool "SMT scheduler support"
|
||||
depends on SMP
|
||||
default off
|
||||
help
|
||||
Improves the CPU scheduler's decision making when dealing with
|
||||
Intel IA64 chips with MultiThreading at a cost of slightly increased
|
||||
overhead in some places. If unsure say N here.
|
||||
|
||||
config PREEMPT
|
||||
bool "Preemptible Kernel"
|
||||
help
|
||||
|
@ -1,7 +1,7 @@
|
||||
#
|
||||
# Automatically generated make config: don't edit
|
||||
# Linux kernel version: 2.6.11-rc2
|
||||
# Sat Jan 22 11:17:02 2005
|
||||
# Linux kernel version: 2.6.12-rc3
|
||||
# Tue May 3 15:55:04 2005
|
||||
#
|
||||
|
||||
#
|
||||
@ -10,6 +10,7 @@
|
||||
CONFIG_EXPERIMENTAL=y
|
||||
CONFIG_CLEAN_COMPILE=y
|
||||
CONFIG_LOCK_KERNEL=y
|
||||
CONFIG_INIT_ENV_ARG_LIMIT=32
|
||||
|
||||
#
|
||||
# General setup
|
||||
@ -21,24 +22,27 @@ CONFIG_POSIX_MQUEUE=y
|
||||
# CONFIG_BSD_PROCESS_ACCT is not set
|
||||
CONFIG_SYSCTL=y
|
||||
# CONFIG_AUDIT is not set
|
||||
CONFIG_LOG_BUF_SHIFT=20
|
||||
CONFIG_HOTPLUG=y
|
||||
CONFIG_KOBJECT_UEVENT=y
|
||||
CONFIG_IKCONFIG=y
|
||||
CONFIG_IKCONFIG_PROC=y
|
||||
# CONFIG_CPUSETS is not set
|
||||
# CONFIG_EMBEDDED is not set
|
||||
CONFIG_KALLSYMS=y
|
||||
CONFIG_KALLSYMS_ALL=y
|
||||
# CONFIG_KALLSYMS_EXTRA_PASS is not set
|
||||
CONFIG_PRINTK=y
|
||||
CONFIG_BUG=y
|
||||
CONFIG_BASE_FULL=y
|
||||
CONFIG_FUTEX=y
|
||||
CONFIG_EPOLL=y
|
||||
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
|
||||
CONFIG_SHMEM=y
|
||||
CONFIG_CC_ALIGN_FUNCTIONS=0
|
||||
CONFIG_CC_ALIGN_LABELS=0
|
||||
CONFIG_CC_ALIGN_LOOPS=0
|
||||
CONFIG_CC_ALIGN_JUMPS=0
|
||||
# CONFIG_TINY_SHMEM is not set
|
||||
CONFIG_BASE_SMALL=0
|
||||
|
||||
#
|
||||
# Loadable module support
|
||||
@ -85,6 +89,7 @@ CONFIG_FORCE_MAX_ZONEORDER=18
|
||||
CONFIG_SMP=y
|
||||
CONFIG_NR_CPUS=4
|
||||
CONFIG_HOTPLUG_CPU=y
|
||||
# CONFIG_SCHED_SMT is not set
|
||||
# CONFIG_PREEMPT is not set
|
||||
CONFIG_HAVE_DEC_LOCK=y
|
||||
CONFIG_IA32_SUPPORT=y
|
||||
@ -135,6 +140,7 @@ CONFIG_PCI_DOMAINS=y
|
||||
# CONFIG_PCI_MSI is not set
|
||||
CONFIG_PCI_LEGACY_PROC=y
|
||||
CONFIG_PCI_NAMES=y
|
||||
# CONFIG_PCI_DEBUG is not set
|
||||
|
||||
#
|
||||
# PCI Hotplug Support
|
||||
@ -151,10 +157,6 @@ CONFIG_HOTPLUG_PCI_ACPI=m
|
||||
#
|
||||
# CONFIG_PCCARD is not set
|
||||
|
||||
#
|
||||
# PC-card bridges
|
||||
#
|
||||
|
||||
#
|
||||
# Device Drivers
|
||||
#
|
||||
@ -195,9 +197,10 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m
|
||||
CONFIG_BLK_DEV_NBD=m
|
||||
# CONFIG_BLK_DEV_SX8 is not set
|
||||
# CONFIG_BLK_DEV_UB is not set
|
||||
CONFIG_BLK_DEV_RAM=m
|
||||
CONFIG_BLK_DEV_RAM=y
|
||||
CONFIG_BLK_DEV_RAM_COUNT=16
|
||||
CONFIG_BLK_DEV_RAM_SIZE=4096
|
||||
CONFIG_BLK_DEV_INITRD=y
|
||||
CONFIG_INITRAMFS_SOURCE=""
|
||||
# CONFIG_CDROM_PKTCDVD is not set
|
||||
|
||||
@ -313,7 +316,6 @@ CONFIG_SCSI_FC_ATTRS=y
|
||||
# CONFIG_SCSI_BUSLOGIC is not set
|
||||
# CONFIG_SCSI_DMX3191D is not set
|
||||
# CONFIG_SCSI_EATA is not set
|
||||
# CONFIG_SCSI_EATA_PIO is not set
|
||||
# CONFIG_SCSI_FUTURE_DOMAIN is not set
|
||||
# CONFIG_SCSI_GDTH is not set
|
||||
# CONFIG_SCSI_IPS is not set
|
||||
@ -325,7 +327,6 @@ CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
|
||||
CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
|
||||
# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
|
||||
# CONFIG_SCSI_IPR is not set
|
||||
# CONFIG_SCSI_QLOGIC_ISP is not set
|
||||
CONFIG_SCSI_QLOGIC_FC=y
|
||||
# CONFIG_SCSI_QLOGIC_FC_FIRMWARE is not set
|
||||
CONFIG_SCSI_QLOGIC_1280=y
|
||||
@ -336,6 +337,7 @@ CONFIG_SCSI_QLA22XX=m
|
||||
CONFIG_SCSI_QLA2300=m
|
||||
CONFIG_SCSI_QLA2322=m
|
||||
# CONFIG_SCSI_QLA6312 is not set
|
||||
# CONFIG_SCSI_LPFC is not set
|
||||
# CONFIG_SCSI_DC395x is not set
|
||||
# CONFIG_SCSI_DC390T is not set
|
||||
# CONFIG_SCSI_DEBUG is not set
|
||||
@ -358,6 +360,7 @@ CONFIG_DM_CRYPT=m
|
||||
CONFIG_DM_SNAPSHOT=m
|
||||
CONFIG_DM_MIRROR=m
|
||||
CONFIG_DM_ZERO=m
|
||||
# CONFIG_DM_MULTIPATH is not set
|
||||
|
||||
#
|
||||
# Fusion MPT device support
|
||||
@ -386,7 +389,6 @@ CONFIG_NET=y
|
||||
#
|
||||
CONFIG_PACKET=y
|
||||
# CONFIG_PACKET_MMAP is not set
|
||||
CONFIG_NETLINK_DEV=y
|
||||
CONFIG_UNIX=y
|
||||
# CONFIG_NET_KEY is not set
|
||||
CONFIG_INET=y
|
||||
@ -446,7 +448,6 @@ CONFIG_DUMMY=m
|
||||
# CONFIG_BONDING is not set
|
||||
# CONFIG_EQUALIZER is not set
|
||||
# CONFIG_TUN is not set
|
||||
# CONFIG_ETHERTAP is not set
|
||||
|
||||
#
|
||||
# ARCnet devices
|
||||
@ -484,7 +485,6 @@ CONFIG_NET_PCI=y
|
||||
# CONFIG_DGRS is not set
|
||||
CONFIG_EEPRO100=m
|
||||
CONFIG_E100=m
|
||||
# CONFIG_E100_NAPI is not set
|
||||
# CONFIG_FEALNX is not set
|
||||
# CONFIG_NATSEMI is not set
|
||||
# CONFIG_NE2K_PCI is not set
|
||||
@ -565,25 +565,6 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
|
||||
# CONFIG_INPUT_EVDEV is not set
|
||||
# CONFIG_INPUT_EVBUG is not set
|
||||
|
||||
#
|
||||
# Input I/O drivers
|
||||
#
|
||||
CONFIG_GAMEPORT=m
|
||||
CONFIG_SOUND_GAMEPORT=m
|
||||
# CONFIG_GAMEPORT_NS558 is not set
|
||||
# CONFIG_GAMEPORT_L4 is not set
|
||||
# CONFIG_GAMEPORT_EMU10K1 is not set
|
||||
# CONFIG_GAMEPORT_VORTEX is not set
|
||||
# CONFIG_GAMEPORT_FM801 is not set
|
||||
# CONFIG_GAMEPORT_CS461X is not set
|
||||
CONFIG_SERIO=y
|
||||
CONFIG_SERIO_I8042=y
|
||||
# CONFIG_SERIO_SERPORT is not set
|
||||
# CONFIG_SERIO_CT82C710 is not set
|
||||
# CONFIG_SERIO_PCIPS2 is not set
|
||||
CONFIG_SERIO_LIBPS2=y
|
||||
# CONFIG_SERIO_RAW is not set
|
||||
|
||||
#
|
||||
# Input Device Drivers
|
||||
#
|
||||
@ -601,6 +582,24 @@ CONFIG_MOUSE_PS2=y
|
||||
# CONFIG_INPUT_TOUCHSCREEN is not set
|
||||
# CONFIG_INPUT_MISC is not set
|
||||
|
||||
#
|
||||
# Hardware I/O ports
|
||||
#
|
||||
CONFIG_SERIO=y
|
||||
CONFIG_SERIO_I8042=y
|
||||
# CONFIG_SERIO_SERPORT is not set
|
||||
# CONFIG_SERIO_PCIPS2 is not set
|
||||
CONFIG_SERIO_LIBPS2=y
|
||||
# CONFIG_SERIO_RAW is not set
|
||||
CONFIG_GAMEPORT=m
|
||||
# CONFIG_GAMEPORT_NS558 is not set
|
||||
# CONFIG_GAMEPORT_L4 is not set
|
||||
# CONFIG_GAMEPORT_EMU10K1 is not set
|
||||
# CONFIG_GAMEPORT_VORTEX is not set
|
||||
# CONFIG_GAMEPORT_FM801 is not set
|
||||
# CONFIG_GAMEPORT_CS461X is not set
|
||||
CONFIG_SOUND_GAMEPORT=m
|
||||
|
||||
#
|
||||
# Character devices
|
||||
#
|
||||
@ -615,6 +614,8 @@ CONFIG_SERIAL_NONSTANDARD=y
|
||||
# CONFIG_SYNCLINK is not set
|
||||
# CONFIG_SYNCLINKMP is not set
|
||||
# CONFIG_N_HDLC is not set
|
||||
# CONFIG_SPECIALIX is not set
|
||||
# CONFIG_SX is not set
|
||||
# CONFIG_STALDRV is not set
|
||||
|
||||
#
|
||||
@ -635,6 +636,7 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y
|
||||
#
|
||||
CONFIG_SERIAL_CORE=y
|
||||
CONFIG_SERIAL_CORE_CONSOLE=y
|
||||
# CONFIG_SERIAL_JSM is not set
|
||||
CONFIG_UNIX98_PTYS=y
|
||||
CONFIG_LEGACY_PTYS=y
|
||||
CONFIG_LEGACY_PTY_COUNT=256
|
||||
@ -670,6 +672,12 @@ CONFIG_HPET=y
|
||||
# CONFIG_HPET_RTC_IRQ is not set
|
||||
CONFIG_HPET_MMAP=y
|
||||
CONFIG_MAX_RAW_DEVS=256
|
||||
# CONFIG_HANGCHECK_TIMER is not set
|
||||
|
||||
#
|
||||
# TPM devices
|
||||
#
|
||||
# CONFIG_TCG_TPM is not set
|
||||
|
||||
#
|
||||
# I2C support
|
||||
@ -705,7 +713,6 @@ CONFIG_MAX_RAW_DEVS=256
|
||||
#
|
||||
CONFIG_VGA_CONSOLE=y
|
||||
CONFIG_DUMMY_CONSOLE=y
|
||||
# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
|
||||
|
||||
#
|
||||
# Sound
|
||||
@ -715,6 +722,8 @@ CONFIG_DUMMY_CONSOLE=y
|
||||
#
|
||||
# USB support
|
||||
#
|
||||
CONFIG_USB_ARCH_HAS_HCD=y
|
||||
CONFIG_USB_ARCH_HAS_OHCI=y
|
||||
CONFIG_USB=y
|
||||
# CONFIG_USB_DEBUG is not set
|
||||
|
||||
@ -726,8 +735,6 @@ CONFIG_USB_DEVICEFS=y
|
||||
# CONFIG_USB_DYNAMIC_MINORS is not set
|
||||
# CONFIG_USB_SUSPEND is not set
|
||||
# CONFIG_USB_OTG is not set
|
||||
CONFIG_USB_ARCH_HAS_HCD=y
|
||||
CONFIG_USB_ARCH_HAS_OHCI=y
|
||||
|
||||
#
|
||||
# USB Host Controller Drivers
|
||||
@ -736,6 +743,8 @@ CONFIG_USB_EHCI_HCD=m
|
||||
# CONFIG_USB_EHCI_SPLIT_ISO is not set
|
||||
# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
|
||||
CONFIG_USB_OHCI_HCD=m
|
||||
# CONFIG_USB_OHCI_BIG_ENDIAN is not set
|
||||
CONFIG_USB_OHCI_LITTLE_ENDIAN=y
|
||||
CONFIG_USB_UHCI_HCD=y
|
||||
# CONFIG_USB_SL811_HCD is not set
|
||||
|
||||
@ -751,12 +760,11 @@ CONFIG_USB_UHCI_HCD=y
|
||||
#
|
||||
CONFIG_USB_STORAGE=m
|
||||
# CONFIG_USB_STORAGE_DEBUG is not set
|
||||
# CONFIG_USB_STORAGE_RW_DETECT is not set
|
||||
# CONFIG_USB_STORAGE_DATAFAB is not set
|
||||
# CONFIG_USB_STORAGE_FREECOM is not set
|
||||
# CONFIG_USB_STORAGE_ISD200 is not set
|
||||
# CONFIG_USB_STORAGE_DPCM is not set
|
||||
# CONFIG_USB_STORAGE_HP8200e is not set
|
||||
# CONFIG_USB_STORAGE_USBAT is not set
|
||||
# CONFIG_USB_STORAGE_SDDR09 is not set
|
||||
# CONFIG_USB_STORAGE_SDDR55 is not set
|
||||
# CONFIG_USB_STORAGE_JUMPSHOT is not set
|
||||
@ -800,6 +808,7 @@ CONFIG_USB_HIDINPUT=y
|
||||
# CONFIG_USB_PEGASUS is not set
|
||||
# CONFIG_USB_RTL8150 is not set
|
||||
# CONFIG_USB_USBNET is not set
|
||||
# CONFIG_USB_MON is not set
|
||||
|
||||
#
|
||||
# USB port drivers
|
||||
@ -824,6 +833,7 @@ CONFIG_USB_HIDINPUT=y
|
||||
# CONFIG_USB_PHIDGETKIT is not set
|
||||
# CONFIG_USB_PHIDGETSERVO is not set
|
||||
# CONFIG_USB_IDMOUSE is not set
|
||||
# CONFIG_USB_SISUSBVGA is not set
|
||||
# CONFIG_USB_TEST is not set
|
||||
|
||||
#
|
||||
@ -867,7 +877,12 @@ CONFIG_REISERFS_FS_POSIX_ACL=y
|
||||
CONFIG_REISERFS_FS_SECURITY=y
|
||||
# CONFIG_JFS_FS is not set
|
||||
CONFIG_FS_POSIX_ACL=y
|
||||
|
||||
#
|
||||
# XFS support
|
||||
#
|
||||
CONFIG_XFS_FS=y
|
||||
CONFIG_XFS_EXPORT=y
|
||||
# CONFIG_XFS_RT is not set
|
||||
# CONFIG_XFS_QUOTA is not set
|
||||
# CONFIG_XFS_SECURITY is not set
|
||||
@ -945,7 +960,7 @@ CONFIG_NFSD_V4=y
|
||||
CONFIG_NFSD_TCP=y
|
||||
CONFIG_LOCKD=m
|
||||
CONFIG_LOCKD_V4=y
|
||||
CONFIG_EXPORTFS=m
|
||||
CONFIG_EXPORTFS=y
|
||||
CONFIG_SUNRPC=m
|
||||
CONFIG_SUNRPC_GSS=m
|
||||
CONFIG_RPCSEC_GSS_KRB5=m
|
||||
@ -1042,8 +1057,10 @@ CONFIG_GENERIC_IRQ_PROBE=y
|
||||
#
|
||||
# Kernel hacking
|
||||
#
|
||||
# CONFIG_PRINTK_TIME is not set
|
||||
CONFIG_DEBUG_KERNEL=y
|
||||
CONFIG_MAGIC_SYSRQ=y
|
||||
CONFIG_LOG_BUF_SHIFT=20
|
||||
# CONFIG_SCHEDSTATS is not set
|
||||
# CONFIG_DEBUG_SLAB is not set
|
||||
# CONFIG_DEBUG_SPINLOCK is not set
|
||||
@ -1077,6 +1094,7 @@ CONFIG_CRYPTO_MD5=m
|
||||
# CONFIG_CRYPTO_SHA256 is not set
|
||||
# CONFIG_CRYPTO_SHA512 is not set
|
||||
# CONFIG_CRYPTO_WP512 is not set
|
||||
# CONFIG_CRYPTO_TGR192 is not set
|
||||
CONFIG_CRYPTO_DES=m
|
||||
# CONFIG_CRYPTO_BLOWFISH is not set
|
||||
# CONFIG_CRYPTO_TWOFISH is not set
|
||||
|
@ -1944,43 +1944,17 @@ sba_connect_bus(struct pci_bus *bus)
|
||||
static void __init
|
||||
sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
|
||||
{
|
||||
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
|
||||
union acpi_object *obj;
|
||||
acpi_handle phandle;
|
||||
unsigned int node;
|
||||
int pxm;
|
||||
|
||||
ioc->node = MAX_NUMNODES;
|
||||
|
||||
/*
|
||||
* Check for a _PXM on this node first. We don't typically see
|
||||
* one here, so we'll end up getting it from the parent.
|
||||
*/
|
||||
if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PXM", NULL, &buffer))) {
|
||||
if (ACPI_FAILURE(acpi_get_parent(handle, &phandle)))
|
||||
return;
|
||||
pxm = acpi_get_pxm(handle);
|
||||
|
||||
/* Reset the acpi buffer */
|
||||
buffer.length = ACPI_ALLOCATE_BUFFER;
|
||||
buffer.pointer = NULL;
|
||||
|
||||
if (ACPI_FAILURE(acpi_evaluate_object(phandle, "_PXM", NULL,
|
||||
&buffer)))
|
||||
return;
|
||||
}
|
||||
|
||||
if (!buffer.length || !buffer.pointer)
|
||||
if (pxm < 0)
|
||||
return;
|
||||
|
||||
obj = buffer.pointer;
|
||||
|
||||
if (obj->type != ACPI_TYPE_INTEGER ||
|
||||
obj->integer.value >= MAX_PXM_DOMAINS) {
|
||||
acpi_os_free(buffer.pointer);
|
||||
return;
|
||||
}
|
||||
|
||||
node = pxm_to_nid_map[obj->integer.value];
|
||||
acpi_os_free(buffer.pointer);
|
||||
node = pxm_to_nid_map[pxm];
|
||||
|
||||
if (node >= MAX_NUMNODES || !node_online(node))
|
||||
return;
|
||||
|
@ -779,7 +779,7 @@ acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret)
|
||||
union acpi_object *obj;
|
||||
struct acpi_table_iosapic *iosapic;
|
||||
unsigned int gsi_base;
|
||||
int node;
|
||||
int pxm, node;
|
||||
|
||||
/* Only care about objects w/ a method that returns the MADT */
|
||||
if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
|
||||
@ -805,29 +805,16 @@ acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret)
|
||||
gsi_base = iosapic->global_irq_base;
|
||||
|
||||
acpi_os_free(buffer.pointer);
|
||||
buffer.length = ACPI_ALLOCATE_BUFFER;
|
||||
buffer.pointer = NULL;
|
||||
|
||||
/*
|
||||
* OK, it's an IOSAPIC MADT entry, look for a _PXM method to tell
|
||||
* OK, it's an IOSAPIC MADT entry, look for a _PXM value to tell
|
||||
* us which node to associate this with.
|
||||
*/
|
||||
if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PXM", NULL, &buffer)))
|
||||
pxm = acpi_get_pxm(handle);
|
||||
if (pxm < 0)
|
||||
return AE_OK;
|
||||
|
||||
if (!buffer.length || !buffer.pointer)
|
||||
return AE_OK;
|
||||
|
||||
obj = buffer.pointer;
|
||||
|
||||
if (obj->type != ACPI_TYPE_INTEGER ||
|
||||
obj->integer.value >= MAX_PXM_DOMAINS) {
|
||||
acpi_os_free(buffer.pointer);
|
||||
return AE_OK;
|
||||
}
|
||||
|
||||
node = pxm_to_nid_map[obj->integer.value];
|
||||
acpi_os_free(buffer.pointer);
|
||||
node = pxm_to_nid_map[pxm];
|
||||
|
||||
if (node >= MAX_NUMNODES || !node_online(node) ||
|
||||
cpus_empty(node_to_cpumask(node)))
|
||||
|
@ -782,7 +782,7 @@ GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
|
||||
st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit
|
||||
.mem.offset 8,0
|
||||
st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit
|
||||
END(ia64_ret_from_ia32_execve_syscall)
|
||||
END(ia64_ret_from_ia32_execve)
|
||||
// fall through
|
||||
#endif /* CONFIG_IA32_SUPPORT */
|
||||
GLOBAL_ENTRY(ia64_leave_kernel)
|
||||
|
@ -132,8 +132,7 @@ mca_handler_bh(unsigned long paddr)
|
||||
spin_unlock(&mca_bh_lock);
|
||||
|
||||
/* This process is about to be killed itself */
|
||||
force_sig(SIGKILL, current);
|
||||
schedule();
|
||||
do_exit(SIGKILL);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -439,6 +438,7 @@ recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_chec
|
||||
psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr;
|
||||
psr2->cpl = 0;
|
||||
psr2->ri = 0;
|
||||
psr2->i = 0;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
@ -10,6 +10,7 @@
|
||||
|
||||
#include <asm/asmmacro.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/ptrace.h>
|
||||
|
||||
GLOBAL_ENTRY(mca_handler_bhhook)
|
||||
invala // clear RSE ?
|
||||
@ -20,12 +21,21 @@ GLOBAL_ENTRY(mca_handler_bhhook)
|
||||
;;
|
||||
alloc r16=ar.pfs,0,2,1,0 // make a new frame
|
||||
;;
|
||||
mov ar.rsc=0
|
||||
;;
|
||||
mov r13=IA64_KR(CURRENT) // current task pointer
|
||||
;;
|
||||
adds r12=IA64_TASK_THREAD_KSP_OFFSET,r13
|
||||
mov r2=r13
|
||||
;;
|
||||
ld8 r12=[r12] // stack pointer
|
||||
addl r22=IA64_RBS_OFFSET,r2
|
||||
;;
|
||||
mov ar.bspstore=r22
|
||||
;;
|
||||
addl sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2
|
||||
;;
|
||||
adds r2=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
|
||||
;;
|
||||
st1 [r2]=r0 // clear current->thread.on_ustack flag
|
||||
mov loc0=r16
|
||||
movl loc1=mca_handler_bh // recovery C function
|
||||
;;
|
||||
@ -34,7 +44,9 @@ GLOBAL_ENTRY(mca_handler_bhhook)
|
||||
;;
|
||||
mov loc1=rp
|
||||
;;
|
||||
br.call.sptk.many rp=b6 // not return ...
|
||||
ssm psr.i
|
||||
;;
|
||||
br.call.sptk.many rp=b6 // does not return ...
|
||||
;;
|
||||
mov ar.pfs=loc0
|
||||
mov rp=loc1
|
||||
|
@ -1265,6 +1265,8 @@ out:
|
||||
}
|
||||
EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
|
||||
|
||||
extern void update_pal_halt_status(int);
|
||||
|
||||
static int
|
||||
pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
|
||||
{
|
||||
@ -1311,6 +1313,11 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
|
||||
is_syswide,
|
||||
cpu));
|
||||
|
||||
/*
|
||||
* disable default_idle() to go to PAL_HALT
|
||||
*/
|
||||
update_pal_halt_status(0);
|
||||
|
||||
UNLOCK_PFS(flags);
|
||||
|
||||
return 0;
|
||||
@ -1366,6 +1373,12 @@ pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
|
||||
is_syswide,
|
||||
cpu));
|
||||
|
||||
/*
|
||||
* if possible, enable default_idle() to go into PAL_HALT
|
||||
*/
|
||||
if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0)
|
||||
update_pal_halt_status(1);
|
||||
|
||||
UNLOCK_PFS(flags);
|
||||
|
||||
return 0;
|
||||
@ -4202,7 +4215,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
|
||||
DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
|
||||
req->load_pid,
|
||||
ctx->ctx_state));
|
||||
return -EINVAL;
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));
|
||||
@ -4704,16 +4717,26 @@ recheck:
|
||||
if (task == current || ctx->ctx_fl_system) return 0;
|
||||
|
||||
/*
|
||||
* if context is UNLOADED we are safe to go
|
||||
* we are monitoring another thread
|
||||
*/
|
||||
if (state == PFM_CTX_UNLOADED) return 0;
|
||||
|
||||
/*
|
||||
* no command can operate on a zombie context
|
||||
*/
|
||||
if (state == PFM_CTX_ZOMBIE) {
|
||||
DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
|
||||
return -EINVAL;
|
||||
switch(state) {
|
||||
case PFM_CTX_UNLOADED:
|
||||
/*
|
||||
* if context is UNLOADED we are safe to go
|
||||
*/
|
||||
return 0;
|
||||
case PFM_CTX_ZOMBIE:
|
||||
/*
|
||||
* no command can operate on a zombie context
|
||||
*/
|
||||
DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
|
||||
return -EINVAL;
|
||||
case PFM_CTX_MASKED:
|
||||
/*
|
||||
* PMU state has been saved to software even though
|
||||
* the thread may still be running.
|
||||
*/
|
||||
if (cmd != PFM_UNLOAD_CONTEXT) return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -50,7 +50,7 @@
|
||||
#include "sigframe.h"
|
||||
|
||||
void (*ia64_mark_idle)(int);
|
||||
static cpumask_t cpu_idle_map;
|
||||
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
|
||||
|
||||
unsigned long boot_option_idle_override = 0;
|
||||
EXPORT_SYMBOL(boot_option_idle_override);
|
||||
@ -173,7 +173,9 @@ do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall
|
||||
ia64_do_signal(oldset, scr, in_syscall);
|
||||
}
|
||||
|
||||
static int pal_halt = 1;
|
||||
static int pal_halt = 1;
|
||||
static int can_do_pal_halt = 1;
|
||||
|
||||
static int __init nohalt_setup(char * str)
|
||||
{
|
||||
pal_halt = 0;
|
||||
@ -181,16 +183,20 @@ static int __init nohalt_setup(char * str)
|
||||
}
|
||||
__setup("nohalt", nohalt_setup);
|
||||
|
||||
void
|
||||
update_pal_halt_status(int status)
|
||||
{
|
||||
can_do_pal_halt = pal_halt && status;
|
||||
}
|
||||
|
||||
/*
|
||||
* We use this if we don't have any better idle routine..
|
||||
*/
|
||||
void
|
||||
default_idle (void)
|
||||
{
|
||||
unsigned long pmu_active = ia64_getreg(_IA64_REG_PSR) & (IA64_PSR_PP | IA64_PSR_UP);
|
||||
|
||||
while (!need_resched())
|
||||
if (pal_halt && !pmu_active)
|
||||
if (can_do_pal_halt)
|
||||
safe_halt();
|
||||
else
|
||||
cpu_relax();
|
||||
@ -223,20 +229,31 @@ static inline void play_dead(void)
|
||||
}
|
||||
#endif /* CONFIG_HOTPLUG_CPU */
|
||||
|
||||
|
||||
void cpu_idle_wait(void)
|
||||
{
|
||||
int cpu;
|
||||
cpumask_t map;
|
||||
unsigned int cpu, this_cpu = get_cpu();
|
||||
cpumask_t map;
|
||||
|
||||
for_each_online_cpu(cpu)
|
||||
cpu_set(cpu, cpu_idle_map);
|
||||
set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
|
||||
put_cpu();
|
||||
|
||||
wmb();
|
||||
do {
|
||||
ssleep(1);
|
||||
cpus_and(map, cpu_idle_map, cpu_online_map);
|
||||
} while (!cpus_empty(map));
|
||||
cpus_clear(map);
|
||||
for_each_online_cpu(cpu) {
|
||||
per_cpu(cpu_idle_state, cpu) = 1;
|
||||
cpu_set(cpu, map);
|
||||
}
|
||||
|
||||
__get_cpu_var(cpu_idle_state) = 0;
|
||||
|
||||
wmb();
|
||||
do {
|
||||
ssleep(1);
|
||||
for_each_online_cpu(cpu) {
|
||||
if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
|
||||
cpu_clear(cpu, map);
|
||||
}
|
||||
cpus_and(map, map, cpu_online_map);
|
||||
} while (!cpus_empty(map));
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cpu_idle_wait);
|
||||
|
||||
@ -244,7 +261,6 @@ void __attribute__((noreturn))
|
||||
cpu_idle (void)
|
||||
{
|
||||
void (*mark_idle)(int) = ia64_mark_idle;
|
||||
int cpu = smp_processor_id();
|
||||
|
||||
/* endless idle loop with no priority at all */
|
||||
while (1) {
|
||||
@ -255,12 +271,13 @@ cpu_idle (void)
|
||||
while (!need_resched()) {
|
||||
void (*idle)(void);
|
||||
|
||||
if (__get_cpu_var(cpu_idle_state))
|
||||
__get_cpu_var(cpu_idle_state) = 0;
|
||||
|
||||
rmb();
|
||||
if (mark_idle)
|
||||
(*mark_idle)(1);
|
||||
|
||||
if (cpu_isset(cpu, cpu_idle_map))
|
||||
cpu_clear(cpu, cpu_idle_map);
|
||||
rmb();
|
||||
idle = pm_idle;
|
||||
if (!idle)
|
||||
idle = default_idle;
|
||||
|
@ -1,8 +1,8 @@
|
||||
/*
|
||||
* Cache flushing routines.
|
||||
*
|
||||
* Copyright (C) 1999-2001 Hewlett-Packard Co
|
||||
* Copyright (C) 1999-2001 David Mosberger-Tang <davidm@hpl.hp.com>
|
||||
* Copyright (C) 1999-2001, 2005 Hewlett-Packard Co
|
||||
* David Mosberger-Tang <davidm@hpl.hp.com>
|
||||
*/
|
||||
#include <asm/asmmacro.h>
|
||||
#include <asm/page.h>
|
||||
@ -26,7 +26,7 @@ GLOBAL_ENTRY(flush_icache_range)
|
||||
|
||||
mov ar.lc=r8
|
||||
;;
|
||||
.Loop: fc in0 // issuable on M0 only
|
||||
.Loop: fc.i in0 // issuable on M2 only
|
||||
add in0=32,in0
|
||||
br.cloop.sptk.few .Loop
|
||||
;;
|
||||
|
@ -75,6 +75,7 @@ GLOBAL_ENTRY(memcpy)
|
||||
mov f6=f0
|
||||
br.cond.sptk .common_code
|
||||
;;
|
||||
END(memcpy)
|
||||
GLOBAL_ENTRY(__copy_user)
|
||||
.prologue
|
||||
// check dest alignment
|
||||
@ -524,7 +525,6 @@ EK(.ex_handler, (p17) st8 [dst1]=r39,8); \
|
||||
#undef B
|
||||
#undef C
|
||||
#undef D
|
||||
END(memcpy)
|
||||
|
||||
/*
|
||||
* Due to lack of local tag support in gcc 2.x assembler, it is not clear which
|
||||
|
@ -57,10 +57,10 @@ GLOBAL_ENTRY(memset)
|
||||
{ .mmi
|
||||
.prologue
|
||||
alloc tmp = ar.pfs, 3, 0, 0, 0
|
||||
.body
|
||||
lfetch.nt1 [dest] //
|
||||
.save ar.lc, save_lc
|
||||
mov.i save_lc = ar.lc
|
||||
.body
|
||||
} { .mmi
|
||||
mov ret0 = dest // return value
|
||||
cmp.ne p_nz, p_zr = value, r0 // use stf.spill if value is zero
|
||||
|
@ -4,10 +4,15 @@
|
||||
# License. See the file "COPYING" in the main directory of this archive
|
||||
# for more details.
|
||||
#
|
||||
# Copyright (C) 1999,2001-2003 Silicon Graphics, Inc. All Rights Reserved.
|
||||
# Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All Rights Reserved.
|
||||
#
|
||||
|
||||
obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \
|
||||
huberror.o io_init.o iomv.o klconflib.o sn2/
|
||||
obj-$(CONFIG_IA64_GENERIC) += machvec.o
|
||||
obj-$(CONFIG_SGI_TIOCX) += tiocx.o
|
||||
obj-$(CONFIG_IA64_SGI_SN_XP) += xp.o
|
||||
xp-y := xp_main.o xp_nofault.o
|
||||
obj-$(CONFIG_IA64_SGI_SN_XP) += xpc.o
|
||||
xpc-y := xpc_main.o xpc_channel.o xpc_partition.o
|
||||
obj-$(CONFIG_IA64_SGI_SN_XP) += xpnet.o
|
||||
|
@ -174,6 +174,12 @@ static void sn_fixup_ionodes(void)
|
||||
if (status)
|
||||
continue;
|
||||
|
||||
/* Attach the error interrupt handlers */
|
||||
if (nasid & 1)
|
||||
ice_error_init(hubdev);
|
||||
else
|
||||
hub_error_init(hubdev);
|
||||
|
||||
for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++)
|
||||
hubdev->hdi_xwidget_info[widget].xwi_hubinfo = hubdev;
|
||||
|
||||
@ -211,10 +217,6 @@ static void sn_fixup_ionodes(void)
|
||||
sn_flush_device_list;
|
||||
}
|
||||
|
||||
if (!(i & 1))
|
||||
hub_error_init(hubdev);
|
||||
else
|
||||
ice_error_init(hubdev);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -37,6 +37,11 @@ static u64 *sn_oemdata_size, sn_oemdata_bufsize;
|
||||
* This function is the callback routine that SAL calls to log error
|
||||
* info for platform errors. buf is appended to sn_oemdata, resizing as
|
||||
* required.
|
||||
* Note: this is a SAL to OS callback, running under the same rules as the SAL
|
||||
* code. SAL calls are run with preempt disabled so this routine must not
|
||||
* sleep. vmalloc can sleep so print_hook cannot resize the output buffer
|
||||
* itself, instead it must set the required size and return to let the caller
|
||||
* resize the buffer then redrive the SAL call.
|
||||
*/
|
||||
static int print_hook(const char *fmt, ...)
|
||||
{
|
||||
@ -47,18 +52,8 @@ static int print_hook(const char *fmt, ...)
|
||||
vsnprintf(buf, sizeof(buf), fmt, args);
|
||||
va_end(args);
|
||||
len = strlen(buf);
|
||||
while (*sn_oemdata_size + len + 1 > sn_oemdata_bufsize) {
|
||||
u8 *newbuf = vmalloc(sn_oemdata_bufsize += 1000);
|
||||
if (!newbuf) {
|
||||
printk(KERN_ERR "%s: unable to extend sn_oemdata\n",
|
||||
__FUNCTION__);
|
||||
return 0;
|
||||
}
|
||||
memcpy(newbuf, *sn_oemdata, *sn_oemdata_size);
|
||||
vfree(*sn_oemdata);
|
||||
*sn_oemdata = newbuf;
|
||||
}
|
||||
memcpy(*sn_oemdata + *sn_oemdata_size, buf, len + 1);
|
||||
if (*sn_oemdata_size + len <= sn_oemdata_bufsize)
|
||||
memcpy(*sn_oemdata + *sn_oemdata_size, buf, len);
|
||||
*sn_oemdata_size += len;
|
||||
return 0;
|
||||
}
|
||||
@ -98,7 +93,20 @@ sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata,
|
||||
sn_oemdata = oemdata;
|
||||
sn_oemdata_size = oemdata_size;
|
||||
sn_oemdata_bufsize = 0;
|
||||
ia64_sn_plat_specific_err_print(print_hook, (char *)sect_header);
|
||||
*sn_oemdata_size = PAGE_SIZE; /* first guess at how much data will be generated */
|
||||
while (*sn_oemdata_size > sn_oemdata_bufsize) {
|
||||
u8 *newbuf = vmalloc(*sn_oemdata_size);
|
||||
if (!newbuf) {
|
||||
printk(KERN_ERR "%s: unable to extend sn_oemdata\n",
|
||||
__FUNCTION__);
|
||||
return 1;
|
||||
}
|
||||
vfree(*sn_oemdata);
|
||||
*sn_oemdata = newbuf;
|
||||
sn_oemdata_bufsize = *sn_oemdata_size;
|
||||
*sn_oemdata_size = 0;
|
||||
ia64_sn_plat_specific_err_print(print_hook, (char *)sect_header);
|
||||
}
|
||||
up(&sn_oemdata_mutex);
|
||||
return 0;
|
||||
}
|
||||
|
@ -3,7 +3,7 @@
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*
|
||||
* Copyright (C) 1999,2001-2004 Silicon Graphics, Inc. All rights reserved.
|
||||
* Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/config.h>
|
||||
@ -73,6 +73,12 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second);
|
||||
DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
|
||||
EXPORT_PER_CPU_SYMBOL(__sn_hub_info);
|
||||
|
||||
DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_NUMNODES]);
|
||||
EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid);
|
||||
|
||||
DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda);
|
||||
EXPORT_PER_CPU_SYMBOL(__sn_nodepda);
|
||||
|
||||
partid_t sn_partid = -1;
|
||||
EXPORT_SYMBOL(sn_partid);
|
||||
char sn_system_serial_number_string[128];
|
||||
@ -373,11 +379,11 @@ static void __init sn_init_pdas(char **cmdline_p)
|
||||
{
|
||||
cnodeid_t cnode;
|
||||
|
||||
memset(pda->cnodeid_to_nasid_table, -1,
|
||||
sizeof(pda->cnodeid_to_nasid_table));
|
||||
memset(sn_cnodeid_to_nasid, -1,
|
||||
sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid)));
|
||||
for_each_online_node(cnode)
|
||||
pda->cnodeid_to_nasid_table[cnode] =
|
||||
pxm_to_nasid(nid_to_pxm_map[cnode]);
|
||||
sn_cnodeid_to_nasid[cnode] =
|
||||
pxm_to_nasid(nid_to_pxm_map[cnode]);
|
||||
|
||||
numionodes = num_online_nodes();
|
||||
scan_for_ionodes();
|
||||
@ -477,7 +483,8 @@ void __init sn_cpu_init(void)
|
||||
|
||||
cnode = nasid_to_cnodeid(nasid);
|
||||
|
||||
pda->p_nodepda = nodepdaindr[cnode];
|
||||
sn_nodepda = nodepdaindr[cnode];
|
||||
|
||||
pda->led_address =
|
||||
(typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT));
|
||||
pda->led_state = LED_ALWAYS_SET;
|
||||
@ -486,15 +493,18 @@ void __init sn_cpu_init(void)
|
||||
pda->idle_flag = 0;
|
||||
|
||||
if (cpuid != 0) {
|
||||
memcpy(pda->cnodeid_to_nasid_table,
|
||||
pdacpu(0)->cnodeid_to_nasid_table,
|
||||
sizeof(pda->cnodeid_to_nasid_table));
|
||||
/* copy cpu 0's sn_cnodeid_to_nasid table to this cpu's */
|
||||
memcpy(sn_cnodeid_to_nasid,
|
||||
(&per_cpu(__sn_cnodeid_to_nasid, 0)),
|
||||
sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid)));
|
||||
}
|
||||
|
||||
/*
|
||||
* Check for WARs.
|
||||
* Only needs to be done once, on BSP.
|
||||
* Has to be done after loop above, because it uses pda.cnodeid_to_nasid_table[i].
|
||||
* Has to be done after loop above, because it uses this cpu's
|
||||
* sn_cnodeid_to_nasid table which was just initialized if this
|
||||
* isn't cpu 0.
|
||||
* Has to be done before assignment below.
|
||||
*/
|
||||
if (!wars_have_been_checked) {
|
||||
@ -580,8 +590,7 @@ static void __init scan_for_ionodes(void)
|
||||
brd = find_lboard_any(brd, KLTYPE_SNIA);
|
||||
|
||||
while (brd) {
|
||||
pda->cnodeid_to_nasid_table[numionodes] =
|
||||
brd->brd_nasid;
|
||||
sn_cnodeid_to_nasid[numionodes] = brd->brd_nasid;
|
||||
physical_node_map[brd->brd_nasid] = numionodes;
|
||||
root_lboard[numionodes] = brd;
|
||||
numionodes++;
|
||||
@ -602,8 +611,7 @@ static void __init scan_for_ionodes(void)
|
||||
root_lboard[nasid_to_cnodeid(nasid)],
|
||||
KLTYPE_TIO);
|
||||
while (brd) {
|
||||
pda->cnodeid_to_nasid_table[numionodes] =
|
||||
brd->brd_nasid;
|
||||
sn_cnodeid_to_nasid[numionodes] = brd->brd_nasid;
|
||||
physical_node_map[brd->brd_nasid] = numionodes;
|
||||
root_lboard[numionodes] = brd;
|
||||
numionodes++;
|
||||
@ -614,7 +622,6 @@ static void __init scan_for_ionodes(void)
|
||||
brd = find_lboard_any(brd, KLTYPE_TIO);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
int
|
||||
@ -623,7 +630,8 @@ nasid_slice_to_cpuid(int nasid, int slice)
|
||||
long cpu;
|
||||
|
||||
for (cpu=0; cpu < NR_CPUS; cpu++)
|
||||
if (nodepda->phys_cpuid[cpu].nasid == nasid && nodepda->phys_cpuid[cpu].slice == slice)
|
||||
if (cpuid_to_nasid(cpu) == nasid &&
|
||||
cpuid_to_slice(cpu) == slice)
|
||||
return cpu;
|
||||
|
||||
return -1;
|
||||
|
@ -21,6 +21,8 @@
|
||||
#include <asm/sn/types.h>
|
||||
#include <asm/sn/shubio.h>
|
||||
#include <asm/sn/tiocx.h>
|
||||
#include <asm/sn/l1.h>
|
||||
#include <asm/sn/module.h>
|
||||
#include "tio.h"
|
||||
#include "xtalk/xwidgetdev.h"
|
||||
#include "xtalk/hubdev.h"
|
||||
@ -308,14 +310,12 @@ void tiocx_irq_free(struct sn_irq_info *sn_irq_info)
|
||||
}
|
||||
}
|
||||
|
||||
uint64_t
|
||||
tiocx_dma_addr(uint64_t addr)
|
||||
uint64_t tiocx_dma_addr(uint64_t addr)
|
||||
{
|
||||
return PHYS_TO_TIODMA(addr);
|
||||
}
|
||||
|
||||
uint64_t
|
||||
tiocx_swin_base(int nasid)
|
||||
uint64_t tiocx_swin_base(int nasid)
|
||||
{
|
||||
return TIO_SWIN_BASE(nasid, TIOCX_CORELET);
|
||||
}
|
||||
@ -330,19 +330,6 @@ EXPORT_SYMBOL(tiocx_bus_type);
|
||||
EXPORT_SYMBOL(tiocx_dma_addr);
|
||||
EXPORT_SYMBOL(tiocx_swin_base);
|
||||
|
||||
static uint64_t tiocx_get_hubdev_info(u64 handle, u64 address)
|
||||
{
|
||||
|
||||
struct ia64_sal_retval ret_stuff;
|
||||
ret_stuff.status = 0;
|
||||
ret_stuff.v0 = 0;
|
||||
|
||||
ia64_sal_oemcall_nolock(&ret_stuff,
|
||||
SN_SAL_IOIF_GET_HUBDEV_INFO,
|
||||
handle, address, 0, 0, 0, 0, 0);
|
||||
return ret_stuff.v0;
|
||||
}
|
||||
|
||||
static void tio_conveyor_set(nasid_t nasid, int enable_flag)
|
||||
{
|
||||
uint64_t ice_frz;
|
||||
@ -379,7 +366,29 @@ static void tio_corelet_reset(nasid_t nasid, int corelet)
|
||||
udelay(2000);
|
||||
}
|
||||
|
||||
static int fpga_attached(nasid_t nasid)
|
||||
static int tiocx_btchar_get(int nasid)
|
||||
{
|
||||
moduleid_t module_id;
|
||||
geoid_t geoid;
|
||||
int cnodeid;
|
||||
|
||||
cnodeid = nasid_to_cnodeid(nasid);
|
||||
geoid = cnodeid_get_geoid(cnodeid);
|
||||
module_id = geo_module(geoid);
|
||||
return MODULE_GET_BTCHAR(module_id);
|
||||
}
|
||||
|
||||
static int is_fpga_brick(int nasid)
|
||||
{
|
||||
switch (tiocx_btchar_get(nasid)) {
|
||||
case L1_BRICKTYPE_SA:
|
||||
case L1_BRICKTYPE_ATHENA:
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bitstream_loaded(nasid_t nasid)
|
||||
{
|
||||
uint64_t cx_credits;
|
||||
|
||||
@ -396,7 +405,7 @@ static int tiocx_reload(struct cx_dev *cx_dev)
|
||||
int mfg_num = CX_DEV_NONE;
|
||||
nasid_t nasid = cx_dev->cx_id.nasid;
|
||||
|
||||
if (fpga_attached(nasid)) {
|
||||
if (bitstream_loaded(nasid)) {
|
||||
uint64_t cx_id;
|
||||
|
||||
cx_id =
|
||||
@ -427,9 +436,10 @@ static ssize_t show_cxdev_control(struct device *dev, char *buf)
|
||||
{
|
||||
struct cx_dev *cx_dev = to_cx_dev(dev);
|
||||
|
||||
return sprintf(buf, "0x%x 0x%x 0x%x\n",
|
||||
return sprintf(buf, "0x%x 0x%x 0x%x %d\n",
|
||||
cx_dev->cx_id.nasid,
|
||||
cx_dev->cx_id.part_num, cx_dev->cx_id.mfg_num);
|
||||
cx_dev->cx_id.part_num, cx_dev->cx_id.mfg_num,
|
||||
tiocx_btchar_get(cx_dev->cx_id.nasid));
|
||||
}
|
||||
|
||||
static ssize_t store_cxdev_control(struct device *dev, const char *buf,
|
||||
@ -475,20 +485,14 @@ static int __init tiocx_init(void)
|
||||
if ((nasid = cnodeid_to_nasid(cnodeid)) < 0)
|
||||
break; /* No more nasids .. bail out of loop */
|
||||
|
||||
if (nasid & 0x1) { /* TIO's are always odd */
|
||||
if ((nasid & 0x1) && is_fpga_brick(nasid)) {
|
||||
struct hubdev_info *hubdev;
|
||||
uint64_t status;
|
||||
struct xwidget_info *widgetp;
|
||||
|
||||
DBG("Found TIO at nasid 0x%x\n", nasid);
|
||||
|
||||
hubdev =
|
||||
(struct hubdev_info *)(NODEPDA(cnodeid)->pdinfo);
|
||||
status =
|
||||
tiocx_get_hubdev_info(nasid,
|
||||
(uint64_t) __pa(hubdev));
|
||||
if (status)
|
||||
continue;
|
||||
|
||||
widgetp = &hubdev->hdi_xwidget_info[TIOCX_CORELET];
|
||||
|
||||
|
289
arch/ia64/sn/kernel/xp_main.c
Normal file
289
arch/ia64/sn/kernel/xp_main.c
Normal file
@ -0,0 +1,289 @@
|
||||
/*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*
|
||||
* Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved.
|
||||
*/
|
||||
|
||||
|
||||
/*
|
||||
* Cross Partition (XP) base.
|
||||
*
|
||||
* XP provides a base from which its users can interact
|
||||
* with XPC, yet not be dependent on XPC.
|
||||
*
|
||||
*/
|
||||
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/module.h>
|
||||
#include <asm/sn/intr.h>
|
||||
#include <asm/sn/sn_sal.h>
|
||||
#include <asm/sn/xp.h>
|
||||
|
||||
|
||||
/*
|
||||
* Target of nofault PIO read.
|
||||
*/
|
||||
u64 xp_nofault_PIOR_target;
|
||||
|
||||
|
||||
/*
|
||||
* xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level
|
||||
* users of XPC.
|
||||
*/
|
||||
struct xpc_registration xpc_registrations[XPC_NCHANNELS];
|
||||
|
||||
|
||||
/*
|
||||
* Initialize the XPC interface to indicate that XPC isn't loaded.
|
||||
*/
|
||||
static enum xpc_retval xpc_notloaded(void) { return xpcNotLoaded; }
|
||||
|
||||
struct xpc_interface xpc_interface = {
|
||||
(void (*)(int)) xpc_notloaded,
|
||||
(void (*)(int)) xpc_notloaded,
|
||||
(enum xpc_retval (*)(partid_t, int, u32, void **)) xpc_notloaded,
|
||||
(enum xpc_retval (*)(partid_t, int, void *)) xpc_notloaded,
|
||||
(enum xpc_retval (*)(partid_t, int, void *, xpc_notify_func, void *))
|
||||
xpc_notloaded,
|
||||
(void (*)(partid_t, int, void *)) xpc_notloaded,
|
||||
(enum xpc_retval (*)(partid_t, void *)) xpc_notloaded
|
||||
};
|
||||
|
||||
|
||||
/*
|
||||
* XPC calls this when it (the XPC module) has been loaded.
|
||||
*/
|
||||
void
|
||||
xpc_set_interface(void (*connect)(int),
|
||||
void (*disconnect)(int),
|
||||
enum xpc_retval (*allocate)(partid_t, int, u32, void **),
|
||||
enum xpc_retval (*send)(partid_t, int, void *),
|
||||
enum xpc_retval (*send_notify)(partid_t, int, void *,
|
||||
xpc_notify_func, void *),
|
||||
void (*received)(partid_t, int, void *),
|
||||
enum xpc_retval (*partid_to_nasids)(partid_t, void *))
|
||||
{
|
||||
xpc_interface.connect = connect;
|
||||
xpc_interface.disconnect = disconnect;
|
||||
xpc_interface.allocate = allocate;
|
||||
xpc_interface.send = send;
|
||||
xpc_interface.send_notify = send_notify;
|
||||
xpc_interface.received = received;
|
||||
xpc_interface.partid_to_nasids = partid_to_nasids;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* XPC calls this when it (the XPC module) is being unloaded.
|
||||
*/
|
||||
void
|
||||
xpc_clear_interface(void)
|
||||
{
|
||||
xpc_interface.connect = (void (*)(int)) xpc_notloaded;
|
||||
xpc_interface.disconnect = (void (*)(int)) xpc_notloaded;
|
||||
xpc_interface.allocate = (enum xpc_retval (*)(partid_t, int, u32,
|
||||
void **)) xpc_notloaded;
|
||||
xpc_interface.send = (enum xpc_retval (*)(partid_t, int, void *))
|
||||
xpc_notloaded;
|
||||
xpc_interface.send_notify = (enum xpc_retval (*)(partid_t, int, void *,
|
||||
xpc_notify_func, void *)) xpc_notloaded;
|
||||
xpc_interface.received = (void (*)(partid_t, int, void *))
|
||||
xpc_notloaded;
|
||||
xpc_interface.partid_to_nasids = (enum xpc_retval (*)(partid_t, void *))
|
||||
xpc_notloaded;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Register for automatic establishment of a channel connection whenever
|
||||
* a partition comes up.
|
||||
*
|
||||
* Arguments:
|
||||
*
|
||||
* ch_number - channel # to register for connection.
|
||||
* func - function to call for asynchronous notification of channel
|
||||
* state changes (i.e., connection, disconnection, error) and
|
||||
* the arrival of incoming messages.
|
||||
* key - pointer to optional user-defined value that gets passed back
|
||||
* to the user on any callouts made to func.
|
||||
* payload_size - size in bytes of the XPC message's payload area which
|
||||
* contains a user-defined message. The user should make
|
||||
* this large enough to hold their largest message.
|
||||
* nentries - max #of XPC message entries a message queue can contain.
|
||||
* The actual number, which is determined when a connection
|
||||
* is established and may be less then requested, will be
|
||||
* passed to the user via the xpcConnected callout.
|
||||
* assigned_limit - max number of kthreads allowed to be processing
|
||||
* messages (per connection) at any given instant.
|
||||
* idle_limit - max number of kthreads allowed to be idle at any given
|
||||
* instant.
|
||||
*/
|
||||
enum xpc_retval
|
||||
xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
|
||||
u16 nentries, u32 assigned_limit, u32 idle_limit)
|
||||
{
|
||||
struct xpc_registration *registration;
|
||||
|
||||
|
||||
DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
|
||||
DBUG_ON(payload_size == 0 || nentries == 0);
|
||||
DBUG_ON(func == NULL);
|
||||
DBUG_ON(assigned_limit == 0 || idle_limit > assigned_limit);
|
||||
|
||||
registration = &xpc_registrations[ch_number];
|
||||
|
||||
if (down_interruptible(®istration->sema) != 0) {
|
||||
return xpcInterrupted;
|
||||
}
|
||||
|
||||
/* if XPC_CHANNEL_REGISTERED(ch_number) */
|
||||
if (registration->func != NULL) {
|
||||
up(®istration->sema);
|
||||
return xpcAlreadyRegistered;
|
||||
}
|
||||
|
||||
/* register the channel for connection */
|
||||
registration->msg_size = XPC_MSG_SIZE(payload_size);
|
||||
registration->nentries = nentries;
|
||||
registration->assigned_limit = assigned_limit;
|
||||
registration->idle_limit = idle_limit;
|
||||
registration->key = key;
|
||||
registration->func = func;
|
||||
|
||||
up(®istration->sema);
|
||||
|
||||
xpc_interface.connect(ch_number);
|
||||
|
||||
return xpcSuccess;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Remove the registration for automatic connection of the specified channel
|
||||
* when a partition comes up.
|
||||
*
|
||||
* Before returning this xpc_disconnect() will wait for all connections on the
|
||||
* specified channel have been closed/torndown. So the caller can be assured
|
||||
* that they will not be receiving any more callouts from XPC to their
|
||||
* function registered via xpc_connect().
|
||||
*
|
||||
* Arguments:
|
||||
*
|
||||
* ch_number - channel # to unregister.
|
||||
*/
|
||||
void
|
||||
xpc_disconnect(int ch_number)
|
||||
{
|
||||
struct xpc_registration *registration;
|
||||
|
||||
|
||||
DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
|
||||
|
||||
registration = &xpc_registrations[ch_number];
|
||||
|
||||
/*
|
||||
* We've decided not to make this a down_interruptible(), since we
|
||||
* figured XPC's users will just turn around and call xpc_disconnect()
|
||||
* again anyways, so we might as well wait, if need be.
|
||||
*/
|
||||
down(®istration->sema);
|
||||
|
||||
/* if !XPC_CHANNEL_REGISTERED(ch_number) */
|
||||
if (registration->func == NULL) {
|
||||
up(®istration->sema);
|
||||
return;
|
||||
}
|
||||
|
||||
/* remove the connection registration for the specified channel */
|
||||
registration->func = NULL;
|
||||
registration->key = NULL;
|
||||
registration->nentries = 0;
|
||||
registration->msg_size = 0;
|
||||
registration->assigned_limit = 0;
|
||||
registration->idle_limit = 0;
|
||||
|
||||
xpc_interface.disconnect(ch_number);
|
||||
|
||||
up(®istration->sema);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
int __init
|
||||
xp_init(void)
|
||||
{
|
||||
int ret, ch_number;
|
||||
u64 func_addr = *(u64 *) xp_nofault_PIOR;
|
||||
u64 err_func_addr = *(u64 *) xp_error_PIOR;
|
||||
|
||||
|
||||
if (!ia64_platform_is("sn2")) {
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
/*
|
||||
* Register a nofault code region which performs a cross-partition
|
||||
* PIO read. If the PIO read times out, the MCA handler will consume
|
||||
* the error and return to a kernel-provided instruction to indicate
|
||||
* an error. This PIO read exists because it is guaranteed to timeout
|
||||
* if the destination is down (AMO operations do not timeout on at
|
||||
* least some CPUs on Shubs <= v1.2, which unfortunately we have to
|
||||
* work around).
|
||||
*/
|
||||
if ((ret = sn_register_nofault_code(func_addr, err_func_addr,
|
||||
err_func_addr, 1, 1)) != 0) {
|
||||
printk(KERN_ERR "XP: can't register nofault code, error=%d\n",
|
||||
ret);
|
||||
}
|
||||
/*
|
||||
* Setup the nofault PIO read target. (There is no special reason why
|
||||
* SH_IPI_ACCESS was selected.)
|
||||
*/
|
||||
if (is_shub2()) {
|
||||
xp_nofault_PIOR_target = SH2_IPI_ACCESS0;
|
||||
} else {
|
||||
xp_nofault_PIOR_target = SH1_IPI_ACCESS;
|
||||
}
|
||||
|
||||
/* initialize the connection registration semaphores */
|
||||
for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) {
|
||||
sema_init(&xpc_registrations[ch_number].sema, 1); /* mutex */
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
module_init(xp_init);
|
||||
|
||||
|
||||
void __exit
|
||||
xp_exit(void)
|
||||
{
|
||||
u64 func_addr = *(u64 *) xp_nofault_PIOR;
|
||||
u64 err_func_addr = *(u64 *) xp_error_PIOR;
|
||||
|
||||
|
||||
/* unregister the PIO read nofault code region */
|
||||
(void) sn_register_nofault_code(func_addr, err_func_addr,
|
||||
err_func_addr, 1, 0);
|
||||
}
|
||||
module_exit(xp_exit);
|
||||
|
||||
|
||||
MODULE_AUTHOR("Silicon Graphics, Inc.");
|
||||
MODULE_DESCRIPTION("Cross Partition (XP) base");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
EXPORT_SYMBOL(xp_nofault_PIOR);
|
||||
EXPORT_SYMBOL(xp_nofault_PIOR_target);
|
||||
EXPORT_SYMBOL(xpc_registrations);
|
||||
EXPORT_SYMBOL(xpc_interface);
|
||||
EXPORT_SYMBOL(xpc_clear_interface);
|
||||
EXPORT_SYMBOL(xpc_set_interface);
|
||||
EXPORT_SYMBOL(xpc_connect);
|
||||
EXPORT_SYMBOL(xpc_disconnect);
|
||||
|
31
arch/ia64/sn/kernel/xp_nofault.S
Normal file
31
arch/ia64/sn/kernel/xp_nofault.S
Normal file
@ -0,0 +1,31 @@
|
||||
/*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*
|
||||
* Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved.
|
||||
*/
|
||||
|
||||
|
||||
/*
|
||||
* The xp_nofault_PIOR function takes a pointer to a remote PIO register
|
||||
* and attempts to load and consume a value from it. This function
|
||||
* will be registered as a nofault code block. In the event that the
|
||||
* PIO read fails, the MCA handler will force the error to look
|
||||
* corrected and vector to the xp_error_PIOR which will return an error.
|
||||
*
|
||||
* extern int xp_nofault_PIOR(void *remote_register);
|
||||
*/
|
||||
|
||||
.global xp_nofault_PIOR
|
||||
xp_nofault_PIOR:
|
||||
mov r8=r0 // Stage a success return value
|
||||
ld8.acq r9=[r32];; // PIO Read the specified register
|
||||
adds r9=1,r9 // Add to force a consume
|
||||
br.ret.sptk.many b0;; // Return success
|
||||
|
||||
.global xp_error_PIOR
|
||||
xp_error_PIOR:
|
||||
mov r8=1 // Return value of 1
|
||||
br.ret.sptk.many b0;; // Return failure
|
||||
|
991
arch/ia64/sn/kernel/xpc.h
Normal file
991
arch/ia64/sn/kernel/xpc.h
Normal file
@ -0,0 +1,991 @@
|
||||
/*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*
|
||||
* Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved.
|
||||
*/
|
||||
|
||||
|
||||
/*
|
||||
* Cross Partition Communication (XPC) structures and macros.
|
||||
*/
|
||||
|
||||
#ifndef _IA64_SN_KERNEL_XPC_H
|
||||
#define _IA64_SN_KERNEL_XPC_H
|
||||
|
||||
|
||||
#include <linux/config.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/sysctl.h>
|
||||
#include <linux/device.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/sn/bte.h>
|
||||
#include <asm/sn/clksupport.h>
|
||||
#include <asm/sn/addrs.h>
|
||||
#include <asm/sn/mspec.h>
|
||||
#include <asm/sn/shub_mmr.h>
|
||||
#include <asm/sn/xp.h>
|
||||
|
||||
|
||||
/*
|
||||
* XPC Version numbers consist of a major and minor number. XPC can always
|
||||
* talk to versions with same major #, and never talk to versions with a
|
||||
* different major #.
|
||||
*/
|
||||
#define _XPC_VERSION(_maj, _min) (((_maj) << 4) | ((_min) & 0xf))
|
||||
#define XPC_VERSION_MAJOR(_v) ((_v) >> 4)
|
||||
#define XPC_VERSION_MINOR(_v) ((_v) & 0xf)
|
||||
|
||||
|
||||
/*
|
||||
* The next macros define word or bit representations for given
|
||||
* C-brick nasid in either the SAL provided bit array representing
|
||||
* nasids in the partition/machine or the AMO_t array used for
|
||||
* inter-partition initiation communications.
|
||||
*
|
||||
* For SN2 machines, C-Bricks are alway even numbered NASIDs. As
|
||||
* such, some space will be saved by insisting that nasid information
|
||||
* passed from SAL always be packed for C-Bricks and the
|
||||
* cross-partition interrupts use the same packing scheme.
|
||||
*/
|
||||
#define XPC_NASID_W_INDEX(_n) (((_n) / 64) / 2)
|
||||
#define XPC_NASID_B_INDEX(_n) (((_n) / 2) & (64 - 1))
|
||||
#define XPC_NASID_IN_ARRAY(_n, _p) ((_p)[XPC_NASID_W_INDEX(_n)] & \
|
||||
(1UL << XPC_NASID_B_INDEX(_n)))
|
||||
#define XPC_NASID_FROM_W_B(_w, _b) (((_w) * 64 + (_b)) * 2)
|
||||
|
||||
#define XPC_HB_DEFAULT_INTERVAL 5 /* incr HB every x secs */
|
||||
#define XPC_HB_CHECK_DEFAULT_TIMEOUT 20 /* check HB every x secs */
|
||||
|
||||
/* define the process name of HB checker and the CPU it is pinned to */
|
||||
#define XPC_HB_CHECK_THREAD_NAME "xpc_hb"
|
||||
#define XPC_HB_CHECK_CPU 0
|
||||
|
||||
/* define the process name of the discovery thread */
|
||||
#define XPC_DISCOVERY_THREAD_NAME "xpc_discovery"
|
||||
|
||||
|
||||
#define XPC_HB_ALLOWED(_p, _v) ((_v)->heartbeating_to_mask & (1UL << (_p)))
|
||||
#define XPC_ALLOW_HB(_p, _v) (_v)->heartbeating_to_mask |= (1UL << (_p))
|
||||
#define XPC_DISALLOW_HB(_p, _v) (_v)->heartbeating_to_mask &= (~(1UL << (_p)))
|
||||
|
||||
|
||||
/*
|
||||
* Reserved Page provided by SAL.
|
||||
*
|
||||
* SAL provides one page per partition of reserved memory. When SAL
|
||||
* initialization is complete, SAL_signature, SAL_version, partid,
|
||||
* part_nasids, and mach_nasids are set.
|
||||
*
|
||||
* Note: Until vars_pa is set, the partition XPC code has not been initialized.
|
||||
*/
|
||||
struct xpc_rsvd_page {
|
||||
u64 SAL_signature; /* SAL unique signature */
|
||||
u64 SAL_version; /* SAL specified version */
|
||||
u8 partid; /* partition ID from SAL */
|
||||
u8 version;
|
||||
u8 pad[6]; /* pad to u64 align */
|
||||
u64 vars_pa;
|
||||
u64 part_nasids[XP_NASID_MASK_WORDS] ____cacheline_aligned;
|
||||
u64 mach_nasids[XP_NASID_MASK_WORDS] ____cacheline_aligned;
|
||||
};
|
||||
#define XPC_RP_VERSION _XPC_VERSION(1,0) /* version 1.0 of the reserved page */
|
||||
|
||||
#define XPC_RSVD_PAGE_ALIGNED_SIZE \
|
||||
(L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page)))
|
||||
|
||||
|
||||
/*
|
||||
* Define the structures by which XPC variables can be exported to other
|
||||
* partitions. (There are two: struct xpc_vars and struct xpc_vars_part)
|
||||
*/
|
||||
|
||||
/*
|
||||
* The following structure describes the partition generic variables
|
||||
* needed by other partitions in order to properly initialize.
|
||||
*
|
||||
* struct xpc_vars version number also applies to struct xpc_vars_part.
|
||||
* Changes to either structure and/or related functionality should be
|
||||
* reflected by incrementing either the major or minor version numbers
|
||||
* of struct xpc_vars.
|
||||
*/
|
||||
struct xpc_vars {
|
||||
u8 version;
|
||||
u64 heartbeat;
|
||||
u64 heartbeating_to_mask;
|
||||
u64 kdb_status; /* 0 = machine running */
|
||||
int act_nasid;
|
||||
int act_phys_cpuid;
|
||||
u64 vars_part_pa;
|
||||
u64 amos_page_pa; /* paddr of page of AMOs from MSPEC driver */
|
||||
AMO_t *amos_page; /* vaddr of page of AMOs from MSPEC driver */
|
||||
AMO_t *act_amos; /* pointer to the first activation AMO */
|
||||
};
|
||||
#define XPC_V_VERSION _XPC_VERSION(3,0) /* version 3.0 of the cross vars */
|
||||
|
||||
#define XPC_VARS_ALIGNED_SIZE (L1_CACHE_ALIGN(sizeof(struct xpc_vars)))
|
||||
|
||||
/*
|
||||
* The following structure describes the per partition specific variables.
|
||||
*
|
||||
* An array of these structures, one per partition, will be defined. As a
|
||||
* partition becomes active XPC will copy the array entry corresponding to
|
||||
* itself from that partition. It is desirable that the size of this
|
||||
* structure evenly divide into a cacheline, such that none of the entries
|
||||
* in this array crosses a cacheline boundary. As it is now, each entry
|
||||
* occupies half a cacheline.
|
||||
*/
|
||||
struct xpc_vars_part {
|
||||
u64 magic;
|
||||
|
||||
u64 openclose_args_pa; /* physical address of open and close args */
|
||||
u64 GPs_pa; /* physical address of Get/Put values */
|
||||
|
||||
u64 IPI_amo_pa; /* physical address of IPI AMO_t structure */
|
||||
int IPI_nasid; /* nasid of where to send IPIs */
|
||||
int IPI_phys_cpuid; /* physical CPU ID of where to send IPIs */
|
||||
|
||||
u8 nchannels; /* #of defined channels supported */
|
||||
|
||||
u8 reserved[23]; /* pad to a full 64 bytes */
|
||||
};
|
||||
|
||||
/*
|
||||
* The vars_part MAGIC numbers play a part in the first contact protocol.
|
||||
*
|
||||
* MAGIC1 indicates that the per partition specific variables for a remote
|
||||
* partition have been initialized by this partition.
|
||||
*
|
||||
* MAGIC2 indicates that this partition has pulled the remote partititions
|
||||
* per partition variables that pertain to this partition.
|
||||
*/
|
||||
#define XPC_VP_MAGIC1 0x0053524156435058L /* 'XPCVARS\0'L (little endian) */
|
||||
#define XPC_VP_MAGIC2 0x0073726176435058L /* 'XPCvars\0'L (little endian) */
|
||||
|
||||
|
||||
|
||||
/*
|
||||
* Functions registered by add_timer() or called by kernel_thread() only
|
||||
* allow for a single 64-bit argument. The following macros can be used to
|
||||
* pack and unpack two (32-bit, 16-bit or 8-bit) arguments into or out from
|
||||
* the passed argument.
|
||||
*/
|
||||
#define XPC_PACK_ARGS(_arg1, _arg2) \
|
||||
((((u64) _arg1) & 0xffffffff) | \
|
||||
((((u64) _arg2) & 0xffffffff) << 32))
|
||||
|
||||
#define XPC_UNPACK_ARG1(_args) (((u64) _args) & 0xffffffff)
|
||||
#define XPC_UNPACK_ARG2(_args) ((((u64) _args) >> 32) & 0xffffffff)
|
||||
|
||||
|
||||
|
||||
/*
|
||||
* Define a Get/Put value pair (pointers) used with a message queue.
|
||||
*/
|
||||
struct xpc_gp {
|
||||
s64 get; /* Get value */
|
||||
s64 put; /* Put value */
|
||||
};
|
||||
|
||||
#define XPC_GP_SIZE \
|
||||
L1_CACHE_ALIGN(sizeof(struct xpc_gp) * XPC_NCHANNELS)
|
||||
|
||||
|
||||
|
||||
/*
|
||||
* Define a structure that contains arguments associated with opening and
|
||||
* closing a channel.
|
||||
*/
|
||||
struct xpc_openclose_args {
|
||||
u16 reason; /* reason why channel is closing */
|
||||
u16 msg_size; /* sizeof each message entry */
|
||||
u16 remote_nentries; /* #of message entries in remote msg queue */
|
||||
u16 local_nentries; /* #of message entries in local msg queue */
|
||||
u64 local_msgqueue_pa; /* physical address of local message queue */
|
||||
};
|
||||
|
||||
#define XPC_OPENCLOSE_ARGS_SIZE \
|
||||
L1_CACHE_ALIGN(sizeof(struct xpc_openclose_args) * XPC_NCHANNELS)
|
||||
|
||||
|
||||
|
||||
/* struct xpc_msg flags */
|
||||
|
||||
#define XPC_M_DONE 0x01 /* msg has been received/consumed */
|
||||
#define XPC_M_READY 0x02 /* msg is ready to be sent */
|
||||
#define XPC_M_INTERRUPT 0x04 /* send interrupt when msg consumed */
|
||||
|
||||
|
||||
#define XPC_MSG_ADDRESS(_payload) \
|
||||
((struct xpc_msg *)((u8 *)(_payload) - XPC_MSG_PAYLOAD_OFFSET))
|
||||
|
||||
|
||||
|
||||
/*
|
||||
* Defines notify entry.
|
||||
*
|
||||
* This is used to notify a message's sender that their message was received
|
||||
* and consumed by the intended recipient.
|
||||
*/
|
||||
struct xpc_notify {
|
||||
struct semaphore sema; /* notify semaphore */
|
||||
u8 type; /* type of notification */
|
||||
|
||||
/* the following two fields are only used if type == XPC_N_CALL */
|
||||
xpc_notify_func func; /* user's notify function */
|
||||
void *key; /* pointer to user's key */
|
||||
};
|
||||
|
||||
/* struct xpc_notify type of notification */
|
||||
|
||||
#define XPC_N_CALL 0x01 /* notify function provided by user */
|
||||
|
||||
|
||||
|
||||
/*
|
||||
* Define the structure that manages all the stuff required by a channel. In
|
||||
* particular, they are used to manage the messages sent across the channel.
|
||||
*
|
||||
* This structure is private to a partition, and is NOT shared across the
|
||||
* partition boundary.
|
||||
*
|
||||
* There is an array of these structures for each remote partition. It is
|
||||
* allocated at the time a partition becomes active. The array contains one
|
||||
* of these structures for each potential channel connection to that partition.
|
||||
*
|
||||
* Each of these structures manages two message queues (circular buffers).
|
||||
* They are allocated at the time a channel connection is made. One of
|
||||
* these message queues (local_msgqueue) holds the locally created messages
|
||||
* that are destined for the remote partition. The other of these message
|
||||
* queues (remote_msgqueue) is a locally cached copy of the remote partition's
|
||||
* own local_msgqueue.
|
||||
*
|
||||
* The following is a description of the Get/Put pointers used to manage these
|
||||
* two message queues. Consider the local_msgqueue to be on one partition
|
||||
* and the remote_msgqueue to be its cached copy on another partition. A
|
||||
* description of what each of the lettered areas contains is included.
|
||||
*
|
||||
*
|
||||
* local_msgqueue remote_msgqueue
|
||||
*
|
||||
* |/////////| |/////////|
|
||||
* w_remote_GP.get --> +---------+ |/////////|
|
||||
* | F | |/////////|
|
||||
* remote_GP.get --> +---------+ +---------+ <-- local_GP->get
|
||||
* | | | |
|
||||
* | | | E |
|
||||
* | | | |
|
||||
* | | +---------+ <-- w_local_GP.get
|
||||
* | B | |/////////|
|
||||
* | | |////D////|
|
||||
* | | |/////////|
|
||||
* | | +---------+ <-- w_remote_GP.put
|
||||
* | | |////C////|
|
||||
* local_GP->put --> +---------+ +---------+ <-- remote_GP.put
|
||||
* | | |/////////|
|
||||
* | A | |/////////|
|
||||
* | | |/////////|
|
||||
* w_local_GP.put --> +---------+ |/////////|
|
||||
* |/////////| |/////////|
|
||||
*
|
||||
*
|
||||
* ( remote_GP.[get|put] are cached copies of the remote
|
||||
* partition's local_GP->[get|put], and thus their values can
|
||||
* lag behind their counterparts on the remote partition. )
|
||||
*
|
||||
*
|
||||
* A - Messages that have been allocated, but have not yet been sent to the
|
||||
* remote partition.
|
||||
*
|
||||
* B - Messages that have been sent, but have not yet been acknowledged by the
|
||||
* remote partition as having been received.
|
||||
*
|
||||
* C - Area that needs to be prepared for the copying of sent messages, by
|
||||
* the clearing of the message flags of any previously received messages.
|
||||
*
|
||||
* D - Area into which sent messages are to be copied from the remote
|
||||
* partition's local_msgqueue and then delivered to their intended
|
||||
* recipients. [ To allow for a multi-message copy, another pointer
|
||||
* (next_msg_to_pull) has been added to keep track of the next message
|
||||
* number needing to be copied (pulled). It chases after w_remote_GP.put.
|
||||
* Any messages lying between w_local_GP.get and next_msg_to_pull have
|
||||
* been copied and are ready to be delivered. ]
|
||||
*
|
||||
* E - Messages that have been copied and delivered, but have not yet been
|
||||
* acknowledged by the recipient as having been received.
|
||||
*
|
||||
* F - Messages that have been acknowledged, but XPC has not yet notified the
|
||||
* sender that the message was received by its intended recipient.
|
||||
* This is also an area that needs to be prepared for the allocating of
|
||||
* new messages, by the clearing of the message flags of the acknowledged
|
||||
* messages.
|
||||
*/
|
||||
struct xpc_channel {
|
||||
partid_t partid; /* ID of remote partition connected */
|
||||
spinlock_t lock; /* lock for updating this structure */
|
||||
u32 flags; /* general flags */
|
||||
|
||||
enum xpc_retval reason; /* reason why channel is disconnect'g */
|
||||
int reason_line; /* line# disconnect initiated from */
|
||||
|
||||
u16 number; /* channel # */
|
||||
|
||||
u16 msg_size; /* sizeof each msg entry */
|
||||
u16 local_nentries; /* #of msg entries in local msg queue */
|
||||
u16 remote_nentries; /* #of msg entries in remote msg queue*/
|
||||
|
||||
void *local_msgqueue_base; /* base address of kmalloc'd space */
|
||||
struct xpc_msg *local_msgqueue; /* local message queue */
|
||||
void *remote_msgqueue_base; /* base address of kmalloc'd space */
|
||||
struct xpc_msg *remote_msgqueue;/* cached copy of remote partition's */
|
||||
/* local message queue */
|
||||
u64 remote_msgqueue_pa; /* phys addr of remote partition's */
|
||||
/* local message queue */
|
||||
|
||||
atomic_t references; /* #of external references to queues */
|
||||
|
||||
atomic_t n_on_msg_allocate_wq; /* #on msg allocation wait queue */
|
||||
wait_queue_head_t msg_allocate_wq; /* msg allocation wait queue */
|
||||
|
||||
/* queue of msg senders who want to be notified when msg received */
|
||||
|
||||
atomic_t n_to_notify; /* #of msg senders to notify */
|
||||
struct xpc_notify *notify_queue;/* notify queue for messages sent */
|
||||
|
||||
xpc_channel_func func; /* user's channel function */
|
||||
void *key; /* pointer to user's key */
|
||||
|
||||
struct semaphore msg_to_pull_sema; /* next msg to pull serialization */
|
||||
struct semaphore teardown_sema; /* wait for teardown completion */
|
||||
|
||||
struct xpc_openclose_args *local_openclose_args; /* args passed on */
|
||||
/* opening or closing of channel */
|
||||
|
||||
/* various flavors of local and remote Get/Put values */
|
||||
|
||||
struct xpc_gp *local_GP; /* local Get/Put values */
|
||||
struct xpc_gp remote_GP; /* remote Get/Put values */
|
||||
struct xpc_gp w_local_GP; /* working local Get/Put values */
|
||||
struct xpc_gp w_remote_GP; /* working remote Get/Put values */
|
||||
s64 next_msg_to_pull; /* Put value of next msg to pull */
|
||||
|
||||
/* kthread management related fields */
|
||||
|
||||
// >>> rethink having kthreads_assigned_limit and kthreads_idle_limit; perhaps
|
||||
// >>> allow the assigned limit be unbounded and let the idle limit be dynamic
|
||||
// >>> dependent on activity over the last interval of time
|
||||
atomic_t kthreads_assigned; /* #of kthreads assigned to channel */
|
||||
u32 kthreads_assigned_limit; /* limit on #of kthreads assigned */
|
||||
atomic_t kthreads_idle; /* #of kthreads idle waiting for work */
|
||||
u32 kthreads_idle_limit; /* limit on #of kthreads idle */
|
||||
atomic_t kthreads_active; /* #of kthreads actively working */
|
||||
// >>> following field is temporary
|
||||
u32 kthreads_created; /* total #of kthreads created */
|
||||
|
||||
wait_queue_head_t idle_wq; /* idle kthread wait queue */
|
||||
|
||||
} ____cacheline_aligned;
|
||||
|
||||
|
||||
/* struct xpc_channel flags */
|
||||
|
||||
#define XPC_C_WASCONNECTED 0x00000001 /* channel was connected */
|
||||
|
||||
#define XPC_C_ROPENREPLY 0x00000002 /* remote open channel reply */
|
||||
#define XPC_C_OPENREPLY 0x00000004 /* local open channel reply */
|
||||
#define XPC_C_ROPENREQUEST 0x00000008 /* remote open channel request */
|
||||
#define XPC_C_OPENREQUEST 0x00000010 /* local open channel request */
|
||||
|
||||
#define XPC_C_SETUP 0x00000020 /* channel's msgqueues are alloc'd */
|
||||
#define XPC_C_CONNECTCALLOUT 0x00000040 /* channel connected callout made */
|
||||
#define XPC_C_CONNECTED 0x00000080 /* local channel is connected */
|
||||
#define XPC_C_CONNECTING 0x00000100 /* channel is being connected */
|
||||
|
||||
#define XPC_C_RCLOSEREPLY 0x00000200 /* remote close channel reply */
|
||||
#define XPC_C_CLOSEREPLY 0x00000400 /* local close channel reply */
|
||||
#define XPC_C_RCLOSEREQUEST 0x00000800 /* remote close channel request */
|
||||
#define XPC_C_CLOSEREQUEST 0x00001000 /* local close channel request */
|
||||
|
||||
#define XPC_C_DISCONNECTED 0x00002000 /* channel is disconnected */
|
||||
#define XPC_C_DISCONNECTING 0x00004000 /* channel is being disconnected */
|
||||
|
||||
|
||||
|
||||
/*
|
||||
* Manages channels on a partition basis. There is one of these structures
|
||||
* for each partition (a partition will never utilize the structure that
|
||||
* represents itself).
|
||||
*/
|
||||
struct xpc_partition {
|
||||
|
||||
/* XPC HB infrastructure */
|
||||
|
||||
u64 remote_rp_pa; /* phys addr of partition's rsvd pg */
|
||||
u64 remote_vars_pa; /* phys addr of partition's vars */
|
||||
u64 remote_vars_part_pa; /* phys addr of partition's vars part */
|
||||
u64 last_heartbeat; /* HB at last read */
|
||||
u64 remote_amos_page_pa; /* phys addr of partition's amos page */
|
||||
int remote_act_nasid; /* active part's act/deact nasid */
|
||||
int remote_act_phys_cpuid; /* active part's act/deact phys cpuid */
|
||||
u32 act_IRQ_rcvd; /* IRQs since activation */
|
||||
spinlock_t act_lock; /* protect updating of act_state */
|
||||
u8 act_state; /* from XPC HB viewpoint */
|
||||
enum xpc_retval reason; /* reason partition is deactivating */
|
||||
int reason_line; /* line# deactivation initiated from */
|
||||
int reactivate_nasid; /* nasid in partition to reactivate */
|
||||
|
||||
|
||||
/* XPC infrastructure referencing and teardown control */
|
||||
|
||||
u8 setup_state; /* infrastructure setup state */
|
||||
wait_queue_head_t teardown_wq; /* kthread waiting to teardown infra */
|
||||
atomic_t references; /* #of references to infrastructure */
|
||||
|
||||
|
||||
/*
|
||||
* NONE OF THE PRECEDING FIELDS OF THIS STRUCTURE WILL BE CLEARED WHEN
|
||||
* XPC SETS UP THE NECESSARY INFRASTRUCTURE TO SUPPORT CROSS PARTITION
|
||||
* COMMUNICATION. ALL OF THE FOLLOWING FIELDS WILL BE CLEARED. (THE
|
||||
* 'nchannels' FIELD MUST BE THE FIRST OF THE FIELDS TO BE CLEARED.)
|
||||
*/
|
||||
|
||||
|
||||
u8 nchannels; /* #of defined channels supported */
|
||||
atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */
|
||||
struct xpc_channel *channels;/* array of channel structures */
|
||||
|
||||
void *local_GPs_base; /* base address of kmalloc'd space */
|
||||
struct xpc_gp *local_GPs; /* local Get/Put values */
|
||||
void *remote_GPs_base; /* base address of kmalloc'd space */
|
||||
struct xpc_gp *remote_GPs;/* copy of remote partition's local Get/Put */
|
||||
/* values */
|
||||
u64 remote_GPs_pa; /* phys address of remote partition's local */
|
||||
/* Get/Put values */
|
||||
|
||||
|
||||
/* fields used to pass args when opening or closing a channel */
|
||||
|
||||
void *local_openclose_args_base; /* base address of kmalloc'd space */
|
||||
struct xpc_openclose_args *local_openclose_args; /* local's args */
|
||||
void *remote_openclose_args_base; /* base address of kmalloc'd space */
|
||||
struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */
|
||||
/* args */
|
||||
u64 remote_openclose_args_pa; /* phys addr of remote's args */
|
||||
|
||||
|
||||
/* IPI sending, receiving and handling related fields */
|
||||
|
||||
int remote_IPI_nasid; /* nasid of where to send IPIs */
|
||||
int remote_IPI_phys_cpuid; /* phys CPU ID of where to send IPIs */
|
||||
AMO_t *remote_IPI_amo_va; /* address of remote IPI AMO_t structure */
|
||||
|
||||
AMO_t *local_IPI_amo_va; /* address of IPI AMO_t structure */
|
||||
u64 local_IPI_amo; /* IPI amo flags yet to be handled */
|
||||
char IPI_owner[8]; /* IPI owner's name */
|
||||
struct timer_list dropped_IPI_timer; /* dropped IPI timer */
|
||||
|
||||
spinlock_t IPI_lock; /* IPI handler lock */
|
||||
|
||||
|
||||
/* channel manager related fields */
|
||||
|
||||
atomic_t channel_mgr_requests; /* #of requests to activate chan mgr */
|
||||
wait_queue_head_t channel_mgr_wq; /* channel mgr's wait queue */
|
||||
|
||||
} ____cacheline_aligned;
|
||||
|
||||
|
||||
/* struct xpc_partition act_state values (for XPC HB) */
|
||||
|
||||
#define XPC_P_INACTIVE 0x00 /* partition is not active */
|
||||
#define XPC_P_ACTIVATION_REQ 0x01 /* created thread to activate */
|
||||
#define XPC_P_ACTIVATING 0x02 /* activation thread started */
|
||||
#define XPC_P_ACTIVE 0x03 /* xpc_partition_up() was called */
|
||||
#define XPC_P_DEACTIVATING 0x04 /* partition deactivation initiated */
|
||||
|
||||
|
||||
#define XPC_DEACTIVATE_PARTITION(_p, _reason) \
|
||||
xpc_deactivate_partition(__LINE__, (_p), (_reason))
|
||||
|
||||
|
||||
/* struct xpc_partition setup_state values */
|
||||
|
||||
#define XPC_P_UNSET 0x00 /* infrastructure was never setup */
|
||||
#define XPC_P_SETUP 0x01 /* infrastructure is setup */
|
||||
#define XPC_P_WTEARDOWN 0x02 /* waiting to teardown infrastructure */
|
||||
#define XPC_P_TORNDOWN 0x03 /* infrastructure is torndown */
|
||||
|
||||
|
||||
/*
|
||||
* struct xpc_partition IPI_timer #of seconds to wait before checking for
|
||||
* dropped IPIs. These occur whenever an IPI amo write doesn't complete until
|
||||
* after the IPI was received.
|
||||
*/
|
||||
#define XPC_P_DROPPED_IPI_WAIT (0.25 * HZ)
|
||||
|
||||
|
||||
#define XPC_PARTID(_p) ((partid_t) ((_p) - &xpc_partitions[0]))
|
||||
|
||||
|
||||
|
||||
/* found in xp_main.c */
|
||||
extern struct xpc_registration xpc_registrations[];
|
||||
|
||||
|
||||
/* >>> found in xpc_main.c only */
|
||||
extern struct device *xpc_part;
|
||||
extern struct device *xpc_chan;
|
||||
extern irqreturn_t xpc_notify_IRQ_handler(int, void *, struct pt_regs *);
|
||||
extern void xpc_dropped_IPI_check(struct xpc_partition *);
|
||||
extern void xpc_activate_kthreads(struct xpc_channel *, int);
|
||||
extern void xpc_create_kthreads(struct xpc_channel *, int);
|
||||
extern void xpc_disconnect_wait(int);
|
||||
|
||||
|
||||
/* found in xpc_main.c and efi-xpc.c */
|
||||
extern void xpc_activate_partition(struct xpc_partition *);
|
||||
|
||||
|
||||
/* found in xpc_partition.c */
|
||||
extern int xpc_exiting;
|
||||
extern int xpc_hb_interval;
|
||||
extern int xpc_hb_check_interval;
|
||||
extern struct xpc_vars *xpc_vars;
|
||||
extern struct xpc_rsvd_page *xpc_rsvd_page;
|
||||
extern struct xpc_vars_part *xpc_vars_part;
|
||||
extern struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
|
||||
extern char xpc_remote_copy_buffer[];
|
||||
extern struct xpc_rsvd_page *xpc_rsvd_page_init(void);
|
||||
extern void xpc_allow_IPI_ops(void);
|
||||
extern void xpc_restrict_IPI_ops(void);
|
||||
extern int xpc_identify_act_IRQ_sender(void);
|
||||
extern enum xpc_retval xpc_mark_partition_active(struct xpc_partition *);
|
||||
extern void xpc_mark_partition_inactive(struct xpc_partition *);
|
||||
extern void xpc_discovery(void);
|
||||
extern void xpc_check_remote_hb(void);
|
||||
extern void xpc_deactivate_partition(const int, struct xpc_partition *,
|
||||
enum xpc_retval);
|
||||
extern enum xpc_retval xpc_initiate_partid_to_nasids(partid_t, void *);
|
||||
|
||||
|
||||
/* found in xpc_channel.c */
|
||||
extern void xpc_initiate_connect(int);
|
||||
extern void xpc_initiate_disconnect(int);
|
||||
extern enum xpc_retval xpc_initiate_allocate(partid_t, int, u32, void **);
|
||||
extern enum xpc_retval xpc_initiate_send(partid_t, int, void *);
|
||||
extern enum xpc_retval xpc_initiate_send_notify(partid_t, int, void *,
|
||||
xpc_notify_func, void *);
|
||||
extern void xpc_initiate_received(partid_t, int, void *);
|
||||
extern enum xpc_retval xpc_setup_infrastructure(struct xpc_partition *);
|
||||
extern enum xpc_retval xpc_pull_remote_vars_part(struct xpc_partition *);
|
||||
extern void xpc_process_channel_activity(struct xpc_partition *);
|
||||
extern void xpc_connected_callout(struct xpc_channel *);
|
||||
extern void xpc_deliver_msg(struct xpc_channel *);
|
||||
extern void xpc_disconnect_channel(const int, struct xpc_channel *,
|
||||
enum xpc_retval, unsigned long *);
|
||||
extern void xpc_disconnected_callout(struct xpc_channel *);
|
||||
extern void xpc_partition_down(struct xpc_partition *, enum xpc_retval);
|
||||
extern void xpc_teardown_infrastructure(struct xpc_partition *);
|
||||
|
||||
|
||||
|
||||
static inline void
|
||||
xpc_wakeup_channel_mgr(struct xpc_partition *part)
|
||||
{
|
||||
if (atomic_inc_return(&part->channel_mgr_requests) == 1) {
|
||||
wake_up(&part->channel_mgr_wq);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
/*
|
||||
* These next two inlines are used to keep us from tearing down a channel's
|
||||
* msg queues while a thread may be referencing them.
|
||||
*/
|
||||
static inline void
|
||||
xpc_msgqueue_ref(struct xpc_channel *ch)
|
||||
{
|
||||
atomic_inc(&ch->references);
|
||||
}
|
||||
|
||||
static inline void
|
||||
xpc_msgqueue_deref(struct xpc_channel *ch)
|
||||
{
|
||||
s32 refs = atomic_dec_return(&ch->references);
|
||||
|
||||
DBUG_ON(refs < 0);
|
||||
if (refs == 0) {
|
||||
xpc_wakeup_channel_mgr(&xpc_partitions[ch->partid]);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
#define XPC_DISCONNECT_CHANNEL(_ch, _reason, _irqflgs) \
|
||||
xpc_disconnect_channel(__LINE__, _ch, _reason, _irqflgs)
|
||||
|
||||
|
||||
/*
|
||||
* These two inlines are used to keep us from tearing down a partition's
|
||||
* setup infrastructure while a thread may be referencing it.
|
||||
*/
|
||||
static inline void
|
||||
xpc_part_deref(struct xpc_partition *part)
|
||||
{
|
||||
s32 refs = atomic_dec_return(&part->references);
|
||||
|
||||
|
||||
DBUG_ON(refs < 0);
|
||||
if (refs == 0 && part->setup_state == XPC_P_WTEARDOWN) {
|
||||
wake_up(&part->teardown_wq);
|
||||
}
|
||||
}
|
||||
|
||||
static inline int
|
||||
xpc_part_ref(struct xpc_partition *part)
|
||||
{
|
||||
int setup;
|
||||
|
||||
|
||||
atomic_inc(&part->references);
|
||||
setup = (part->setup_state == XPC_P_SETUP);
|
||||
if (!setup) {
|
||||
xpc_part_deref(part);
|
||||
}
|
||||
return setup;
|
||||
}
|
||||
|
||||
|
||||
|
||||
/*
|
||||
* The following macro is to be used for the setting of the reason and
|
||||
* reason_line fields in both the struct xpc_channel and struct xpc_partition
|
||||
* structures.
|
||||
*/
|
||||
#define XPC_SET_REASON(_p, _reason, _line) \
|
||||
{ \
|
||||
(_p)->reason = _reason; \
|
||||
(_p)->reason_line = _line; \
|
||||
}
|
||||
|
||||
|
||||
|
||||
/*
|
||||
* The following set of macros and inlines are used for the sending and
|
||||
* receiving of IPIs (also known as IRQs). There are two flavors of IPIs,
|
||||
* one that is associated with partition activity (SGI_XPC_ACTIVATE) and
|
||||
* the other that is associated with channel activity (SGI_XPC_NOTIFY).
|
||||
*/
|
||||
|
||||
static inline u64
|
||||
xpc_IPI_receive(AMO_t *amo)
|
||||
{
|
||||
return FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_CLEAR);
|
||||
}
|
||||
|
||||
|
||||
static inline enum xpc_retval
|
||||
xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
|
||||
{
|
||||
int ret = 0;
|
||||
unsigned long irq_flags;
|
||||
|
||||
|
||||
local_irq_save(irq_flags);
|
||||
|
||||
FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR, flag);
|
||||
sn_send_IPI_phys(nasid, phys_cpuid, vector, 0);
|
||||
|
||||
/*
|
||||
* We must always use the nofault function regardless of whether we
|
||||
* are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
|
||||
* didn't, we'd never know that the other partition is down and would
|
||||
* keep sending IPIs and AMOs to it until the heartbeat times out.
|
||||
*/
|
||||
ret = xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->variable),
|
||||
xp_nofault_PIOR_target));
|
||||
|
||||
local_irq_restore(irq_flags);
|
||||
|
||||
return ((ret == 0) ? xpcSuccess : xpcPioReadError);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* IPIs associated with SGI_XPC_ACTIVATE IRQ.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Flag the appropriate AMO variable and send an IPI to the specified node.
|
||||
*/
|
||||
static inline void
|
||||
xpc_activate_IRQ_send(u64 amos_page, int from_nasid, int to_nasid,
|
||||
int to_phys_cpuid)
|
||||
{
|
||||
int w_index = XPC_NASID_W_INDEX(from_nasid);
|
||||
int b_index = XPC_NASID_B_INDEX(from_nasid);
|
||||
AMO_t *amos = (AMO_t *) __va(amos_page +
|
||||
(XP_MAX_PARTITIONS * sizeof(AMO_t)));
|
||||
|
||||
|
||||
(void) xpc_IPI_send(&amos[w_index], (1UL << b_index), to_nasid,
|
||||
to_phys_cpuid, SGI_XPC_ACTIVATE);
|
||||
}
|
||||
|
||||
static inline void
|
||||
xpc_IPI_send_activate(struct xpc_vars *vars)
|
||||
{
|
||||
xpc_activate_IRQ_send(vars->amos_page_pa, cnodeid_to_nasid(0),
|
||||
vars->act_nasid, vars->act_phys_cpuid);
|
||||
}
|
||||
|
||||
static inline void
|
||||
xpc_IPI_send_activated(struct xpc_partition *part)
|
||||
{
|
||||
xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0),
|
||||
part->remote_act_nasid, part->remote_act_phys_cpuid);
|
||||
}
|
||||
|
||||
static inline void
|
||||
xpc_IPI_send_reactivate(struct xpc_partition *part)
|
||||
{
|
||||
xpc_activate_IRQ_send(xpc_vars->amos_page_pa, part->reactivate_nasid,
|
||||
xpc_vars->act_nasid, xpc_vars->act_phys_cpuid);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* IPIs associated with SGI_XPC_NOTIFY IRQ.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Send an IPI to the remote partition that is associated with the
|
||||
* specified channel.
|
||||
*/
|
||||
#define XPC_NOTIFY_IRQ_SEND(_ch, _ipi_f, _irq_f) \
|
||||
xpc_notify_IRQ_send(_ch, _ipi_f, #_ipi_f, _irq_f)
|
||||
|
||||
static inline void
|
||||
xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string,
|
||||
unsigned long *irq_flags)
|
||||
{
|
||||
struct xpc_partition *part = &xpc_partitions[ch->partid];
|
||||
enum xpc_retval ret;
|
||||
|
||||
|
||||
if (likely(part->act_state != XPC_P_DEACTIVATING)) {
|
||||
ret = xpc_IPI_send(part->remote_IPI_amo_va,
|
||||
(u64) ipi_flag << (ch->number * 8),
|
||||
part->remote_IPI_nasid,
|
||||
part->remote_IPI_phys_cpuid,
|
||||
SGI_XPC_NOTIFY);
|
||||
dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
|
||||
ipi_flag_string, ch->partid, ch->number, ret);
|
||||
if (unlikely(ret != xpcSuccess)) {
|
||||
if (irq_flags != NULL) {
|
||||
spin_unlock_irqrestore(&ch->lock, *irq_flags);
|
||||
}
|
||||
XPC_DEACTIVATE_PARTITION(part, ret);
|
||||
if (irq_flags != NULL) {
|
||||
spin_lock_irqsave(&ch->lock, *irq_flags);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Make it look like the remote partition, which is associated with the
|
||||
* specified channel, sent us an IPI. This faked IPI will be handled
|
||||
* by xpc_dropped_IPI_check().
|
||||
*/
|
||||
#define XPC_NOTIFY_IRQ_SEND_LOCAL(_ch, _ipi_f) \
|
||||
xpc_notify_IRQ_send_local(_ch, _ipi_f, #_ipi_f)
|
||||
|
||||
static inline void
|
||||
xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag,
|
||||
char *ipi_flag_string)
|
||||
{
|
||||
struct xpc_partition *part = &xpc_partitions[ch->partid];
|
||||
|
||||
|
||||
FETCHOP_STORE_OP(TO_AMO((u64) &part->local_IPI_amo_va->variable),
|
||||
FETCHOP_OR, ((u64) ipi_flag << (ch->number * 8)));
|
||||
dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n",
|
||||
ipi_flag_string, ch->partid, ch->number);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* The sending and receiving of IPIs includes the setting of an AMO variable
|
||||
* to indicate the reason the IPI was sent. The 64-bit variable is divided
|
||||
* up into eight bytes, ordered from right to left. Byte zero pertains to
|
||||
* channel 0, byte one to channel 1, and so on. Each byte is described by
|
||||
* the following IPI flags.
|
||||
*/
|
||||
|
||||
#define XPC_IPI_CLOSEREQUEST 0x01
|
||||
#define XPC_IPI_CLOSEREPLY 0x02
|
||||
#define XPC_IPI_OPENREQUEST 0x04
|
||||
#define XPC_IPI_OPENREPLY 0x08
|
||||
#define XPC_IPI_MSGREQUEST 0x10
|
||||
|
||||
|
||||
/* given an AMO variable and a channel#, get its associated IPI flags */
|
||||
#define XPC_GET_IPI_FLAGS(_amo, _c) ((u8) (((_amo) >> ((_c) * 8)) & 0xff))
|
||||
|
||||
#define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0f)
|
||||
#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & 0x1010101010101010)
|
||||
|
||||
|
||||
static inline void
|
||||
xpc_IPI_send_closerequest(struct xpc_channel *ch, unsigned long *irq_flags)
|
||||
{
|
||||
struct xpc_openclose_args *args = ch->local_openclose_args;
|
||||
|
||||
|
||||
args->reason = ch->reason;
|
||||
|
||||
XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREQUEST, irq_flags);
|
||||
}
|
||||
|
||||
static inline void
|
||||
xpc_IPI_send_closereply(struct xpc_channel *ch, unsigned long *irq_flags)
|
||||
{
|
||||
XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREPLY, irq_flags);
|
||||
}
|
||||
|
||||
static inline void
|
||||
xpc_IPI_send_openrequest(struct xpc_channel *ch, unsigned long *irq_flags)
|
||||
{
|
||||
struct xpc_openclose_args *args = ch->local_openclose_args;
|
||||
|
||||
|
||||
args->msg_size = ch->msg_size;
|
||||
args->local_nentries = ch->local_nentries;
|
||||
|
||||
XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_OPENREQUEST, irq_flags);
|
||||
}
|
||||
|
||||
static inline void
|
||||
xpc_IPI_send_openreply(struct xpc_channel *ch, unsigned long *irq_flags)
|
||||
{
|
||||
struct xpc_openclose_args *args = ch->local_openclose_args;
|
||||
|
||||
|
||||
args->remote_nentries = ch->remote_nentries;
|
||||
args->local_nentries = ch->local_nentries;
|
||||
args->local_msgqueue_pa = __pa(ch->local_msgqueue);
|
||||
|
||||
XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_OPENREPLY, irq_flags);
|
||||
}
|
||||
|
||||
static inline void
|
||||
xpc_IPI_send_msgrequest(struct xpc_channel *ch)
|
||||
{
|
||||
XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_MSGREQUEST, NULL);
|
||||
}
|
||||
|
||||
static inline void
|
||||
xpc_IPI_send_local_msgrequest(struct xpc_channel *ch)
|
||||
{
|
||||
XPC_NOTIFY_IRQ_SEND_LOCAL(ch, XPC_IPI_MSGREQUEST);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Memory for XPC's AMO variables is allocated by the MSPEC driver. These
|
||||
* pages are located in the lowest granule. The lowest granule uses 4k pages
|
||||
* for cached references and an alternate TLB handler to never provide a
|
||||
* cacheable mapping for the entire region. This will prevent speculative
|
||||
* reading of cached copies of our lines from being issued which will cause
|
||||
* a PI FSB Protocol error to be generated by the SHUB. For XPC, we need 64
|
||||
* (XP_MAX_PARTITIONS) AMO variables for message notification (xpc_main.c)
|
||||
* and an additional 16 AMO variables for partition activation (xpc_hb.c).
|
||||
*/
|
||||
static inline AMO_t *
|
||||
xpc_IPI_init(partid_t partid)
|
||||
{
|
||||
AMO_t *part_amo = xpc_vars->amos_page + partid;
|
||||
|
||||
|
||||
xpc_IPI_receive(part_amo);
|
||||
return part_amo;
|
||||
}
|
||||
|
||||
|
||||
|
||||
static inline enum xpc_retval
|
||||
xpc_map_bte_errors(bte_result_t error)
|
||||
{
|
||||
switch (error) {
|
||||
case BTE_SUCCESS: return xpcSuccess;
|
||||
case BTEFAIL_DIR: return xpcBteDirectoryError;
|
||||
case BTEFAIL_POISON: return xpcBtePoisonError;
|
||||
case BTEFAIL_WERR: return xpcBteWriteError;
|
||||
case BTEFAIL_ACCESS: return xpcBteAccessError;
|
||||
case BTEFAIL_PWERR: return xpcBtePWriteError;
|
||||
case BTEFAIL_PRERR: return xpcBtePReadError;
|
||||
case BTEFAIL_TOUT: return xpcBteTimeOutError;
|
||||
case BTEFAIL_XTERR: return xpcBteXtalkError;
|
||||
case BTEFAIL_NOTAVAIL: return xpcBteNotAvailable;
|
||||
default: return xpcBteUnmappedError;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
/*
 * Allocate a cacheline-aligned buffer of at least 'size' bytes.
 *
 * *base receives the raw kmalloc pointer (what the caller must kfree);
 * the returned pointer is the aligned address within that allocation.
 * Returns NULL (with *base == NULL) on allocation failure.
 */
static inline void *
xpc_kmalloc_cacheline_aligned(size_t size, int flags, void **base)
{
	/* see if kmalloc will give us cachline aligned memory by default */
	*base = kmalloc(size, flags);
	if (*base == NULL) {
		return NULL;
	}
	if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) {
		/* already aligned; no padding needed */
		return *base;
	}
	kfree(*base);

	/* nope, we'll have to do it ourselves */
	/* over-allocate by one cacheline so the aligned start still fits */
	*base = kmalloc(size + L1_CACHE_BYTES, flags);
	if (*base == NULL) {
		return NULL;
	}
	return (void *) L1_CACHE_ALIGN((u64) *base);
}
|
||||
|
||||
|
||||
/*
 * Check to see if there is any channel activity to/from the specified
 * partition.
 *
 * Drains the partition's IPI AMO variable; if any bits were pending,
 * ORs them into part->local_IPI_amo under IPI_lock and wakes the
 * channel manager thread to process them.
 */
static inline void
xpc_check_for_channel_activity(struct xpc_partition *part)
{
	u64 IPI_amo;
	unsigned long irq_flags;


	/* atomically fetch-and-clear the pending IPI bits */
	IPI_amo = xpc_IPI_receive(part->local_IPI_amo_va);
	if (IPI_amo == 0) {
		return;
	}

	/* accumulate the bits for the channel manager under the IPI lock */
	spin_lock_irqsave(&part->IPI_lock, irq_flags);
	part->local_IPI_amo |= IPI_amo;
	spin_unlock_irqrestore(&part->IPI_lock, irq_flags);

	dev_dbg(xpc_chan, "received IPI from partid=%d, IPI_amo=0x%lx\n",
		XPC_PARTID(part), IPI_amo);

	xpc_wakeup_channel_mgr(part);
}
|
||||
|
||||
|
||||
#endif /* _IA64_SN_KERNEL_XPC_H */
|
||||
|
2297
arch/ia64/sn/kernel/xpc_channel.c
Normal file
2297
arch/ia64/sn/kernel/xpc_channel.c
Normal file
File diff suppressed because it is too large
Load Diff
1064
arch/ia64/sn/kernel/xpc_main.c
Normal file
1064
arch/ia64/sn/kernel/xpc_main.c
Normal file
File diff suppressed because it is too large
Load Diff
984
arch/ia64/sn/kernel/xpc_partition.c
Normal file
984
arch/ia64/sn/kernel/xpc_partition.c
Normal file
@ -0,0 +1,984 @@
|
||||
/*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*
|
||||
* Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved.
|
||||
*/
|
||||
|
||||
|
||||
/*
|
||||
* Cross Partition Communication (XPC) partition support.
|
||||
*
|
||||
* This is the part of XPC that detects the presence/absence of
|
||||
* other partitions. It provides a heartbeat and monitors the
|
||||
* heartbeats of other partitions.
|
||||
*
|
||||
*/
|
||||
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/sysctl.h>
|
||||
#include <linux/cache.h>
|
||||
#include <linux/mmzone.h>
|
||||
#include <linux/nodemask.h>
|
||||
#include <asm/sn/bte.h>
|
||||
#include <asm/sn/intr.h>
|
||||
#include <asm/sn/sn_sal.h>
|
||||
#include <asm/sn/nodepda.h>
|
||||
#include <asm/sn/addrs.h>
|
||||
#include "xpc.h"
|
||||
|
||||
|
||||
/* XPC is exiting flag */
|
||||
int xpc_exiting;
|
||||
|
||||
|
||||
/* SH_IPI_ACCESS shub register value on startup */
|
||||
static u64 xpc_sh1_IPI_access;
|
||||
static u64 xpc_sh2_IPI_access0;
|
||||
static u64 xpc_sh2_IPI_access1;
|
||||
static u64 xpc_sh2_IPI_access2;
|
||||
static u64 xpc_sh2_IPI_access3;
|
||||
|
||||
|
||||
/* original protection values for each node */
|
||||
u64 xpc_prot_vec[MAX_COMPACT_NODES];
|
||||
|
||||
|
||||
/* this partition's reserved page */
|
||||
struct xpc_rsvd_page *xpc_rsvd_page;
|
||||
|
||||
/* this partition's XPC variables (within the reserved page) */
|
||||
struct xpc_vars *xpc_vars;
|
||||
struct xpc_vars_part *xpc_vars_part;
|
||||
|
||||
|
||||
/*
|
||||
* For performance reasons, each entry of xpc_partitions[] is cacheline
|
||||
* aligned. And xpc_partitions[] is padded with an additional entry at the
|
||||
* end so that the last legitimate entry doesn't share its cacheline with
|
||||
* another variable.
|
||||
*/
|
||||
struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
|
||||
|
||||
|
||||
/*
|
||||
* Generic buffer used to store a local copy of the remote partitions
|
||||
* reserved page or XPC variables.
|
||||
*
|
||||
* xpc_discovery runs only once and is a seperate thread that is
|
||||
* very likely going to be processing in parallel with receiving
|
||||
* interrupts.
|
||||
*/
|
||||
char ____cacheline_aligned
|
||||
xpc_remote_copy_buffer[XPC_RSVD_PAGE_ALIGNED_SIZE];
|
||||
|
||||
|
||||
/* systune related variables */
|
||||
int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
|
||||
int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_TIMEOUT;
|
||||
|
||||
|
||||
/*
 * Given a nasid, get the physical address of the partition's reserved page
 * for that nasid. This function returns 0 on any error.
 *
 * SAL may require multiple passes (SALRET_MORE_PASSES): on each pass the
 * caller-supplied buffer is filled via a BTE copy from rp_pa and handed
 * back to SAL along with the opaque cookie until SAL returns a final
 * status.  'buf' is a kernel virtual address of size 'buf_size'.
 */
static u64
xpc_get_rsvd_page_pa(int nasid, u64 buf, u64 buf_size)
{
	bte_result_t bte_res;
	s64 status;
	u64 cookie = 0;
	u64 rp_pa = nasid;	/* seed with nasid */
	u64 len = 0;


	while (1) {

		status = sn_partition_reserved_page_pa(buf, &cookie, &rp_pa,
								&len);

		dev_dbg(xpc_part, "SAL returned with status=%li, cookie="
			"0x%016lx, address=0x%016lx, len=0x%016lx\n",
			status, cookie, rp_pa, len);

		if (status != SALRET_MORE_PASSES) {
			break;
		}

		/* SAL wants another pass; it must fit in our buffer */
		if (len > buf_size) {
			dev_err(xpc_part, "len (=0x%016lx) > buf_size\n", len);
			status = SALRET_ERROR;
			break;
		}

		bte_res = xp_bte_copy(rp_pa, ia64_tpa(buf), buf_size,
					(BTE_NOTIFY | BTE_WACQUIRE), NULL);
		if (bte_res != BTE_SUCCESS) {
			dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res);
			status = SALRET_ERROR;
			break;
		}
	}

	if (status != SALRET_OK) {
		rp_pa = 0;	/* 0 signals failure to the caller */
	}
	dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa);
	return rp_pa;
}
|
||||
|
||||
|
||||
/*
 * Fill the partition reserved page with the information needed by
 * other partitions to discover we are alive and establish initial
 * communications.
 *
 * Returns a pointer to this partition's reserved page, or NULL if the
 * page could not be located, is inconsistent, or AMO setup failed.
 */
struct xpc_rsvd_page *
xpc_rsvd_page_init(void)
{
	struct xpc_rsvd_page *rp;
	AMO_t *amos_page;
	u64 rp_pa, next_cl, nasid_array = 0;
	int i, ret;


	/* get the local reserved page's address */

	rp_pa = xpc_get_rsvd_page_pa(cnodeid_to_nasid(0),
					(u64) xpc_remote_copy_buffer,
						XPC_RSVD_PAGE_ALIGNED_SIZE);
	if (rp_pa == 0) {
		dev_err(xpc_part, "SAL failed to locate the reserved page\n");
		return NULL;
	}
	rp = (struct xpc_rsvd_page *) __va(rp_pa);

	/* sanity check: the page must belong to this SN partition */
	if (rp->partid != sn_partition_id) {
		dev_err(xpc_part, "the reserved page's partid of %d should be "
			"%d\n", rp->partid, sn_partition_id);
		return NULL;
	}

	rp->version = XPC_RP_VERSION;

	/*
	 * Place the XPC variables on the cache line following the
	 * reserved page structure.
	 */
	next_cl = (u64) rp + XPC_RSVD_PAGE_ALIGNED_SIZE;
	xpc_vars = (struct xpc_vars *) next_cl;

	/*
	 * Before clearing xpc_vars, see if a page of AMOs had been previously
	 * allocated. If not we'll need to allocate one and set permissions
	 * so that cross-partition AMOs are allowed.
	 *
	 * The allocated AMO page needs MCA reporting to remain disabled after
	 * XPC has unloaded. To make this work, we keep a copy of the pointer
	 * to this page (i.e., amos_page) in the struct xpc_vars structure,
	 * which is pointed to by the reserved page, and re-use that saved copy
	 * on subsequent loads of XPC. This AMO page is never freed, and its
	 * memory protections are never restricted.
	 */
	if ((amos_page = xpc_vars->amos_page) == NULL) {
		amos_page = (AMO_t *) mspec_kalloc_page(0);
		if (amos_page == NULL) {
			dev_err(xpc_part, "can't allocate page of AMOs\n");
			return NULL;
		}

		/*
		 * Open up AMO-R/W to cpu.  This is done for Shub 1.1 systems
		 * when xpc_allow_IPI_ops() is called via xpc_hb_init().
		 */
		if (!enable_shub_wars_1_1()) {
			ret = sn_change_memprotect(ia64_tpa((u64) amos_page),
					PAGE_SIZE, SN_MEMPROT_ACCESS_CLASS_1,
					&nasid_array);
			if (ret != 0) {
				dev_err(xpc_part, "can't change memory "
					"protections\n");
				mspec_kfree_page((unsigned long) amos_page);
				return NULL;
			}
		}
	} else if (!IS_AMO_ADDRESS((u64) amos_page)) {
		/*
		 * EFI's XPBOOT can also set amos_page in the reserved page,
		 * but it happens to leave it as an uncached physical address
		 * and we need it to be an uncached virtual, so we'll have to
		 * convert it.
		 */
		if (!IS_AMO_PHYS_ADDRESS((u64) amos_page)) {
			dev_err(xpc_part, "previously used amos_page address "
				"is bad = 0x%p\n", (void *) amos_page);
			return NULL;
		}
		amos_page = (AMO_t *) TO_AMO((u64) amos_page);
	}

	memset(xpc_vars, 0, sizeof(struct xpc_vars));

	/*
	 * Place the XPC per partition specific variables on the cache line
	 * following the XPC variables structure.
	 */
	next_cl += XPC_VARS_ALIGNED_SIZE;
	memset((u64 *) next_cl, 0, sizeof(struct xpc_vars_part) *
							XP_MAX_PARTITIONS);
	xpc_vars_part = (struct xpc_vars_part *) next_cl;
	xpc_vars->vars_part_pa = __pa(next_cl);

	xpc_vars->version = XPC_V_VERSION;
	xpc_vars->act_nasid = cpuid_to_nasid(0);
	xpc_vars->act_phys_cpuid = cpu_physical_id(0);
	xpc_vars->amos_page = amos_page;	/* save for next load of XPC */


	/*
	 * Initialize the activation related AMO variables.
	 */
	xpc_vars->act_amos = xpc_IPI_init(XP_MAX_PARTITIONS);
	for (i = 1; i < XP_NASID_MASK_WORDS; i++) {
		xpc_IPI_init(i + XP_MAX_PARTITIONS);
	}
	/* export AMO page's physical address to other partitions */
	xpc_vars->amos_page_pa = ia64_tpa((u64) xpc_vars->amos_page);

	/*
	 * This signifies to the remote partition that our reserved
	 * page is initialized.
	 *
	 * NOTE(review): the (volatile u64) cast on an lvalue is a GCC
	 * extension, not standard C; presumably intended to force the
	 * store to be emitted last — confirm it still has that effect.
	 */
	(volatile u64) rp->vars_pa = __pa(xpc_vars);

	return rp;
}
|
||||
|
||||
|
||||
/*
 * Change protections to allow IPI operations (and AMO operations on
 * Shub 1.1 systems).
 *
 * Saves the current SH_IPI_ACCESS register value(s) from the local hub so
 * xpc_restrict_IPI_ops() can restore them later, then writes all-ones to
 * the corresponding global MMRs of every online node.  On Shub 1.1 the
 * memory-directory protection vectors are opened up here as well, since
 * sn_change_memprotect() cannot be used on those systems.
 */
void
xpc_allow_IPI_ops(void)
{
	int node;
	int nasid;


	// >>> Change SH_IPI_ACCESS code to use SAL call once it is available.

	if (is_shub2()) {
		/* Shub 2 splits IPI access control across four registers */
		xpc_sh2_IPI_access0 =
			(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS0));
		xpc_sh2_IPI_access1 =
			(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS1));
		xpc_sh2_IPI_access2 =
			(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS2));
		xpc_sh2_IPI_access3 =
			(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS3));

		for_each_online_node(node) {
			nasid = cnodeid_to_nasid(node);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
								-1UL);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
								-1UL);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
								-1UL);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
								-1UL);
		}

	} else {
		xpc_sh1_IPI_access =
			(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH1_IPI_ACCESS));

		for_each_online_node(node) {
			nasid = cnodeid_to_nasid(node);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
								-1UL);

			/*
			 * Since the BIST collides with memory operations on
			 * SHUB 1.1 sn_change_memprotect() cannot be used.
			 */
			if (enable_shub_wars_1_1()) {
				/* open up everything */
				xpc_prot_vec[node] = (u64) HUB_L((u64 *)
						GLOBAL_MMR_ADDR(nasid,
						SH1_MD_DQLP_MMR_DIR_PRIVEC0));
				HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
						SH1_MD_DQLP_MMR_DIR_PRIVEC0),
								-1UL);
				HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
						SH1_MD_DQRP_MMR_DIR_PRIVEC0),
								-1UL);
			}
		}
	}
}
|
||||
|
||||
|
||||
/*
 * Restrict protections to disallow IPI operations (and AMO operations on
 * Shub 1.1 systems).
 *
 * Counterpart of xpc_allow_IPI_ops(): restores, on every online node, the
 * SH_IPI_ACCESS register value(s) (and on Shub 1.1 the protection vectors)
 * that were saved when access was opened up.
 */
void
xpc_restrict_IPI_ops(void)
{
	int node;
	int nasid;


	// >>> Change SH_IPI_ACCESS code to use SAL call once it is available.

	if (is_shub2()) {

		for_each_online_node(node) {
			nasid = cnodeid_to_nasid(node);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
							xpc_sh2_IPI_access0);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
							xpc_sh2_IPI_access1);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
							xpc_sh2_IPI_access2);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
							xpc_sh2_IPI_access3);
		}

	} else {

		for_each_online_node(node) {
			nasid = cnodeid_to_nasid(node);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
							xpc_sh1_IPI_access);

			if (enable_shub_wars_1_1()) {
				HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
						SH1_MD_DQLP_MMR_DIR_PRIVEC0),
							xpc_prot_vec[node]);
				HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
						SH1_MD_DQRP_MMR_DIR_PRIVEC0),
							xpc_prot_vec[node]);
			}
		}
	}
}
|
||||
|
||||
|
||||
/*
 * At periodic intervals, scan through all active partitions and ensure
 * their heartbeat is still active. If not, the partition is deactivated.
 *
 * For each remote partition that is neither inactive nor already
 * deactivating, BTE-copy its xpc_vars into the shared
 * xpc_remote_copy_buffer and compare the heartbeat counter against the
 * value recorded last time.  A partition sitting in the kernel debugger
 * (kdb_status != 0) is exempt from the stalled-heartbeat check.
 */
void
xpc_check_remote_hb(void)
{
	struct xpc_vars *remote_vars;
	struct xpc_partition *part;
	partid_t partid;
	bte_result_t bres;


	remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer;

	/* partid 0 is skipped; so is our own partition */
	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
		if (partid == sn_partition_id) {
			continue;
		}

		part = &xpc_partitions[partid];

		if (part->act_state == XPC_P_INACTIVE ||
				part->act_state == XPC_P_DEACTIVATING) {
			continue;
		}

		/* pull the remote_hb cache line */
		bres = xp_bte_copy(part->remote_vars_pa,
					ia64_tpa((u64) remote_vars),
					XPC_VARS_ALIGNED_SIZE,
					(BTE_NOTIFY | BTE_WACQUIRE), NULL);
		if (bres != BTE_SUCCESS) {
			XPC_DEACTIVATE_PARTITION(part,
						xpc_map_bte_errors(bres));
			continue;
		}

		dev_dbg(xpc_part, "partid = %d, heartbeat = %ld, last_heartbeat"
			" = %ld, kdb_status = %ld, HB_mask = 0x%lx\n", partid,
			remote_vars->heartbeat, part->last_heartbeat,
			remote_vars->kdb_status,
			remote_vars->heartbeating_to_mask);

		/*
		 * Deactivate if the heartbeat is stalled (and the remote is
		 * not in kdb), or if the remote is no longer heartbeating
		 * to us.
		 */
		if (((remote_vars->heartbeat == part->last_heartbeat) &&
			(remote_vars->kdb_status == 0)) ||
			     !XPC_HB_ALLOWED(sn_partition_id, remote_vars)) {

			XPC_DEACTIVATE_PARTITION(part, xpcNoHeartbeat);
			continue;
		}

		part->last_heartbeat = remote_vars->heartbeat;
	}
}
|
||||
|
||||
|
||||
/*
 * Get a copy of the remote partition's rsvd page.
 *
 * remote_rp points to a buffer that is cacheline aligned for BTE copies and
 * assumed to be of size XPC_RSVD_PAGE_ALIGNED_SIZE.
 *
 * On success the reserved page's physical address is returned through
 * *remote_rsvd_page_pa, its part_nasids mask is ORed into
 * discovered_nasids (when non-NULL), and the copied page has been
 * validated: partid in range, not our own partition, and a compatible
 * major version.
 */
static enum xpc_retval
xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
		struct xpc_rsvd_page *remote_rp, u64 *remote_rsvd_page_pa)
{
	int bres, i;


	/* get the reserved page's physical address */

	*remote_rsvd_page_pa = xpc_get_rsvd_page_pa(nasid, (u64) remote_rp,
						XPC_RSVD_PAGE_ALIGNED_SIZE);
	if (*remote_rsvd_page_pa == 0) {
		return xpcNoRsvdPageAddr;
	}


	/* pull over the reserved page structure */

	bres = xp_bte_copy(*remote_rsvd_page_pa, ia64_tpa((u64) remote_rp),
				XPC_RSVD_PAGE_ALIGNED_SIZE,
				(BTE_NOTIFY | BTE_WACQUIRE), NULL);
	if (bres != BTE_SUCCESS) {
		return xpc_map_bte_errors(bres);
	}


	if (discovered_nasids != NULL) {
		/* record every nasid belonging to the remote partition */
		for (i = 0; i < XP_NASID_MASK_WORDS; i++) {
			discovered_nasids[i] |= remote_rp->part_nasids[i];
		}
	}


	/* check that the partid is for another partition */

	if (remote_rp->partid < 1 ||
			remote_rp->partid > (XP_MAX_PARTITIONS - 1)) {
		return xpcInvalidPartid;
	}

	if (remote_rp->partid == sn_partition_id) {
		return xpcLocalPartid;
	}


	/* only identical major versions can interoperate */
	if (XPC_VERSION_MAJOR(remote_rp->version) !=
					XPC_VERSION_MAJOR(XPC_RP_VERSION)) {
		return xpcBadVersion;
	}

	return xpcSuccess;
}
|
||||
|
||||
|
||||
/*
 * Get a copy of the remote partition's XPC variables.
 *
 * remote_vars points to a buffer that is cacheline aligned for BTE copies and
 * assumed to be of size XPC_VARS_ALIGNED_SIZE.
 *
 * Returns xpcVarsNotSet if the remote has not published its vars address,
 * a mapped BTE error if the copy fails, or xpcBadVersion on a major
 * version mismatch.
 */
static enum xpc_retval
xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars)
{
	int bres;


	if (remote_vars_pa == 0) {
		return xpcVarsNotSet;
	}


	/* pull over the cross partition variables */

	bres = xp_bte_copy(remote_vars_pa, ia64_tpa((u64) remote_vars),
				XPC_VARS_ALIGNED_SIZE,
				(BTE_NOTIFY | BTE_WACQUIRE), NULL);
	if (bres != BTE_SUCCESS) {
		return xpc_map_bte_errors(bres);
	}

	/* only identical major versions can interoperate */
	if (XPC_VERSION_MAJOR(remote_vars->version) !=
					XPC_VERSION_MAJOR(XPC_V_VERSION)) {
		return xpcBadVersion;
	}

	return xpcSuccess;
}
|
||||
|
||||
|
||||
/*
 * Prior code has determine the nasid which generated an IPI. Inspect
 * that nasid to determine if its partition needs to be activated or
 * deactivated.
 *
 * A partition is consider "awaiting activation" if our partition
 * flags indicate it is not active and it has a heartbeat. A
 * partition is considered "awaiting deactivation" if our partition
 * flags indicate it is active but it has no heartbeat or it is not
 * sending its heartbeat to us.
 *
 * To determine the heartbeat, the remote nasid must have a properly
 * initialized reserved page.
 */
static void
xpc_identify_act_IRQ_req(int nasid)
{
	struct xpc_rsvd_page *remote_rp;
	struct xpc_vars *remote_vars;
	u64 remote_rsvd_page_pa;
	u64 remote_vars_pa;
	partid_t partid;
	struct xpc_partition *part;
	enum xpc_retval ret;


	/* pull over the reserved page structure */

	remote_rp = (struct xpc_rsvd_page *) xpc_remote_copy_buffer;

	ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rsvd_page_pa);
	if (ret != xpcSuccess) {
		dev_warn(xpc_part, "unable to get reserved page from nasid %d, "
			"which sent interrupt, reason=%d\n", nasid, ret);
		return;
	}

	remote_vars_pa = remote_rp->vars_pa;
	partid = remote_rp->partid;
	part = &xpc_partitions[partid];


	/*
	 * pull over the cross partition variables
	 * (reuses the same buffer, so remote_rp is dead after this)
	 */

	remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer;

	ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
	if (ret != xpcSuccess) {

		dev_warn(xpc_part, "unable to get XPC variables from nasid %d, "
			"which sent interrupt, reason=%d\n", nasid, ret);

		XPC_DEACTIVATE_PARTITION(part, ret);
		return;
	}


	part->act_IRQ_rcvd++;

	dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = "
		"%ld:0x%lx\n", (int) nasid, (int) partid, part->act_IRQ_rcvd,
		remote_vars->heartbeat, remote_vars->heartbeating_to_mask);


	if (part->act_state == XPC_P_INACTIVE) {

		/* remember where the remote's structures live, then activate */

		part->remote_rp_pa = remote_rsvd_page_pa;
		dev_dbg(xpc_part, "  remote_rp_pa = 0x%016lx\n",
			part->remote_rp_pa);

		part->remote_vars_pa = remote_vars_pa;
		dev_dbg(xpc_part, "  remote_vars_pa = 0x%016lx\n",
			part->remote_vars_pa);

		part->last_heartbeat = remote_vars->heartbeat;
		dev_dbg(xpc_part, "  last_heartbeat = 0x%016lx\n",
			part->last_heartbeat);

		part->remote_vars_part_pa = remote_vars->vars_part_pa;
		dev_dbg(xpc_part, "  remote_vars_part_pa = 0x%016lx\n",
			part->remote_vars_part_pa);

		part->remote_act_nasid = remote_vars->act_nasid;
		dev_dbg(xpc_part, "  remote_act_nasid = 0x%x\n",
			part->remote_act_nasid);

		part->remote_act_phys_cpuid = remote_vars->act_phys_cpuid;
		dev_dbg(xpc_part, "  remote_act_phys_cpuid = 0x%x\n",
			part->remote_act_phys_cpuid);

		part->remote_amos_page_pa = remote_vars->amos_page_pa;
		dev_dbg(xpc_part, "  remote_amos_page_pa = 0x%lx\n",
			part->remote_amos_page_pa);

		xpc_activate_partition(part);

	} else if (part->remote_amos_page_pa != remote_vars->amos_page_pa ||
			!XPC_HB_ALLOWED(sn_partition_id, remote_vars)) {

		/*
		 * AMO page moved (remote rebooted) or it stopped
		 * heartbeating to us: deactivate so we can reactivate.
		 */
		part->reactivate_nasid = nasid;
		XPC_DEACTIVATE_PARTITION(part, xpcReactivating);
	}
}
|
||||
|
||||
|
||||
/*
 * Loop through the activation AMO variables and process any bits
 * which are set.  Each bit indicates a nasid sending a partition
 * activation or deactivation request.
 *
 * Return #of IRQs detected.
 */
int
xpc_identify_act_IRQ_sender(void)
{
	int word, bit;
	u64 nasid_mask;
	u64 nasid;			/* remote nasid */
	int n_IRQs_detected = 0;
	AMO_t *act_amos;
	struct xpc_rsvd_page *rp = (struct xpc_rsvd_page *) xpc_rsvd_page;


	act_amos = xpc_vars->act_amos;


	/* scan through act AMO variable looking for non-zero entries */
	for (word = 0; word < XP_NASID_MASK_WORDS; word++) {

		/* fetch-and-clear this word's pending nasid bits */
		nasid_mask = xpc_IPI_receive(&act_amos[word]);
		if (nasid_mask == 0) {
			/* no IRQs from nasids in this variable */
			continue;
		}

		dev_dbg(xpc_part, "AMO[%d] gave back 0x%lx\n", word,
			nasid_mask);


		/*
		 * If this nasid has been added to the machine since
		 * our partition was reset, this will retain the
		 * remote nasid in our reserved pages machine mask.
		 * This is used in the event of module reload.
		 */
		rp->mach_nasids[word] |= nasid_mask;


		/* locate the nasid(s) which sent interrupts */

		for (bit = 0; bit < (8 * sizeof(u64)); bit++) {
			if (nasid_mask & (1UL << bit)) {
				n_IRQs_detected++;
				nasid = XPC_NASID_FROM_W_B(word, bit);
				dev_dbg(xpc_part, "interrupt from nasid %ld\n",
					nasid);
				xpc_identify_act_IRQ_req(nasid);
			}
		}
	}
	return n_IRQs_detected;
}
|
||||
|
||||
|
||||
/*
|
||||
* Mark specified partition as active.
|
||||
*/
|
||||
enum xpc_retval
|
||||
xpc_mark_partition_active(struct xpc_partition *part)
|
||||
{
|
||||
unsigned long irq_flags;
|
||||
enum xpc_retval ret;
|
||||
|
||||
|
||||
dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part));
|
||||
|
||||
spin_lock_irqsave(&part->act_lock, irq_flags);
|
||||
if (part->act_state == XPC_P_ACTIVATING) {
|
||||
part->act_state = XPC_P_ACTIVE;
|
||||
ret = xpcSuccess;
|
||||
} else {
|
||||
DBUG_ON(part->reason == xpcSuccess);
|
||||
ret = part->reason;
|
||||
}
|
||||
spin_unlock_irqrestore(&part->act_lock, irq_flags);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
/*
 * Notify XPC that the partition is down.
 *
 * 'line' is the caller's source line (via the XPC_DEACTIVATE_PARTITION
 * macro, presumably) and is recorded alongside 'reason'.  Already-inactive
 * partitions only get the reason recorded (plus a self-IPI when the reason
 * is xpcReactivating); already-deactivating partitions may have their
 * reason upgraded; otherwise the partition transitions to DEACTIVATING,
 * heartbeating to it is disallowed, and teardown begins.
 */
void
xpc_deactivate_partition(const int line, struct xpc_partition *part,
				enum xpc_retval reason)
{
	unsigned long irq_flags;
	partid_t partid = XPC_PARTID(part);


	spin_lock_irqsave(&part->act_lock, irq_flags);

	if (part->act_state == XPC_P_INACTIVE) {
		XPC_SET_REASON(part, reason, line);
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
		if (reason == xpcReactivating) {
			/* we interrupt ourselves to reactivate partition */
			xpc_IPI_send_reactivate(part);
		}
		return;
	}
	if (part->act_state == XPC_P_DEACTIVATING) {
		/*
		 * xpcUnloading and xpcReactivating take precedence over a
		 * previously recorded reason.
		 */
		if ((part->reason == xpcUnloading && reason != xpcUnloading) ||
					reason == xpcReactivating) {
			XPC_SET_REASON(part, reason, line);
		}
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
		return;
	}

	part->act_state = XPC_P_DEACTIVATING;
	XPC_SET_REASON(part, reason, line);

	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	/* stop heartbeating to the partition being taken down */
	XPC_DISALLOW_HB(partid, xpc_vars);

	dev_dbg(xpc_part, "bringing partition %d down, reason = %d\n", partid,
		reason);

	xpc_partition_down(part, reason);
}
|
||||
|
||||
|
||||
/*
 * Mark specified partition as inactive.
 * (Original comment said "active" — copy-paste error; this function sets
 * the state to XPC_P_INACTIVE and forgets the remote reserved page.)
 */
void
xpc_mark_partition_inactive(struct xpc_partition *part)
{
	unsigned long irq_flags;


	dev_dbg(xpc_part, "setting partition %d to INACTIVE\n",
		XPC_PARTID(part));

	spin_lock_irqsave(&part->act_lock, irq_flags);
	part->act_state = XPC_P_INACTIVE;
	spin_unlock_irqrestore(&part->act_lock, irq_flags);
	part->remote_rp_pa = 0;
}
|
||||
|
||||
|
||||
/*
 * SAL has provided a partition and machine mask.  The partition mask
 * contains a bit for each even nasid in our partition.  The machine
 * mask contains a bit for each even nasid in the entire machine.
 *
 * Using those two bit arrays, we can determine which nasids are
 * known in the machine.  Each should also have a reserved page
 * initialized if they are available for partitioning.
 *
 * Walks every region of the machine, probing each even nasid that is on
 * the Numa-Link network, is not local, and has not already been claimed
 * by a discovered partition.  For each viable remote partition, its AMO
 * page is registered with SAL and an activation IPI is sent.
 */
void
xpc_discovery(void)
{
	void *remote_rp_base;
	struct xpc_rsvd_page *remote_rp;
	struct xpc_vars *remote_vars;
	u64 remote_rsvd_page_pa;
	u64 remote_vars_pa;
	int region;
	int max_regions;
	int nasid;
	struct xpc_rsvd_page *rp;
	partid_t partid;
	struct xpc_partition *part;
	u64 *discovered_nasids;
	enum xpc_retval ret;


	/* private buffer: discovery runs concurrently with interrupts */
	remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RSVD_PAGE_ALIGNED_SIZE,
						GFP_KERNEL, &remote_rp_base);
	if (remote_rp == NULL) {
		return;
	}
	remote_vars = (struct xpc_vars *) remote_rp;


	discovered_nasids = kmalloc(sizeof(u64) * XP_NASID_MASK_WORDS,
							GFP_KERNEL);
	if (discovered_nasids == NULL) {
		kfree(remote_rp_base);
		return;
	}
	memset(discovered_nasids, 0, sizeof(u64) * XP_NASID_MASK_WORDS);

	rp = (struct xpc_rsvd_page *) xpc_rsvd_page;

	/*
	 * The term 'region' in this context refers to the minimum number of
	 * nodes that can comprise an access protection grouping.  The access
	 * protection is in regards to memory, IOI and IPI.
	 */
//>>> move the next two #defines into either include/asm-ia64/sn/arch.h or
//>>> include/asm-ia64/sn/addrs.h
#define SH1_MAX_REGIONS		64
#define SH2_MAX_REGIONS		256
	max_regions = is_shub2() ? SH2_MAX_REGIONS : SH1_MAX_REGIONS;

	for (region = 0; region < max_regions; region++) {

		/* bail out early if the driver is unloading */
		if ((volatile int) xpc_exiting) {
			break;
		}

		dev_dbg(xpc_part, "searching region %d\n", region);

		/* only even nasids exist, hence the stride of 2 */
		for (nasid = (region * sn_region_size * 2);
		     nasid < ((region + 1) * sn_region_size * 2);
		     nasid += 2) {

			if ((volatile int) xpc_exiting) {
				break;
			}

			dev_dbg(xpc_part, "checking nasid %d\n", nasid);


			if (XPC_NASID_IN_ARRAY(nasid, rp->part_nasids)) {
				dev_dbg(xpc_part, "PROM indicates Nasid %d is "
					"part of the local partition; skipping "
					"region\n", nasid);
				break;
			}

			if (!(XPC_NASID_IN_ARRAY(nasid, rp->mach_nasids))) {
				dev_dbg(xpc_part, "PROM indicates Nasid %d was "
					"not on Numa-Link network at reset\n",
					nasid);
				continue;
			}

			if (XPC_NASID_IN_ARRAY(nasid, discovered_nasids)) {
				dev_dbg(xpc_part, "Nasid %d is part of a "
					"partition which was previously "
					"discovered\n", nasid);
				continue;
			}


			/* pull over the reserved page structure */

			ret = xpc_get_remote_rp(nasid, discovered_nasids,
					      remote_rp, &remote_rsvd_page_pa);
			if (ret != xpcSuccess) {
				dev_dbg(xpc_part, "unable to get reserved page "
					"from nasid %d, reason=%d\n", nasid,
					ret);

				if (ret == xpcLocalPartid) {
					break;
				}
				continue;
			}

			remote_vars_pa = remote_rp->vars_pa;

			partid = remote_rp->partid;
			part = &xpc_partitions[partid];


			/* pull over the cross partition variables */

			ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
			if (ret != xpcSuccess) {
				dev_dbg(xpc_part, "unable to get XPC variables "
					"from nasid %d, reason=%d\n", nasid,
					ret);

				XPC_DEACTIVATE_PARTITION(part, ret);
				continue;
			}

			if (part->act_state != XPC_P_INACTIVE) {
				dev_dbg(xpc_part, "partition %d on nasid %d is "
					"already activating\n", partid, nasid);
				break;
			}

			/*
			 * Register the remote partition's AMOs with SAL so it
			 * can handle and cleanup errors within that address
			 * range should the remote partition go down.  We don't
			 * unregister this range because it is difficult to
			 * tell when outstanding writes to the remote partition
			 * are finished and thus when it is thus safe to
			 * unregister.  This should not result in wasted space
			 * in the SAL xp_addr_region table because we should
			 * get the same page for remote_act_amos_pa after
			 * module reloads and system reboots.
			 */
			if (sn_register_xp_addr_region(
					    remote_vars->amos_page_pa,
							PAGE_SIZE, 1) < 0) {
				dev_dbg(xpc_part, "partition %d failed to "
					"register xp_addr region 0x%016lx\n",
					partid, remote_vars->amos_page_pa);

				XPC_SET_REASON(part, xpcPhysAddrRegFailed,
						__LINE__);
				break;
			}

			/*
			 * The remote nasid is valid and available.
			 * Send an interrupt to that nasid to notify
			 * it that we are ready to begin activation.
			 */
			dev_dbg(xpc_part, "sending an interrupt to AMO 0x%lx, "
				"nasid %d, phys_cpuid 0x%x\n",
				remote_vars->amos_page_pa,
				remote_vars->act_nasid,
				remote_vars->act_phys_cpuid);

			xpc_IPI_send_activate(remote_vars);
		}
	}

	kfree(discovered_nasids);
	kfree(remote_rp_base);
}
|
||||
|
||||
|
||||
/*
 * Given a partid, get the nasids owned by that partition from the
 * remote partition's reserved page.
 *
 * BTE-copies the part_nasids mask from the remote reserved page directly
 * into the caller's nasid_mask buffer (which must be large enough for a
 * cacheline-aligned XP_NASID_MASK_BYTES copy).  Returns xpcPartitionDown
 * if we do not currently know where the remote reserved page lives.
 */
enum xpc_retval
xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask)
{
	struct xpc_partition *part;
	u64 part_nasid_pa;
	int bte_res;


	part = &xpc_partitions[partid];
	if (part->remote_rp_pa == 0) {
		return xpcPartitionDown;
	}

	/* hand-rolled offsetof(struct xpc_rsvd_page, part_nasids) */
	part_nasid_pa = part->remote_rp_pa +
		(u64) &((struct xpc_rsvd_page *) 0)->part_nasids;

	bte_res = xp_bte_copy(part_nasid_pa, ia64_tpa((u64) nasid_mask),
				L1_CACHE_ALIGN(XP_NASID_MASK_BYTES),
				(BTE_NOTIFY | BTE_WACQUIRE), NULL);

	return xpc_map_bte_errors(bte_res);
}
|
||||
|
715
arch/ia64/sn/kernel/xpnet.c
Normal file
715
arch/ia64/sn/kernel/xpnet.c
Normal file
@ -0,0 +1,715 @@
|
||||
/*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*
|
||||
* Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All rights reserved.
|
||||
*/
|
||||
|
||||
|
||||
/*
|
||||
* Cross Partition Network Interface (XPNET) support
|
||||
*
|
||||
* XPNET provides a virtual network layered on top of the Cross
|
||||
* Partition communication layer.
|
||||
*
|
||||
* XPNET provides direct point-to-point and broadcast-like support
|
||||
* for an ethernet-like device. The ethernet broadcast medium is
|
||||
* replaced with a point-to-point message structure which passes
|
||||
* pointers to a DMA-capable block that a remote partition should
|
||||
* retrieve and pass to the upper level networking layer.
|
||||
*
|
||||
*/
|
||||
|
||||
|
||||
#include <linux/config.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/ioport.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/etherdevice.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/ethtool.h>
|
||||
#include <linux/mii.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/string.h>
|
||||
#include <asm/sn/bte.h>
|
||||
#include <asm/sn/io.h>
|
||||
#include <asm/sn/sn_sal.h>
|
||||
#include <asm/types.h>
|
||||
#include <asm/atomic.h>
|
||||
#include <asm/sn/xp.h>
|
||||
|
||||
|
||||
/*
|
||||
* The message payload transferred by XPC.
|
||||
*
|
||||
* buf_pa is the physical address where the DMA should pull from.
|
||||
*
|
||||
* NOTE: for performance reasons, buf_pa should _ALWAYS_ begin on a
|
||||
* cacheline boundary. To accomplish this, we record the number of
|
||||
* bytes from the beginning of the first cacheline to the first useful
|
||||
* byte of the skb (leadin_ignore) and the number of bytes from the
|
||||
* last useful byte of the skb to the end of the last cacheline
|
||||
* (tailout_ignore).
|
||||
*
|
||||
* size is the number of bytes to transfer which includes the skb->len
|
||||
* (useful bytes of the senders skb) plus the leadin and tailout
|
||||
*/
|
||||
struct xpnet_message {
|
||||
u16 version; /* Version for this message */
|
||||
u16 embedded_bytes; /* #of bytes embedded in XPC message */
|
||||
u32 magic; /* Special number indicating this is xpnet */
|
||||
u64 buf_pa; /* phys address of buffer to retrieve */
|
||||
u32 size; /* #of bytes in buffer */
|
||||
u8 leadin_ignore; /* #of bytes to ignore at the beginning */
|
||||
u8 tailout_ignore; /* #of bytes to ignore at the end */
|
||||
unsigned char data; /* body of small packets */
|
||||
};
|
||||
|
||||
/*
|
||||
* Determine the size of our message, the cacheline aligned size,
|
||||
* and then the number of message will request from XPC.
|
||||
*
|
||||
* XPC expects each message to exist in an individual cacheline.
|
||||
*/
|
||||
#define XPNET_MSG_SIZE (L1_CACHE_BYTES - XPC_MSG_PAYLOAD_OFFSET)
|
||||
#define XPNET_MSG_DATA_MAX \
|
||||
(XPNET_MSG_SIZE - (u64)(&((struct xpnet_message *)0)->data))
|
||||
#define XPNET_MSG_ALIGNED_SIZE (L1_CACHE_ALIGN(XPNET_MSG_SIZE))
|
||||
#define XPNET_MSG_NENTRIES (PAGE_SIZE / XPNET_MSG_ALIGNED_SIZE)
|
||||
|
||||
|
||||
#define XPNET_MAX_KTHREADS (XPNET_MSG_NENTRIES + 1)
|
||||
#define XPNET_MAX_IDLE_KTHREADS (XPNET_MSG_NENTRIES + 1)
|
||||
|
||||
/*
|
||||
* Version number of XPNET implementation. XPNET can always talk to versions
|
||||
* with same major #, and never talk to versions with a different version.
|
||||
*/
|
||||
#define _XPNET_VERSION(_major, _minor) (((_major) << 4) | (_minor))
|
||||
#define XPNET_VERSION_MAJOR(_v) ((_v) >> 4)
|
||||
#define XPNET_VERSION_MINOR(_v) ((_v) & 0xf)
|
||||
|
||||
#define XPNET_VERSION _XPNET_VERSION(1,0) /* version 1.0 */
|
||||
#define XPNET_VERSION_EMBED _XPNET_VERSION(1,1) /* version 1.1 */
|
||||
#define XPNET_MAGIC 0x88786984 /* "XNET" */
|
||||
|
||||
#define XPNET_VALID_MSG(_m) \
|
||||
((XPNET_VERSION_MAJOR(_m->version) == XPNET_VERSION_MAJOR(XPNET_VERSION)) \
|
||||
&& (msg->magic == XPNET_MAGIC))
|
||||
|
||||
#define XPNET_DEVICE_NAME "xp0"
|
||||
|
||||
|
||||
/*
|
||||
* When messages are queued with xpc_send_notify, a kmalloc'd buffer
|
||||
* of the following type is passed as a notification cookie. When the
|
||||
* notification function is called, we use the cookie to decide
|
||||
* whether all outstanding message sends have completed. The skb can
|
||||
* then be released.
|
||||
*/
|
||||
struct xpnet_pending_msg {
|
||||
struct list_head free_list;
|
||||
struct sk_buff *skb;
|
||||
atomic_t use_count;
|
||||
};
|
||||
|
||||
/* driver specific structure pointed to by the device structure */
|
||||
struct xpnet_dev_private {
|
||||
struct net_device_stats stats;
|
||||
};
|
||||
|
||||
struct net_device *xpnet_device;
|
||||
|
||||
/*
|
||||
* When we are notified of other partitions activating, we add them to
|
||||
* our bitmask of partitions to which we broadcast.
|
||||
*/
|
||||
static u64 xpnet_broadcast_partitions;
|
||||
/* protect above */
|
||||
static spinlock_t xpnet_broadcast_lock = SPIN_LOCK_UNLOCKED;
|
||||
|
||||
/*
|
||||
* Since the Block Transfer Engine (BTE) is being used for the transfer
|
||||
* and it relies upon cache-line size transfers, we need to reserve at
|
||||
* least one cache-line for head and tail alignment. The BTE is
|
||||
* limited to 8MB transfers.
|
||||
*
|
||||
* Testing has shown that changing MTU to greater than 64KB has no effect
|
||||
* on TCP as the two sides negotiate a Max Segment Size that is limited
|
||||
* to 64K. Other protocols May use packets greater than this, but for
|
||||
* now, the default is 64KB.
|
||||
*/
|
||||
#define XPNET_MAX_MTU (0x800000UL - L1_CACHE_BYTES)
|
||||
/* 32KB has been determined to be the ideal */
|
||||
#define XPNET_DEF_MTU (0x8000UL)
|
||||
|
||||
|
||||
/*
|
||||
* The partition id is encapsulated in the MAC address. The following
|
||||
* define locates the octet the partid is in.
|
||||
*/
|
||||
#define XPNET_PARTID_OCTET 1
|
||||
#define XPNET_LICENSE_OCTET 2
|
||||
|
||||
|
||||
/*
|
||||
* Define the XPNET debug device structure that is to be used with dev_dbg(),
|
||||
* dev_err(), dev_warn(), and dev_info().
|
||||
*/
|
||||
struct device_driver xpnet_dbg_name = {
|
||||
.name = "xpnet"
|
||||
};
|
||||
|
||||
struct device xpnet_dbg_subname = {
|
||||
.bus_id = {0}, /* set to "" */
|
||||
.driver = &xpnet_dbg_name
|
||||
};
|
||||
|
||||
struct device *xpnet = &xpnet_dbg_subname;
|
||||
|
||||
/*
|
||||
* Packet was recevied by XPC and forwarded to us.
|
||||
*/
|
||||
static void
|
||||
xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
bte_result_t bret;
|
||||
struct xpnet_dev_private *priv =
|
||||
(struct xpnet_dev_private *) xpnet_device->priv;
|
||||
|
||||
|
||||
if (!XPNET_VALID_MSG(msg)) {
|
||||
/*
|
||||
* Packet with a different XPC version. Ignore.
|
||||
*/
|
||||
xpc_received(partid, channel, (void *) msg);
|
||||
|
||||
priv->stats.rx_errors++;
|
||||
|
||||
return;
|
||||
}
|
||||
dev_dbg(xpnet, "received 0x%lx, %d, %d, %d\n", msg->buf_pa, msg->size,
|
||||
msg->leadin_ignore, msg->tailout_ignore);
|
||||
|
||||
|
||||
/* reserve an extra cache line */
|
||||
skb = dev_alloc_skb(msg->size + L1_CACHE_BYTES);
|
||||
if (!skb) {
|
||||
dev_err(xpnet, "failed on dev_alloc_skb(%d)\n",
|
||||
msg->size + L1_CACHE_BYTES);
|
||||
|
||||
xpc_received(partid, channel, (void *) msg);
|
||||
|
||||
priv->stats.rx_errors++;
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* The allocated skb has some reserved space.
|
||||
* In order to use bte_copy, we need to get the
|
||||
* skb->data pointer moved forward.
|
||||
*/
|
||||
skb_reserve(skb, (L1_CACHE_BYTES - ((u64)skb->data &
|
||||
(L1_CACHE_BYTES - 1)) +
|
||||
msg->leadin_ignore));
|
||||
|
||||
/*
|
||||
* Update the tail pointer to indicate data actually
|
||||
* transferred.
|
||||
*/
|
||||
skb_put(skb, (msg->size - msg->leadin_ignore - msg->tailout_ignore));
|
||||
|
||||
/*
|
||||
* Move the data over from the the other side.
|
||||
*/
|
||||
if ((XPNET_VERSION_MINOR(msg->version) == 1) &&
|
||||
(msg->embedded_bytes != 0)) {
|
||||
dev_dbg(xpnet, "copying embedded message. memcpy(0x%p, 0x%p, "
|
||||
"%lu)\n", skb->data, &msg->data,
|
||||
(size_t) msg->embedded_bytes);
|
||||
|
||||
memcpy(skb->data, &msg->data, (size_t) msg->embedded_bytes);
|
||||
} else {
|
||||
dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t"
|
||||
"bte_copy(0x%p, 0x%p, %hu)\n", (void *)msg->buf_pa,
|
||||
(void *)__pa((u64)skb->data & ~(L1_CACHE_BYTES - 1)),
|
||||
msg->size);
|
||||
|
||||
bret = bte_copy(msg->buf_pa,
|
||||
__pa((u64)skb->data & ~(L1_CACHE_BYTES - 1)),
|
||||
msg->size, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
|
||||
|
||||
if (bret != BTE_SUCCESS) {
|
||||
// >>> Need better way of cleaning skb. Currently skb
|
||||
// >>> appears in_use and we can't just call
|
||||
// >>> dev_kfree_skb.
|
||||
dev_err(xpnet, "bte_copy(0x%p, 0x%p, 0x%hx) returned "
|
||||
"error=0x%x\n", (void *)msg->buf_pa,
|
||||
(void *)__pa((u64)skb->data &
|
||||
~(L1_CACHE_BYTES - 1)),
|
||||
msg->size, bret);
|
||||
|
||||
xpc_received(partid, channel, (void *) msg);
|
||||
|
||||
priv->stats.rx_errors++;
|
||||
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
dev_dbg(xpnet, "<skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
|
||||
"skb->end=0x%p skb->len=%d\n", (void *) skb->head,
|
||||
(void *) skb->data, (void *) skb->tail, (void *) skb->end,
|
||||
skb->len);
|
||||
|
||||
skb->dev = xpnet_device;
|
||||
skb->protocol = eth_type_trans(skb, xpnet_device);
|
||||
skb->ip_summed = CHECKSUM_UNNECESSARY;
|
||||
|
||||
dev_dbg(xpnet, "passing skb to network layer; \n\tskb->head=0x%p "
|
||||
"skb->data=0x%p skb->tail=0x%p skb->end=0x%p skb->len=%d\n",
|
||||
(void *) skb->head, (void *) skb->data, (void *) skb->tail,
|
||||
(void *) skb->end, skb->len);
|
||||
|
||||
|
||||
xpnet_device->last_rx = jiffies;
|
||||
priv->stats.rx_packets++;
|
||||
priv->stats.rx_bytes += skb->len + ETH_HLEN;
|
||||
|
||||
netif_rx_ni(skb);
|
||||
xpc_received(partid, channel, (void *) msg);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* This is the handler which XPC calls during any sort of change in
|
||||
* state or message reception on a connection.
|
||||
*/
|
||||
static void
|
||||
xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel,
|
||||
void *data, void *key)
|
||||
{
|
||||
long bp;
|
||||
|
||||
|
||||
DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
|
||||
DBUG_ON(channel != XPC_NET_CHANNEL);
|
||||
|
||||
switch(reason) {
|
||||
case xpcMsgReceived: /* message received */
|
||||
DBUG_ON(data == NULL);
|
||||
|
||||
xpnet_receive(partid, channel, (struct xpnet_message *) data);
|
||||
break;
|
||||
|
||||
case xpcConnected: /* connection completed to a partition */
|
||||
spin_lock_bh(&xpnet_broadcast_lock);
|
||||
xpnet_broadcast_partitions |= 1UL << (partid -1 );
|
||||
bp = xpnet_broadcast_partitions;
|
||||
spin_unlock_bh(&xpnet_broadcast_lock);
|
||||
|
||||
netif_carrier_on(xpnet_device);
|
||||
|
||||
dev_dbg(xpnet, "%s connection created to partition %d; "
|
||||
"xpnet_broadcast_partitions=0x%lx\n",
|
||||
xpnet_device->name, partid, bp);
|
||||
break;
|
||||
|
||||
default:
|
||||
spin_lock_bh(&xpnet_broadcast_lock);
|
||||
xpnet_broadcast_partitions &= ~(1UL << (partid -1 ));
|
||||
bp = xpnet_broadcast_partitions;
|
||||
spin_unlock_bh(&xpnet_broadcast_lock);
|
||||
|
||||
if (bp == 0) {
|
||||
netif_carrier_off(xpnet_device);
|
||||
}
|
||||
|
||||
dev_dbg(xpnet, "%s disconnected from partition %d; "
|
||||
"xpnet_broadcast_partitions=0x%lx\n",
|
||||
xpnet_device->name, partid, bp);
|
||||
break;
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static int
|
||||
xpnet_dev_open(struct net_device *dev)
|
||||
{
|
||||
enum xpc_retval ret;
|
||||
|
||||
|
||||
dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %d, "
|
||||
"%d)\n", XPC_NET_CHANNEL, xpnet_connection_activity,
|
||||
XPNET_MSG_SIZE, XPNET_MSG_NENTRIES, XPNET_MAX_KTHREADS,
|
||||
XPNET_MAX_IDLE_KTHREADS);
|
||||
|
||||
ret = xpc_connect(XPC_NET_CHANNEL, xpnet_connection_activity, NULL,
|
||||
XPNET_MSG_SIZE, XPNET_MSG_NENTRIES,
|
||||
XPNET_MAX_KTHREADS, XPNET_MAX_IDLE_KTHREADS);
|
||||
if (ret != xpcSuccess) {
|
||||
dev_err(xpnet, "ifconfig up of %s failed on XPC connect, "
|
||||
"ret=%d\n", dev->name, ret);
|
||||
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
dev_dbg(xpnet, "ifconfig up of %s; XPC connected\n", dev->name);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static int
|
||||
xpnet_dev_stop(struct net_device *dev)
|
||||
{
|
||||
xpc_disconnect(XPC_NET_CHANNEL);
|
||||
|
||||
dev_dbg(xpnet, "ifconfig down of %s; XPC disconnected\n", dev->name);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static int
|
||||
xpnet_dev_change_mtu(struct net_device *dev, int new_mtu)
|
||||
{
|
||||
/* 68 comes from min TCP+IP+MAC header */
|
||||
if ((new_mtu < 68) || (new_mtu > XPNET_MAX_MTU)) {
|
||||
dev_err(xpnet, "ifconfig %s mtu %d failed; value must be "
|
||||
"between 68 and %ld\n", dev->name, new_mtu,
|
||||
XPNET_MAX_MTU);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
dev->mtu = new_mtu;
|
||||
dev_dbg(xpnet, "ifconfig %s mtu set to %d\n", dev->name, new_mtu);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Required for the net_device structure.
|
||||
*/
|
||||
static int
|
||||
xpnet_dev_set_config(struct net_device *dev, struct ifmap *new_map)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Return statistics to the caller.
|
||||
*/
|
||||
static struct net_device_stats *
|
||||
xpnet_dev_get_stats(struct net_device *dev)
|
||||
{
|
||||
struct xpnet_dev_private *priv;
|
||||
|
||||
|
||||
priv = (struct xpnet_dev_private *) dev->priv;
|
||||
|
||||
return &priv->stats;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Notification that the other end has received the message and
|
||||
* DMA'd the skb information. At this point, they are done with
|
||||
* our side. When all recipients are done processing, we
|
||||
* release the skb and then release our pending message structure.
|
||||
*/
|
||||
static void
|
||||
xpnet_send_completed(enum xpc_retval reason, partid_t partid, int channel,
|
||||
void *__qm)
|
||||
{
|
||||
struct xpnet_pending_msg *queued_msg =
|
||||
(struct xpnet_pending_msg *) __qm;
|
||||
|
||||
|
||||
DBUG_ON(queued_msg == NULL);
|
||||
|
||||
dev_dbg(xpnet, "message to %d notified with reason %d\n",
|
||||
partid, reason);
|
||||
|
||||
if (atomic_dec_return(&queued_msg->use_count) == 0) {
|
||||
dev_dbg(xpnet, "all acks for skb->head=-x%p\n",
|
||||
(void *) queued_msg->skb->head);
|
||||
|
||||
dev_kfree_skb_any(queued_msg->skb);
|
||||
kfree(queued_msg);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Network layer has formatted a packet (skb) and is ready to place it
|
||||
* "on the wire". Prepare and send an xpnet_message to all partitions
|
||||
* which have connected with us and are targets of this packet.
|
||||
*
|
||||
* MAC-NOTE: For the XPNET driver, the MAC address contains the
|
||||
* destination partition_id. If the destination partition id word
|
||||
* is 0xff, this packet is to broadcast to all partitions.
|
||||
*/
|
||||
static int
|
||||
xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
{
|
||||
struct xpnet_pending_msg *queued_msg;
|
||||
enum xpc_retval ret;
|
||||
struct xpnet_message *msg;
|
||||
u64 start_addr, end_addr;
|
||||
long dp;
|
||||
u8 second_mac_octet;
|
||||
partid_t dest_partid;
|
||||
struct xpnet_dev_private *priv;
|
||||
u16 embedded_bytes;
|
||||
|
||||
|
||||
priv = (struct xpnet_dev_private *) dev->priv;
|
||||
|
||||
|
||||
dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
|
||||
"skb->end=0x%p skb->len=%d\n", (void *) skb->head,
|
||||
(void *) skb->data, (void *) skb->tail, (void *) skb->end,
|
||||
skb->len);
|
||||
|
||||
|
||||
/*
|
||||
* The xpnet_pending_msg tracks how many outstanding
|
||||
* xpc_send_notifies are relying on this skb. When none
|
||||
* remain, release the skb.
|
||||
*/
|
||||
queued_msg = kmalloc(sizeof(struct xpnet_pending_msg), GFP_ATOMIC);
|
||||
if (queued_msg == NULL) {
|
||||
dev_warn(xpnet, "failed to kmalloc %ld bytes; dropping "
|
||||
"packet\n", sizeof(struct xpnet_pending_msg));
|
||||
|
||||
priv->stats.tx_errors++;
|
||||
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
|
||||
/* get the beginning of the first cacheline and end of last */
|
||||
start_addr = ((u64) skb->data & ~(L1_CACHE_BYTES - 1));
|
||||
end_addr = L1_CACHE_ALIGN((u64) skb->tail);
|
||||
|
||||
/* calculate how many bytes to embed in the XPC message */
|
||||
embedded_bytes = 0;
|
||||
if (unlikely(skb->len <= XPNET_MSG_DATA_MAX)) {
|
||||
/* skb->data does fit so embed */
|
||||
embedded_bytes = skb->len;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Since the send occurs asynchronously, we set the count to one
|
||||
* and begin sending. Any sends that happen to complete before
|
||||
* we are done sending will not free the skb. We will be left
|
||||
* with that task during exit. This also handles the case of
|
||||
* a packet destined for a partition which is no longer up.
|
||||
*/
|
||||
atomic_set(&queued_msg->use_count, 1);
|
||||
queued_msg->skb = skb;
|
||||
|
||||
|
||||
second_mac_octet = skb->data[XPNET_PARTID_OCTET];
|
||||
if (second_mac_octet == 0xff) {
|
||||
/* we are being asked to broadcast to all partitions */
|
||||
dp = xpnet_broadcast_partitions;
|
||||
} else if (second_mac_octet != 0) {
|
||||
dp = xpnet_broadcast_partitions &
|
||||
(1UL << (second_mac_octet - 1));
|
||||
} else {
|
||||
/* 0 is an invalid partid. Ignore */
|
||||
dp = 0;
|
||||
}
|
||||
dev_dbg(xpnet, "destination Partitions mask (dp) = 0x%lx\n", dp);
|
||||
|
||||
/*
|
||||
* If we wanted to allow promiscous mode to work like an
|
||||
* unswitched network, this would be a good point to OR in a
|
||||
* mask of partitions which should be receiving all packets.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Main send loop.
|
||||
*/
|
||||
for (dest_partid = 1; dp && dest_partid < XP_MAX_PARTITIONS;
|
||||
dest_partid++) {
|
||||
|
||||
|
||||
if (!(dp & (1UL << (dest_partid - 1)))) {
|
||||
/* not destined for this partition */
|
||||
continue;
|
||||
}
|
||||
|
||||
/* remove this partition from the destinations mask */
|
||||
dp &= ~(1UL << (dest_partid - 1));
|
||||
|
||||
|
||||
/* found a partition to send to */
|
||||
|
||||
ret = xpc_allocate(dest_partid, XPC_NET_CHANNEL,
|
||||
XPC_NOWAIT, (void **)&msg);
|
||||
if (unlikely(ret != xpcSuccess)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
msg->embedded_bytes = embedded_bytes;
|
||||
if (unlikely(embedded_bytes != 0)) {
|
||||
msg->version = XPNET_VERSION_EMBED;
|
||||
dev_dbg(xpnet, "calling memcpy(0x%p, 0x%p, 0x%lx)\n",
|
||||
&msg->data, skb->data, (size_t) embedded_bytes);
|
||||
memcpy(&msg->data, skb->data, (size_t) embedded_bytes);
|
||||
} else {
|
||||
msg->version = XPNET_VERSION;
|
||||
}
|
||||
msg->magic = XPNET_MAGIC;
|
||||
msg->size = end_addr - start_addr;
|
||||
msg->leadin_ignore = (u64) skb->data - start_addr;
|
||||
msg->tailout_ignore = end_addr - (u64) skb->tail;
|
||||
msg->buf_pa = __pa(start_addr);
|
||||
|
||||
dev_dbg(xpnet, "sending XPC message to %d:%d\nmsg->buf_pa="
|
||||
"0x%lx, msg->size=%u, msg->leadin_ignore=%u, "
|
||||
"msg->tailout_ignore=%u\n", dest_partid,
|
||||
XPC_NET_CHANNEL, msg->buf_pa, msg->size,
|
||||
msg->leadin_ignore, msg->tailout_ignore);
|
||||
|
||||
|
||||
atomic_inc(&queued_msg->use_count);
|
||||
|
||||
ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, msg,
|
||||
xpnet_send_completed, queued_msg);
|
||||
if (unlikely(ret != xpcSuccess)) {
|
||||
atomic_dec(&queued_msg->use_count);
|
||||
continue;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if (atomic_dec_return(&queued_msg->use_count) == 0) {
|
||||
dev_dbg(xpnet, "no partitions to receive packet destined for "
|
||||
"%d\n", dest_partid);
|
||||
|
||||
|
||||
dev_kfree_skb(skb);
|
||||
kfree(queued_msg);
|
||||
}
|
||||
|
||||
priv->stats.tx_packets++;
|
||||
priv->stats.tx_bytes += skb->len;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Deal with transmit timeouts coming from the network layer.
|
||||
*/
|
||||
static void
|
||||
xpnet_dev_tx_timeout (struct net_device *dev)
|
||||
{
|
||||
struct xpnet_dev_private *priv;
|
||||
|
||||
|
||||
priv = (struct xpnet_dev_private *) dev->priv;
|
||||
|
||||
priv->stats.tx_errors++;
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
static int __init
|
||||
xpnet_init(void)
|
||||
{
|
||||
int i;
|
||||
u32 license_num;
|
||||
int result = -ENOMEM;
|
||||
|
||||
|
||||
dev_info(xpnet, "registering network device %s\n", XPNET_DEVICE_NAME);
|
||||
|
||||
/*
|
||||
* use ether_setup() to init the majority of our device
|
||||
* structure and then override the necessary pieces.
|
||||
*/
|
||||
xpnet_device = alloc_netdev(sizeof(struct xpnet_dev_private),
|
||||
XPNET_DEVICE_NAME, ether_setup);
|
||||
if (xpnet_device == NULL) {
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
netif_carrier_off(xpnet_device);
|
||||
|
||||
xpnet_device->mtu = XPNET_DEF_MTU;
|
||||
xpnet_device->change_mtu = xpnet_dev_change_mtu;
|
||||
xpnet_device->open = xpnet_dev_open;
|
||||
xpnet_device->get_stats = xpnet_dev_get_stats;
|
||||
xpnet_device->stop = xpnet_dev_stop;
|
||||
xpnet_device->hard_start_xmit = xpnet_dev_hard_start_xmit;
|
||||
xpnet_device->tx_timeout = xpnet_dev_tx_timeout;
|
||||
xpnet_device->set_config = xpnet_dev_set_config;
|
||||
|
||||
/*
|
||||
* Multicast assumes the LSB of the first octet is set for multicast
|
||||
* MAC addresses. We chose the first octet of the MAC to be unlikely
|
||||
* to collide with any vendor's officially issued MAC.
|
||||
*/
|
||||
xpnet_device->dev_addr[0] = 0xfe;
|
||||
xpnet_device->dev_addr[XPNET_PARTID_OCTET] = sn_partition_id;
|
||||
license_num = sn_partition_serial_number_val();
|
||||
for (i = 3; i >= 0; i--) {
|
||||
xpnet_device->dev_addr[XPNET_LICENSE_OCTET + i] =
|
||||
license_num & 0xff;
|
||||
license_num = license_num >> 8;
|
||||
}
|
||||
|
||||
/*
|
||||
* ether_setup() sets this to a multicast device. We are
|
||||
* really not supporting multicast at this time.
|
||||
*/
|
||||
xpnet_device->flags &= ~IFF_MULTICAST;
|
||||
|
||||
/*
|
||||
* No need to checksum as it is a DMA transfer. The BTE will
|
||||
* report an error if the data is not retrievable and the
|
||||
* packet will be dropped.
|
||||
*/
|
||||
xpnet_device->features = NETIF_F_NO_CSUM;
|
||||
|
||||
result = register_netdev(xpnet_device);
|
||||
if (result != 0) {
|
||||
free_netdev(xpnet_device);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
module_init(xpnet_init);
|
||||
|
||||
|
||||
static void __exit
|
||||
xpnet_exit(void)
|
||||
{
|
||||
dev_info(xpnet, "unregistering network device %s\n",
|
||||
xpnet_device[0].name);
|
||||
|
||||
unregister_netdev(xpnet_device);
|
||||
|
||||
free_netdev(xpnet_device);
|
||||
}
|
||||
module_exit(xpnet_exit);
|
||||
|
||||
|
||||
MODULE_AUTHOR("Silicon Graphics, Inc.");
|
||||
MODULE_DESCRIPTION("Cross Partition Network adapter (XPNET)");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@ -301,7 +301,7 @@ void sn_dma_flush(uint64_t addr)
|
||||
spin_lock_irqsave(&((struct sn_flush_device_list *)p)->
|
||||
sfdl_flush_lock, flags);
|
||||
|
||||
p->sfdl_flush_value = 0;
|
||||
*p->sfdl_flush_addr = 0;
|
||||
|
||||
/* force an interrupt. */
|
||||
*(volatile uint32_t *)(p->sfdl_force_int_addr) = 1;
|
||||
|
@ -431,7 +431,7 @@ tioca_dma_mapped(struct pci_dev *pdev, uint64_t paddr, size_t req_size)
|
||||
ca_dmamap->cad_dma_addr = bus_addr;
|
||||
ca_dmamap->cad_gart_size = entries;
|
||||
ca_dmamap->cad_gart_entry = entry;
|
||||
list_add(&ca_dmamap->cad_list, &tioca_kern->ca_list);
|
||||
list_add(&ca_dmamap->cad_list, &tioca_kern->ca_dmamaps);
|
||||
|
||||
if (xio_addr % ps) {
|
||||
tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr);
|
||||
|
@ -534,6 +534,11 @@ endchoice
|
||||
|
||||
endmenu
|
||||
|
||||
config ISA_DMA_API
|
||||
bool
|
||||
depends on !M5272
|
||||
default y
|
||||
|
||||
menu "Bus options (PCI, PCMCIA, EISA, MCA, ISA)"
|
||||
|
||||
config PCI
|
||||
|
@ -1656,3 +1656,7 @@ config GENERIC_HARDIRQS
|
||||
config GENERIC_IRQ_PROBE
|
||||
bool
|
||||
default y
|
||||
|
||||
config ISA_DMA_API
|
||||
bool
|
||||
default y
|
||||
|
@ -45,6 +45,10 @@ config GENERIC_IRQ_PROBE
|
||||
config PM
|
||||
bool
|
||||
|
||||
config ISA_DMA_API
|
||||
bool
|
||||
default y
|
||||
|
||||
source "init/Kconfig"
|
||||
|
||||
|
||||
|
@ -1079,6 +1079,10 @@ source kernel/power/Kconfig
|
||||
|
||||
endmenu
|
||||
|
||||
config ISA_DMA_API
|
||||
bool
|
||||
default y
|
||||
|
||||
menu "Bus options"
|
||||
|
||||
config ISA
|
||||
|
@ -293,6 +293,9 @@ config SECCOMP
|
||||
|
||||
endmenu
|
||||
|
||||
config ISA_DMA_API
|
||||
bool
|
||||
default y
|
||||
|
||||
menu "General setup"
|
||||
|
||||
|
@ -56,12 +56,19 @@ LDFLAGS_vmlinux := -Bstatic -e $(KERNELLOAD) -Ttext $(KERNELLOAD)
|
||||
CFLAGS += -msoft-float -pipe -mminimal-toc -mtraceback=none \
|
||||
-mcall-aixdesc
|
||||
|
||||
GCC_VERSION := $(call cc-version)
|
||||
GCC_BROKEN_VEC := $(shell if [ $(GCC_VERSION) -lt 0400 ] ; then echo "y"; fi ;)
|
||||
|
||||
ifeq ($(CONFIG_POWER4_ONLY),y)
|
||||
ifeq ($(CONFIG_ALTIVEC),y)
|
||||
ifeq ($(GCC_BROKEN_VEC),y)
|
||||
CFLAGS += $(call cc-option,-mcpu=970)
|
||||
else
|
||||
CFLAGS += $(call cc-option,-mcpu=power4)
|
||||
endif
|
||||
else
|
||||
CFLAGS += $(call cc-option,-mcpu=power4)
|
||||
endif
|
||||
else
|
||||
CFLAGS += $(call cc-option,-mtune=power4)
|
||||
endif
|
||||
|
@ -693,6 +693,10 @@ config RTC_9701JE
|
||||
|
||||
endmenu
|
||||
|
||||
config ISA_DMA_API
|
||||
bool
|
||||
depends on MPC1211
|
||||
default y
|
||||
|
||||
menu "Bus options (PCI, PCMCIA, EISA, MCA, ISA)"
|
||||
|
||||
|
@ -47,9 +47,9 @@ prom_sortmemlist(struct linux_mlist_v0 *thislist)
|
||||
char *tmpaddr;
|
||||
char *lowest;
|
||||
|
||||
for(i=0; thislist[i].theres_more != 0; i++) {
|
||||
for(i=0; thislist[i].theres_more; i++) {
|
||||
lowest = thislist[i].start_adr;
|
||||
for(mitr = i+1; thislist[mitr-1].theres_more != 0; mitr++)
|
||||
for(mitr = i+1; thislist[mitr-1].theres_more; mitr++)
|
||||
if(thislist[mitr].start_adr < lowest) {
|
||||
lowest = thislist[mitr].start_adr;
|
||||
swapi = mitr;
|
||||
@ -85,7 +85,7 @@ void __init prom_meminit(void)
|
||||
prom_phys_total[iter].num_bytes = mptr->num_bytes;
|
||||
prom_phys_total[iter].theres_more = &prom_phys_total[iter+1];
|
||||
}
|
||||
prom_phys_total[iter-1].theres_more = 0x0;
|
||||
prom_phys_total[iter-1].theres_more = NULL;
|
||||
/* Second, the total prom taken descriptors. */
|
||||
for(mptr = (*(romvec->pv_v0mem.v0_prommap)), iter=0;
|
||||
mptr; mptr=mptr->theres_more, iter++) {
|
||||
@ -93,7 +93,7 @@ void __init prom_meminit(void)
|
||||
prom_prom_taken[iter].num_bytes = mptr->num_bytes;
|
||||
prom_prom_taken[iter].theres_more = &prom_prom_taken[iter+1];
|
||||
}
|
||||
prom_prom_taken[iter-1].theres_more = 0x0;
|
||||
prom_prom_taken[iter-1].theres_more = NULL;
|
||||
/* Last, the available physical descriptors. */
|
||||
for(mptr = (*(romvec->pv_v0mem.v0_available)), iter=0;
|
||||
mptr; mptr=mptr->theres_more, iter++) {
|
||||
@ -101,7 +101,7 @@ void __init prom_meminit(void)
|
||||
prom_phys_avail[iter].num_bytes = mptr->num_bytes;
|
||||
prom_phys_avail[iter].theres_more = &prom_phys_avail[iter+1];
|
||||
}
|
||||
prom_phys_avail[iter-1].theres_more = 0x0;
|
||||
prom_phys_avail[iter-1].theres_more = NULL;
|
||||
/* Sort all the lists. */
|
||||
prom_sortmemlist(prom_phys_total);
|
||||
prom_sortmemlist(prom_prom_taken);
|
||||
@ -124,7 +124,7 @@ void __init prom_meminit(void)
|
||||
prom_phys_avail[iter].theres_more =
|
||||
&prom_phys_avail[iter+1];
|
||||
}
|
||||
prom_phys_avail[iter-1].theres_more = 0x0;
|
||||
prom_phys_avail[iter-1].theres_more = NULL;
|
||||
|
||||
num_regs = prom_getproperty(node, "reg",
|
||||
(char *) prom_reg_memlist,
|
||||
@ -138,7 +138,7 @@ void __init prom_meminit(void)
|
||||
prom_phys_total[iter].theres_more =
|
||||
&prom_phys_total[iter+1];
|
||||
}
|
||||
prom_phys_total[iter-1].theres_more = 0x0;
|
||||
prom_phys_total[iter-1].theres_more = NULL;
|
||||
|
||||
node = prom_getchild(prom_root_node);
|
||||
node = prom_searchsiblings(node, "virtual-memory");
|
||||
@ -158,7 +158,7 @@ void __init prom_meminit(void)
|
||||
prom_prom_taken[iter].theres_more =
|
||||
&prom_prom_taken[iter+1];
|
||||
}
|
||||
prom_prom_taken[iter-1].theres_more = 0x0;
|
||||
prom_prom_taken[iter-1].theres_more = NULL;
|
||||
|
||||
prom_sortmemlist(prom_prom_taken);
|
||||
|
||||
@ -182,15 +182,15 @@ void __init prom_meminit(void)
|
||||
case PROM_SUN4:
|
||||
#ifdef CONFIG_SUN4
|
||||
/* how simple :) */
|
||||
prom_phys_total[0].start_adr = 0x0;
|
||||
prom_phys_total[0].start_adr = NULL;
|
||||
prom_phys_total[0].num_bytes = *(sun4_romvec->memorysize);
|
||||
prom_phys_total[0].theres_more = 0x0;
|
||||
prom_prom_taken[0].start_adr = 0x0;
|
||||
prom_phys_total[0].theres_more = NULL;
|
||||
prom_prom_taken[0].start_adr = NULL;
|
||||
prom_prom_taken[0].num_bytes = 0x0;
|
||||
prom_prom_taken[0].theres_more = 0x0;
|
||||
prom_phys_avail[0].start_adr = 0x0;
|
||||
prom_prom_taken[0].theres_more = NULL;
|
||||
prom_phys_avail[0].start_adr = NULL;
|
||||
prom_phys_avail[0].num_bytes = *(sun4_romvec->memoryavail);
|
||||
prom_phys_avail[0].theres_more = 0x0;
|
||||
prom_phys_avail[0].theres_more = NULL;
|
||||
#endif
|
||||
break;
|
||||
|
||||
|
@ -151,7 +151,7 @@ struct linux_romvec * __init sun4_prom_init(void)
|
||||
* have more time, we can teach the penguin to say "By your
|
||||
* command" or "Activating turbo boost, Michael". :-)
|
||||
*/
|
||||
sun4_romvec->setLEDs(0x0);
|
||||
sun4_romvec->setLEDs(NULL);
|
||||
|
||||
printk("PROMLIB: Old Sun4 boot PROM monitor %s, romvec version %d\n",
|
||||
sun4_romvec->monid,
|
||||
|
@ -756,7 +756,7 @@ void handler_irq(int irq, struct pt_regs *regs)
|
||||
clear_softint(clr_mask);
|
||||
}
|
||||
#else
|
||||
int should_forward = 1;
|
||||
int should_forward = 0;
|
||||
|
||||
clear_softint(1 << irq);
|
||||
#endif
|
||||
@ -1007,10 +1007,10 @@ static int retarget_one_irq(struct irqaction *p, int goal_cpu)
|
||||
}
|
||||
upa_writel(tid | IMAP_VALID, imap);
|
||||
|
||||
while (!cpu_online(goal_cpu)) {
|
||||
do {
|
||||
if (++goal_cpu >= NR_CPUS)
|
||||
goal_cpu = 0;
|
||||
}
|
||||
} while (!cpu_online(goal_cpu));
|
||||
|
||||
return goal_cpu;
|
||||
}
|
||||
|
@ -379,6 +379,11 @@ config GENERIC_IRQ_PROBE
|
||||
bool
|
||||
default y
|
||||
|
||||
# we have no ISA slots, but we do have ISA-style DMA.
|
||||
config ISA_DMA_API
|
||||
bool
|
||||
default y
|
||||
|
||||
menu "Power management options"
|
||||
|
||||
source kernel/power/Kconfig
|
||||
|
@ -405,9 +405,8 @@ void device_release_driver(struct device * dev)
|
||||
|
||||
static void driver_detach(struct device_driver * drv)
|
||||
{
|
||||
struct list_head * entry, * next;
|
||||
list_for_each_safe(entry, next, &drv->devices) {
|
||||
struct device * dev = container_of(entry, struct device, driver_list);
|
||||
while (!list_empty(&drv->devices)) {
|
||||
struct device * dev = container_of(drv->devices.next, struct device, driver_list);
|
||||
device_release_driver(dev);
|
||||
}
|
||||
}
|
||||
|
@ -139,7 +139,7 @@ static int dev_hotplug(struct kset *kset, struct kobject *kobj, char **envp,
|
||||
buffer = &buffer[length];
|
||||
buffer_size -= length;
|
||||
|
||||
if (dev->bus->hotplug) {
|
||||
if (dev->bus && dev->bus->hotplug) {
|
||||
/* have the bus specific function add its stuff */
|
||||
retval = dev->bus->hotplug (dev, envp, num_envp, buffer, buffer_size);
|
||||
if (retval) {
|
||||
|
@ -105,7 +105,7 @@ config ATARI_SLM
|
||||
|
||||
config BLK_DEV_XD
|
||||
tristate "XT hard disk support"
|
||||
depends on ISA
|
||||
depends on ISA && ISA_DMA_API
|
||||
help
|
||||
Very old 8 bit hard disk controllers used in the IBM XT computer
|
||||
will be supported if you say Y here.
|
||||
|
@ -1,5 +1,5 @@
|
||||
/* Copyright (c) 2004 Coraid, Inc. See COPYING for GPL terms. */
|
||||
#define VERSION "6"
|
||||
#define VERSION "10"
|
||||
#define AOE_MAJOR 152
|
||||
#define DEVICE_NAME "aoe"
|
||||
|
||||
|
@ -37,6 +37,13 @@ static ssize_t aoedisk_show_netif(struct gendisk * disk, char *page)
|
||||
|
||||
return snprintf(page, PAGE_SIZE, "%s\n", d->ifp->name);
|
||||
}
|
||||
/* firmware version */
|
||||
static ssize_t aoedisk_show_fwver(struct gendisk * disk, char *page)
|
||||
{
|
||||
struct aoedev *d = disk->private_data;
|
||||
|
||||
return snprintf(page, PAGE_SIZE, "0x%04x\n", (unsigned int) d->fw_ver);
|
||||
}
|
||||
|
||||
static struct disk_attribute disk_attr_state = {
|
||||
.attr = {.name = "state", .mode = S_IRUGO },
|
||||
@ -50,6 +57,10 @@ static struct disk_attribute disk_attr_netif = {
|
||||
.attr = {.name = "netif", .mode = S_IRUGO },
|
||||
.show = aoedisk_show_netif
|
||||
};
|
||||
static struct disk_attribute disk_attr_fwver = {
|
||||
.attr = {.name = "firmware-version", .mode = S_IRUGO },
|
||||
.show = aoedisk_show_fwver
|
||||
};
|
||||
|
||||
static void
|
||||
aoedisk_add_sysfs(struct aoedev *d)
|
||||
@ -57,6 +68,7 @@ aoedisk_add_sysfs(struct aoedev *d)
|
||||
sysfs_create_file(&d->gd->kobj, &disk_attr_state.attr);
|
||||
sysfs_create_file(&d->gd->kobj, &disk_attr_mac.attr);
|
||||
sysfs_create_file(&d->gd->kobj, &disk_attr_netif.attr);
|
||||
sysfs_create_file(&d->gd->kobj, &disk_attr_fwver.attr);
|
||||
}
|
||||
void
|
||||
aoedisk_rm_sysfs(struct aoedev *d)
|
||||
@ -64,6 +76,7 @@ aoedisk_rm_sysfs(struct aoedev *d)
|
||||
sysfs_remove_link(&d->gd->kobj, "state");
|
||||
sysfs_remove_link(&d->gd->kobj, "mac");
|
||||
sysfs_remove_link(&d->gd->kobj, "netif");
|
||||
sysfs_remove_link(&d->gd->kobj, "firmware-version");
|
||||
}
|
||||
|
||||
static int
|
||||
|
@ -109,25 +109,22 @@ aoedev_set(ulong sysminor, unsigned char *addr, struct net_device *ifp, ulong bu
|
||||
spin_lock_irqsave(&devlist_lock, flags);
|
||||
|
||||
for (d=devlist; d; d=d->next)
|
||||
if (d->sysminor == sysminor
|
||||
|| memcmp(d->addr, addr, sizeof d->addr) == 0)
|
||||
if (d->sysminor == sysminor)
|
||||
break;
|
||||
|
||||
if (d == NULL && (d = aoedev_newdev(bufcnt)) == NULL) {
|
||||
spin_unlock_irqrestore(&devlist_lock, flags);
|
||||
printk(KERN_INFO "aoe: aoedev_set: aoedev_newdev failure.\n");
|
||||
return NULL;
|
||||
}
|
||||
} /* if newdev, (d->flags & DEVFL_UP) == 0 for below */
|
||||
|
||||
spin_unlock_irqrestore(&devlist_lock, flags);
|
||||
spin_lock_irqsave(&d->lock, flags);
|
||||
|
||||
d->ifp = ifp;
|
||||
|
||||
if (d->sysminor != sysminor
|
||||
|| (d->flags & DEVFL_UP) == 0) {
|
||||
memcpy(d->addr, addr, sizeof d->addr);
|
||||
if ((d->flags & DEVFL_UP) == 0) {
|
||||
aoedev_downdev(d); /* flushes outstanding frames */
|
||||
memcpy(d->addr, addr, sizeof d->addr);
|
||||
d->sysminor = sysminor;
|
||||
d->aoemajor = AOEMAJOR(sysminor);
|
||||
d->aoeminor = AOEMINOR(sysminor);
|
||||
|
@ -7,6 +7,7 @@
|
||||
#include <linux/hdreg.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/moduleparam.h>
|
||||
#include "aoe.h"
|
||||
|
||||
#define NECODES 5
|
||||
@ -26,6 +27,19 @@ enum {
|
||||
};
|
||||
|
||||
static char aoe_iflist[IFLISTSZ];
|
||||
module_param_string(aoe_iflist, aoe_iflist, IFLISTSZ, 0600);
|
||||
MODULE_PARM_DESC(aoe_iflist, "aoe_iflist=\"dev1 [dev2 ...]\"\n");
|
||||
|
||||
#ifndef MODULE
|
||||
static int __init aoe_iflist_setup(char *str)
|
||||
{
|
||||
strncpy(aoe_iflist, str, IFLISTSZ);
|
||||
aoe_iflist[IFLISTSZ - 1] = '\0';
|
||||
return 1;
|
||||
}
|
||||
|
||||
__setup("aoe_iflist=", aoe_iflist_setup);
|
||||
#endif
|
||||
|
||||
int
|
||||
is_aoe_netif(struct net_device *ifp)
|
||||
@ -36,7 +50,8 @@ is_aoe_netif(struct net_device *ifp)
|
||||
if (aoe_iflist[0] == '\0')
|
||||
return 1;
|
||||
|
||||
for (p = aoe_iflist; *p; p = q + strspn(q, WHITESPACE)) {
|
||||
p = aoe_iflist + strspn(aoe_iflist, WHITESPACE);
|
||||
for (; *p; p = q + strspn(q, WHITESPACE)) {
|
||||
q = p + strcspn(p, WHITESPACE);
|
||||
if (q != p)
|
||||
len = q - p;
|
||||
|
@ -153,7 +153,7 @@ config DIGIEPCA
|
||||
|
||||
config ESPSERIAL
|
||||
tristate "Hayes ESP serial port support"
|
||||
depends on SERIAL_NONSTANDARD && ISA && BROKEN_ON_SMP
|
||||
depends on SERIAL_NONSTANDARD && ISA && BROKEN_ON_SMP && ISA_DMA_API
|
||||
help
|
||||
This is a driver which supports Hayes ESP serial ports. Both single
|
||||
port cards and multiport cards are supported. Make sure to read
|
||||
@ -195,7 +195,7 @@ config ISI
|
||||
|
||||
config SYNCLINK
|
||||
tristate "Microgate SyncLink card support"
|
||||
depends on SERIAL_NONSTANDARD && PCI
|
||||
depends on SERIAL_NONSTANDARD && PCI && ISA_DMA_API
|
||||
help
|
||||
Provides support for the SyncLink ISA and PCI multiprotocol serial
|
||||
adapters. These adapters support asynchronous and HDLC bit
|
||||
@ -408,7 +408,7 @@ config SGI_TIOCX
|
||||
|
||||
config SGI_MBCS
|
||||
tristate "SGI FPGA Core Services driver support"
|
||||
depends on (IA64_SGI_SN2 || IA64_GENERIC)
|
||||
depends on SGI_TIOCX
|
||||
help
|
||||
If you have an SGI Altix with an attached SABrick
|
||||
say Y or M here, otherwise say N.
|
||||
|
@ -1617,15 +1617,15 @@ typedef struct dmi_header
|
||||
u16 handle;
|
||||
} dmi_header_t;
|
||||
|
||||
static int decode_dmi(dmi_header_t *dm, int intf_num)
|
||||
static int decode_dmi(dmi_header_t __iomem *dm, int intf_num)
|
||||
{
|
||||
u8 *data = (u8 *)dm;
|
||||
u8 __iomem *data = (u8 __iomem *)dm;
|
||||
unsigned long base_addr;
|
||||
u8 reg_spacing;
|
||||
u8 len = dm->length;
|
||||
u8 len = readb(&dm->length);
|
||||
dmi_ipmi_data_t *ipmi_data = dmi_data+intf_num;
|
||||
|
||||
ipmi_data->type = data[4];
|
||||
ipmi_data->type = readb(&data[4]);
|
||||
|
||||
memcpy(&base_addr, data+8, sizeof(unsigned long));
|
||||
if (len >= 0x11) {
|
||||
@ -1640,12 +1640,12 @@ static int decode_dmi(dmi_header_t *dm, int intf_num)
|
||||
}
|
||||
/* If bit 4 of byte 0x10 is set, then the lsb for the address
|
||||
is odd. */
|
||||
ipmi_data->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
|
||||
ipmi_data->base_addr = base_addr | ((readb(&data[0x10]) & 0x10) >> 4);
|
||||
|
||||
ipmi_data->irq = data[0x11];
|
||||
ipmi_data->irq = readb(&data[0x11]);
|
||||
|
||||
/* The top two bits of byte 0x10 hold the register spacing. */
|
||||
reg_spacing = (data[0x10] & 0xC0) >> 6;
|
||||
reg_spacing = (readb(&data[0x10]) & 0xC0) >> 6;
|
||||
switch(reg_spacing){
|
||||
case 0x00: /* Byte boundaries */
|
||||
ipmi_data->offset = 1;
|
||||
@ -1673,7 +1673,7 @@ static int decode_dmi(dmi_header_t *dm, int intf_num)
|
||||
ipmi_data->offset = 1;
|
||||
}
|
||||
|
||||
ipmi_data->slave_addr = data[6];
|
||||
ipmi_data->slave_addr = readb(&data[6]);
|
||||
|
||||
if (is_new_interface(-1, ipmi_data->addr_space,ipmi_data->base_addr)) {
|
||||
dmi_data_entries++;
|
||||
@ -1687,9 +1687,9 @@ static int decode_dmi(dmi_header_t *dm, int intf_num)
|
||||
|
||||
static int dmi_table(u32 base, int len, int num)
|
||||
{
|
||||
u8 *buf;
|
||||
struct dmi_header *dm;
|
||||
u8 *data;
|
||||
u8 __iomem *buf;
|
||||
struct dmi_header __iomem *dm;
|
||||
u8 __iomem *data;
|
||||
int i=1;
|
||||
int status=-1;
|
||||
int intf_num = 0;
|
||||
@ -1702,12 +1702,12 @@ static int dmi_table(u32 base, int len, int num)
|
||||
|
||||
while(i<num && (data - buf) < len)
|
||||
{
|
||||
dm=(dmi_header_t *)data;
|
||||
dm=(dmi_header_t __iomem *)data;
|
||||
|
||||
if((data-buf+dm->length) >= len)
|
||||
if((data-buf+readb(&dm->length)) >= len)
|
||||
break;
|
||||
|
||||
if (dm->type == 38) {
|
||||
if (readb(&dm->type) == 38) {
|
||||
if (decode_dmi(dm, intf_num) == 0) {
|
||||
intf_num++;
|
||||
if (intf_num >= SI_MAX_DRIVERS)
|
||||
@ -1715,8 +1715,8 @@ static int dmi_table(u32 base, int len, int num)
|
||||
}
|
||||
}
|
||||
|
||||
data+=dm->length;
|
||||
while((data-buf) < len && (*data || data[1]))
|
||||
data+=readb(&dm->length);
|
||||
while((data-buf) < len && (readb(data)||readb(data+1)))
|
||||
data++;
|
||||
data+=2;
|
||||
i++;
|
||||
|
@ -51,7 +51,7 @@ struct si_sm_io
|
||||
/* Generic info used by the actual handling routines, the
|
||||
state machine shouldn't touch these. */
|
||||
void *info;
|
||||
void *addr;
|
||||
void __iomem *addr;
|
||||
int regspacing;
|
||||
int regsize;
|
||||
int regshift;
|
||||
|
@ -394,7 +394,7 @@ int mbcs_open(struct inode *ip, struct file *fp)
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
ssize_t mbcs_sram_read(struct file * fp, char *buf, size_t len, loff_t * off)
|
||||
ssize_t mbcs_sram_read(struct file * fp, char __user *buf, size_t len, loff_t * off)
|
||||
{
|
||||
struct cx_dev *cx_dev = fp->private_data;
|
||||
struct mbcs_soft *soft = cx_dev->soft;
|
||||
@ -419,7 +419,7 @@ ssize_t mbcs_sram_read(struct file * fp, char *buf, size_t len, loff_t * off)
|
||||
}
|
||||
|
||||
ssize_t
|
||||
mbcs_sram_write(struct file * fp, const char *buf, size_t len, loff_t * off)
|
||||
mbcs_sram_write(struct file * fp, const char __user *buf, size_t len, loff_t * off)
|
||||
{
|
||||
struct cx_dev *cx_dev = fp->private_data;
|
||||
struct mbcs_soft *soft = cx_dev->soft;
|
||||
|
@ -543,9 +543,9 @@ struct mbcs_soft {
|
||||
};
|
||||
|
||||
extern int mbcs_open(struct inode *ip, struct file *fp);
|
||||
extern ssize_t mbcs_sram_read(struct file *fp, char *buf, size_t len,
|
||||
extern ssize_t mbcs_sram_read(struct file *fp, char __user *buf, size_t len,
|
||||
loff_t * off);
|
||||
extern ssize_t mbcs_sram_write(struct file *fp, const char *buf, size_t len,
|
||||
extern ssize_t mbcs_sram_write(struct file *fp, const char __user *buf, size_t len,
|
||||
loff_t * off);
|
||||
extern loff_t mbcs_sram_llseek(struct file *filp, loff_t off, int whence);
|
||||
extern int mbcs_gscr_mmap(struct file *fp, struct vm_area_struct *vma);
|
||||
|
@ -1021,11 +1021,11 @@ static int sonypi_misc_ioctl(struct inode *ip, struct file *fp,
|
||||
ret = -EIO;
|
||||
break;
|
||||
}
|
||||
if (copy_to_user((u8 *)arg, &val8, sizeof(val8)))
|
||||
if (copy_to_user(argp, &val8, sizeof(val8)))
|
||||
ret = -EFAULT;
|
||||
break;
|
||||
case SONYPI_IOCSFAN:
|
||||
if (copy_from_user(&val8, (u8 *)arg, sizeof(val8))) {
|
||||
if (copy_from_user(&val8, argp, sizeof(val8))) {
|
||||
ret = -EFAULT;
|
||||
break;
|
||||
}
|
||||
@ -1038,7 +1038,7 @@ static int sonypi_misc_ioctl(struct inode *ip, struct file *fp,
|
||||
ret = -EIO;
|
||||
break;
|
||||
}
|
||||
if (copy_to_user((u8 *)arg, &val8, sizeof(val8)))
|
||||
if (copy_to_user(argp, &val8, sizeof(val8)))
|
||||
ret = -EFAULT;
|
||||
break;
|
||||
default:
|
||||
|
@ -51,7 +51,7 @@ config MMC_PXA
|
||||
|
||||
config MMC_WBSD
|
||||
tristate "Winbond W83L51xD SD/MMC Card Interface support"
|
||||
depends on MMC && ISA
|
||||
depends on MMC && ISA && ISA_DMA_API
|
||||
help
|
||||
This selects the Winbond(R) W83L51xD Secure digital and
|
||||
Multimedia card Interface.
|
||||
|
@ -589,7 +589,7 @@ config EL2
|
||||
|
||||
config ELPLUS
|
||||
tristate "3c505 \"EtherLink Plus\" support"
|
||||
depends on NET_VENDOR_3COM && ISA
|
||||
depends on NET_VENDOR_3COM && ISA && ISA_DMA_API
|
||||
---help---
|
||||
Information about this network (Ethernet) card can be found in
|
||||
<file:Documentation/networking/3c505.txt>. If you have a card of
|
||||
@ -630,7 +630,7 @@ config EL3
|
||||
|
||||
config 3C515
|
||||
tristate "3c515 ISA \"Fast EtherLink\""
|
||||
depends on NET_VENDOR_3COM && (ISA || EISA)
|
||||
depends on NET_VENDOR_3COM && (ISA || EISA) && ISA_DMA_API
|
||||
help
|
||||
If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet
|
||||
network card, say Y and read the Ethernet-HOWTO, available from
|
||||
@ -708,7 +708,7 @@ config TYPHOON
|
||||
|
||||
config LANCE
|
||||
tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
|
||||
depends on NET_ETHERNET && ISA
|
||||
depends on NET_ETHERNET && ISA && ISA_DMA_API
|
||||
help
|
||||
If you have a network (Ethernet) card of this type, say Y and read
|
||||
the Ethernet-HOWTO, available from
|
||||
@ -864,7 +864,7 @@ config NI52
|
||||
|
||||
config NI65
|
||||
tristate "NI6510 support"
|
||||
depends on NET_VENDOR_RACAL && ISA
|
||||
depends on NET_VENDOR_RACAL && ISA && ISA_DMA_API
|
||||
help
|
||||
If you have a network (Ethernet) card of this type, say Y and read
|
||||
the Ethernet-HOWTO, available from
|
||||
@ -1072,7 +1072,7 @@ config NE2000
|
||||
|
||||
config ZNET
|
||||
tristate "Zenith Z-Note support (EXPERIMENTAL)"
|
||||
depends on NET_ISA && EXPERIMENTAL
|
||||
depends on NET_ISA && EXPERIMENTAL && ISA_DMA_API
|
||||
help
|
||||
The Zenith Z-Note notebook computer has a built-in network
|
||||
(Ethernet) card, and this is the Linux driver for it. Note that the
|
||||
|
@ -13,7 +13,7 @@ config DEV_APPLETALK
|
||||
|
||||
config LTPC
|
||||
tristate "Apple/Farallon LocalTalk PC support"
|
||||
depends on DEV_APPLETALK && (ISA || EISA)
|
||||
depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API
|
||||
help
|
||||
This allows you to use the AppleTalk PC card to connect to LocalTalk
|
||||
networks. The card is also known as the Farallon PhoneNet PC card.
|
||||
|
@ -45,7 +45,7 @@ config BPQETHER
|
||||
|
||||
config DMASCC
|
||||
tristate "High-speed (DMA) SCC driver for AX.25"
|
||||
depends on ISA && AX25 && BROKEN_ON_SMP
|
||||
depends on ISA && AX25 && BROKEN_ON_SMP && ISA_DMA_API
|
||||
---help---
|
||||
This is a driver for high-speed SCC boards, i.e. those supporting
|
||||
DMA on one port. You usually use those boards to connect your
|
||||
@ -78,7 +78,7 @@ config DMASCC
|
||||
|
||||
config SCC
|
||||
tristate "Z8530 SCC driver"
|
||||
depends on ISA && AX25
|
||||
depends on ISA && AX25 && ISA_DMA_API
|
||||
---help---
|
||||
These cards are used to connect your Linux box to an amateur radio
|
||||
in order to communicate with other computers. If you want to use
|
||||
|
@ -310,7 +310,7 @@ config SIGMATEL_FIR
|
||||
|
||||
config NSC_FIR
|
||||
tristate "NSC PC87108/PC87338"
|
||||
depends on IRDA
|
||||
depends on IRDA && ISA_DMA_API
|
||||
help
|
||||
Say Y here if you want to build support for the NSC PC87108 and
|
||||
PC87338 IrDA chipsets. This driver supports SIR,
|
||||
@ -321,7 +321,7 @@ config NSC_FIR
|
||||
|
||||
config WINBOND_FIR
|
||||
tristate "Winbond W83977AF (IR)"
|
||||
depends on IRDA
|
||||
depends on IRDA && ISA_DMA_API
|
||||
help
|
||||
Say Y here if you want to build IrDA support for the Winbond
|
||||
W83977AF super-io chipset. This driver should be used for the IrDA
|
||||
@ -347,7 +347,7 @@ config AU1000_FIR
|
||||
|
||||
config SMC_IRCC_FIR
|
||||
tristate "SMSC IrCC (EXPERIMENTAL)"
|
||||
depends on EXPERIMENTAL && IRDA
|
||||
depends on EXPERIMENTAL && IRDA && ISA_DMA_API
|
||||
help
|
||||
Say Y here if you want to build support for the SMC Infrared
|
||||
Communications Controller. It is used in a wide variety of
|
||||
@ -357,7 +357,7 @@ config SMC_IRCC_FIR
|
||||
|
||||
config ALI_FIR
|
||||
tristate "ALi M5123 FIR (EXPERIMENTAL)"
|
||||
depends on EXPERIMENTAL && IRDA
|
||||
depends on EXPERIMENTAL && IRDA && ISA_DMA_API
|
||||
help
|
||||
Say Y here if you want to build support for the ALi M5123 FIR
|
||||
Controller. The ALi M5123 FIR Controller is embedded in ALi M1543C,
|
||||
@ -385,7 +385,7 @@ config SA1100_FIR
|
||||
|
||||
config VIA_FIR
|
||||
tristate "VIA VT8231/VT1211 SIR/MIR/FIR"
|
||||
depends on IRDA
|
||||
depends on IRDA && ISA_DMA_API
|
||||
help
|
||||
Say Y here if you want to build support for the VIA VT8231
|
||||
and VIA VT1211 IrDA controllers, found on the motherboards using
|
||||
|
@ -87,8 +87,7 @@ static void z_comp_free(void *arg)
|
||||
|
||||
if (state) {
|
||||
zlib_deflateEnd(&state->strm);
|
||||
if (state->strm.workspace)
|
||||
vfree(state->strm.workspace);
|
||||
vfree(state->strm.workspace);
|
||||
kfree(state);
|
||||
}
|
||||
}
|
||||
@ -308,8 +307,7 @@ static void z_decomp_free(void *arg)
|
||||
|
||||
if (state) {
|
||||
zlib_inflateEnd(&state->strm);
|
||||
if (state->strm.workspace)
|
||||
kfree(state->strm.workspace);
|
||||
kfree(state->strm.workspace);
|
||||
kfree(state);
|
||||
}
|
||||
}
|
||||
|
@ -2467,14 +2467,10 @@ static void ppp_destroy_interface(struct ppp *ppp)
|
||||
skb_queue_purge(&ppp->mrq);
|
||||
#endif /* CONFIG_PPP_MULTILINK */
|
||||
#ifdef CONFIG_PPP_FILTER
|
||||
if (ppp->pass_filter) {
|
||||
kfree(ppp->pass_filter);
|
||||
ppp->pass_filter = NULL;
|
||||
}
|
||||
if (ppp->active_filter) {
|
||||
kfree(ppp->active_filter);
|
||||
ppp->active_filter = NULL;
|
||||
}
|
||||
kfree(ppp->pass_filter);
|
||||
ppp->pass_filter = NULL;
|
||||
kfree(ppp->active_filter);
|
||||
ppp->active_filter = NULL;
|
||||
#endif /* CONFIG_PPP_FILTER */
|
||||
|
||||
kfree(ppp);
|
||||
|
@ -26,7 +26,7 @@ config WAN
|
||||
# There is no way to detect a comtrol sv11 - force it modular for now.
|
||||
config HOSTESS_SV11
|
||||
tristate "Comtrol Hostess SV-11 support"
|
||||
depends on WAN && ISA && m
|
||||
depends on WAN && ISA && m && ISA_DMA_API
|
||||
help
|
||||
Driver for Comtrol Hostess SV-11 network card which
|
||||
operates on low speed synchronous serial links at up to
|
||||
@ -38,7 +38,7 @@ config HOSTESS_SV11
|
||||
# The COSA/SRP driver has not been tested as non-modular yet.
|
||||
config COSA
|
||||
tristate "COSA/SRP sync serial boards support"
|
||||
depends on WAN && ISA && m
|
||||
depends on WAN && ISA && m && ISA_DMA_API
|
||||
---help---
|
||||
Driver for COSA and SRP synchronous serial boards.
|
||||
|
||||
@ -127,7 +127,7 @@ config LANMEDIA
|
||||
# There is no way to detect a Sealevel board. Force it modular
|
||||
config SEALEVEL_4021
|
||||
tristate "Sealevel Systems 4021 support"
|
||||
depends on WAN && ISA && m
|
||||
depends on WAN && ISA && m && ISA_DMA_API
|
||||
help
|
||||
This is a driver for the Sealevel Systems ACB 56 serial I/O adapter.
|
||||
|
||||
|
@ -436,9 +436,7 @@ static int cycx_wan_new_if(struct wan_device *wandev, struct net_device *dev,
|
||||
}
|
||||
|
||||
if (err) {
|
||||
if (chan->local_addr)
|
||||
kfree(chan->local_addr);
|
||||
|
||||
kfree(chan->local_addr);
|
||||
kfree(chan);
|
||||
return err;
|
||||
}
|
||||
@ -458,9 +456,7 @@ static int cycx_wan_del_if(struct wan_device *wandev, struct net_device *dev)
|
||||
struct cycx_x25_channel *chan = dev->priv;
|
||||
|
||||
if (chan->svc) {
|
||||
if (chan->local_addr)
|
||||
kfree(chan->local_addr);
|
||||
|
||||
kfree(chan->local_addr);
|
||||
if (chan->state == WAN_CONNECTED)
|
||||
del_timer(&chan->timer);
|
||||
}
|
||||
|
@ -400,10 +400,8 @@ static void cpc_tty_close(struct tty_struct *tty, struct file *flip)
|
||||
cpc_tty->buf_rx.last = NULL;
|
||||
}
|
||||
|
||||
if (cpc_tty->buf_tx) {
|
||||
kfree(cpc_tty->buf_tx);
|
||||
cpc_tty->buf_tx = NULL;
|
||||
}
|
||||
kfree(cpc_tty->buf_tx);
|
||||
cpc_tty->buf_tx = NULL;
|
||||
|
||||
CPC_TTY_DBG("%s: TTY closed\n",cpc_tty->name);
|
||||
|
||||
@ -666,7 +664,7 @@ static void cpc_tty_rx_work(void * data)
|
||||
unsigned long port;
|
||||
int i, j;
|
||||
st_cpc_tty_area *cpc_tty;
|
||||
volatile st_cpc_rx_buf * buf;
|
||||
volatile st_cpc_rx_buf *buf;
|
||||
char flags=0,flg_rx=1;
|
||||
struct tty_ldisc *ld;
|
||||
|
||||
@ -680,9 +678,9 @@ static void cpc_tty_rx_work(void * data)
|
||||
cpc_tty = &cpc_tty_area[port];
|
||||
|
||||
if ((buf=cpc_tty->buf_rx.first) != 0) {
|
||||
if(cpc_tty->tty) {
|
||||
if (cpc_tty->tty) {
|
||||
ld = tty_ldisc_ref(cpc_tty->tty);
|
||||
if(ld) {
|
||||
if (ld) {
|
||||
if (ld->receive_buf) {
|
||||
CPC_TTY_DBG("%s: call line disc. receive_buf\n",cpc_tty->name);
|
||||
ld->receive_buf(cpc_tty->tty, (char *)(buf->data), &flags, buf->size);
|
||||
@ -691,7 +689,7 @@ static void cpc_tty_rx_work(void * data)
|
||||
}
|
||||
}
|
||||
cpc_tty->buf_rx.first = cpc_tty->buf_rx.first->next;
|
||||
kfree((unsigned char *)buf);
|
||||
kfree(buf);
|
||||
buf = cpc_tty->buf_rx.first;
|
||||
flg_rx = 1;
|
||||
}
|
||||
@ -733,7 +731,7 @@ static void cpc_tty_rx_disc_frame(pc300ch_t *pc300chan)
|
||||
|
||||
void cpc_tty_receive(pc300dev_t *pc300dev)
|
||||
{
|
||||
st_cpc_tty_area *cpc_tty;
|
||||
st_cpc_tty_area *cpc_tty;
|
||||
pc300ch_t *pc300chan = (pc300ch_t *)pc300dev->chan;
|
||||
pc300_t *card = (pc300_t *)pc300chan->card;
|
||||
int ch = pc300chan->channel;
|
||||
@ -742,7 +740,7 @@ void cpc_tty_receive(pc300dev_t *pc300dev)
|
||||
int rx_len, rx_aux;
|
||||
volatile unsigned char status;
|
||||
unsigned short first_bd = pc300chan->rx_first_bd;
|
||||
st_cpc_rx_buf *new=NULL;
|
||||
st_cpc_rx_buf *new = NULL;
|
||||
unsigned char dsr_rx;
|
||||
|
||||
if (pc300dev->cpc_tty == NULL) {
|
||||
@ -762,7 +760,7 @@ void cpc_tty_receive(pc300dev_t *pc300dev)
|
||||
if (status & DST_EOM) {
|
||||
break;
|
||||
}
|
||||
ptdescr=(pcsca_bd_t __iomem *)(card->hw.rambase+cpc_readl(&ptdescr->next));
|
||||
ptdescr = (pcsca_bd_t __iomem *)(card->hw.rambase+cpc_readl(&ptdescr->next));
|
||||
}
|
||||
|
||||
if (!rx_len) {
|
||||
@ -771,10 +769,7 @@ void cpc_tty_receive(pc300dev_t *pc300dev)
|
||||
cpc_writel(card->hw.scabase + DRX_REG(EDAL, ch),
|
||||
RX_BD_ADDR(ch, pc300chan->rx_last_bd));
|
||||
}
|
||||
if (new) {
|
||||
kfree(new);
|
||||
new = NULL;
|
||||
}
|
||||
kfree(new);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -787,7 +782,7 @@ void cpc_tty_receive(pc300dev_t *pc300dev)
|
||||
continue;
|
||||
}
|
||||
|
||||
new = (st_cpc_rx_buf *) kmalloc(rx_len + sizeof(st_cpc_rx_buf), GFP_ATOMIC);
|
||||
new = (st_cpc_rx_buf *)kmalloc(rx_len + sizeof(st_cpc_rx_buf), GFP_ATOMIC);
|
||||
if (new == 0) {
|
||||
cpc_tty_rx_disc_frame(pc300chan);
|
||||
continue;
|
||||
|
@ -3664,15 +3664,10 @@ static void wanpipe_tty_close(struct tty_struct *tty, struct file * filp)
|
||||
chdlc_disable_comm_shutdown(card);
|
||||
unlock_adapter_irq(&card->wandev.lock,&smp_flags);
|
||||
|
||||
if (card->tty_buf){
|
||||
kfree(card->tty_buf);
|
||||
card->tty_buf=NULL;
|
||||
}
|
||||
|
||||
if (card->tty_rx){
|
||||
kfree(card->tty_rx);
|
||||
card->tty_rx=NULL;
|
||||
}
|
||||
kfree(card->tty_buf);
|
||||
card->tty_buf = NULL;
|
||||
kfree(card->tty_rx);
|
||||
card->tty_rx = NULL;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
@ -107,13 +107,9 @@ static struct x25_asy *x25_asy_alloc(void)
|
||||
static void x25_asy_free(struct x25_asy *sl)
|
||||
{
|
||||
/* Free all X.25 frame buffers. */
|
||||
if (sl->rbuff) {
|
||||
kfree(sl->rbuff);
|
||||
}
|
||||
kfree(sl->rbuff);
|
||||
sl->rbuff = NULL;
|
||||
if (sl->xbuff) {
|
||||
kfree(sl->xbuff);
|
||||
}
|
||||
kfree(sl->xbuff);
|
||||
sl->xbuff = NULL;
|
||||
|
||||
if (!test_and_clear_bit(SLF_INUSE, &sl->flags)) {
|
||||
@ -134,10 +130,8 @@ static int x25_asy_change_mtu(struct net_device *dev, int newmtu)
|
||||
{
|
||||
printk("%s: unable to grow X.25 buffers, MTU change cancelled.\n",
|
||||
dev->name);
|
||||
if (xbuff != NULL)
|
||||
kfree(xbuff);
|
||||
if (rbuff != NULL)
|
||||
kfree(rbuff);
|
||||
kfree(xbuff);
|
||||
kfree(rbuff);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
@ -169,10 +163,8 @@ static int x25_asy_change_mtu(struct net_device *dev, int newmtu)
|
||||
|
||||
spin_unlock_bh(&sl->lock);
|
||||
|
||||
if (xbuff != NULL)
|
||||
kfree(xbuff);
|
||||
if (rbuff != NULL)
|
||||
kfree(rbuff);
|
||||
kfree(xbuff);
|
||||
kfree(rbuff);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -34,7 +34,7 @@ config PARPORT
|
||||
|
||||
config PARPORT_PC
|
||||
tristate "PC-style hardware"
|
||||
depends on PARPORT && (!SPARC64 || PCI) && (!SPARC32 || BROKEN)
|
||||
depends on PARPORT && (!SPARC64 || PCI) && !SPARC32
|
||||
---help---
|
||||
You should say Y here if you have a PC-style parallel port. All
|
||||
IBM PC compatible computers and some Alphas have PC-style
|
||||
|
@ -67,6 +67,10 @@
|
||||
|
||||
#define PARPORT_PC_MAX_PORTS PARPORT_MAX
|
||||
|
||||
#ifdef CONFIG_ISA_DMA_API
|
||||
#define HAS_DMA
|
||||
#endif
|
||||
|
||||
/* ECR modes */
|
||||
#define ECR_SPP 00
|
||||
#define ECR_PS2 01
|
||||
@ -610,6 +614,7 @@ dump_parport_state ("leave fifo_write_block_pio", port);
|
||||
return length - left;
|
||||
}
|
||||
|
||||
#ifdef HAS_DMA
|
||||
static size_t parport_pc_fifo_write_block_dma (struct parport *port,
|
||||
const void *buf, size_t length)
|
||||
{
|
||||
@ -732,6 +737,17 @@ dump_parport_state ("enter fifo_write_block_dma", port);
|
||||
dump_parport_state ("leave fifo_write_block_dma", port);
|
||||
return length - left;
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline size_t parport_pc_fifo_write_block(struct parport *port,
|
||||
const void *buf, size_t length)
|
||||
{
|
||||
#ifdef HAS_DMA
|
||||
if (port->dma != PARPORT_DMA_NONE)
|
||||
return parport_pc_fifo_write_block_dma (port, buf, length);
|
||||
#endif
|
||||
return parport_pc_fifo_write_block_pio (port, buf, length);
|
||||
}
|
||||
|
||||
/* Parallel Port FIFO mode (ECP chipsets) */
|
||||
static size_t parport_pc_compat_write_block_pio (struct parport *port,
|
||||
@ -758,10 +774,7 @@ static size_t parport_pc_compat_write_block_pio (struct parport *port,
|
||||
port->physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;
|
||||
|
||||
/* Write the data to the FIFO. */
|
||||
if (port->dma != PARPORT_DMA_NONE)
|
||||
written = parport_pc_fifo_write_block_dma (port, buf, length);
|
||||
else
|
||||
written = parport_pc_fifo_write_block_pio (port, buf, length);
|
||||
written = parport_pc_fifo_write_block(port, buf, length);
|
||||
|
||||
/* Finish up. */
|
||||
/* For some hardware we don't want to touch the mode until
|
||||
@ -856,10 +869,7 @@ static size_t parport_pc_ecp_write_block_pio (struct parport *port,
|
||||
port->physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;
|
||||
|
||||
/* Write the data to the FIFO. */
|
||||
if (port->dma != PARPORT_DMA_NONE)
|
||||
written = parport_pc_fifo_write_block_dma (port, buf, length);
|
||||
else
|
||||
written = parport_pc_fifo_write_block_pio (port, buf, length);
|
||||
written = parport_pc_fifo_write_block(port, buf, length);
|
||||
|
||||
/* Finish up. */
|
||||
/* For some hardware we don't want to touch the mode until
|
||||
@ -2285,6 +2295,7 @@ struct parport *parport_pc_probe_port (unsigned long int base,
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PARPORT_PC_FIFO
|
||||
#ifdef HAS_DMA
|
||||
if (p->dma != PARPORT_DMA_NONE) {
|
||||
if (request_dma (p->dma, p->name)) {
|
||||
printk (KERN_WARNING "%s: dma %d in use, "
|
||||
@ -2306,7 +2317,8 @@ struct parport *parport_pc_probe_port (unsigned long int base,
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif /* CONFIG_PARPORT_PC_FIFO */
|
||||
#endif
|
||||
#endif
|
||||
}
|
||||
|
||||
/* Done probing. Now put the port into a sensible start-up state. */
|
||||
@ -2367,11 +2379,13 @@ void parport_pc_unregister_port (struct parport *p)
|
||||
if (p->modes & PARPORT_MODE_ECP)
|
||||
release_region(p->base_hi, 3);
|
||||
#ifdef CONFIG_PARPORT_PC_FIFO
|
||||
#ifdef HAS_DMA
|
||||
if (priv->dma_buf)
|
||||
pci_free_consistent(priv->dev, PAGE_SIZE,
|
||||
priv->dma_buf,
|
||||
priv->dma_handle);
|
||||
#endif /* CONFIG_PARPORT_PC_FIFO */
|
||||
#endif
|
||||
#endif
|
||||
kfree (p->private_data);
|
||||
parport_put_port(p);
|
||||
kfree (ops); /* hope no-one cached it */
|
||||
|
@ -196,7 +196,7 @@ struct ebda_hpc_bus {
|
||||
|
||||
|
||||
/********************************************************************
|
||||
* THREE TYPE OF HOT PLUG CONTROLER *
|
||||
* THREE TYPE OF HOT PLUG CONTROLLER *
|
||||
********************************************************************/
|
||||
|
||||
struct isa_ctlr_access {
|
||||
|
@ -64,7 +64,7 @@ static int to_debug = FALSE;
|
||||
#define WPG_I2C_OR 0x2000 // I2C OR operation
|
||||
|
||||
//----------------------------------------------------------------------------
|
||||
// Command set for I2C Master Operation Setup Regisetr
|
||||
// Command set for I2C Master Operation Setup Register
|
||||
//----------------------------------------------------------------------------
|
||||
#define WPG_READATADDR_MASK 0x00010000 // read,bytes,I2C shifted,index
|
||||
#define WPG_WRITEATADDR_MASK 0x40010000 // write,bytes,I2C shifted,index
|
||||
@ -835,7 +835,7 @@ static void poll_hpc (void)
|
||||
if (ibmphp_shutdown)
|
||||
break;
|
||||
|
||||
/* try to get the lock to do some kind of harware access */
|
||||
/* try to get the lock to do some kind of hardware access */
|
||||
down (&semOperations);
|
||||
|
||||
switch (poll_state) {
|
||||
@ -906,7 +906,7 @@ static void poll_hpc (void)
|
||||
poll_state = POLL_LATCH_REGISTER;
|
||||
break;
|
||||
}
|
||||
/* give up the harware semaphore */
|
||||
/* give up the hardware semaphore */
|
||||
up (&semOperations);
|
||||
/* sleep for a short time just for good measure */
|
||||
msleep(100);
|
||||
|
@ -1308,10 +1308,10 @@ static int unconfigure_boot_device (u8 busno, u8 device, u8 function)
|
||||
/* ????????? DO WE NEED TO WRITE ANYTHING INTO THE PCI CONFIG SPACE BACK ?????????? */
|
||||
} else {
|
||||
/* This is Memory */
|
||||
start_address &= PCI_BASE_ADDRESS_MEM_MASK;
|
||||
if (start_address & PCI_BASE_ADDRESS_MEM_PREFETCH) {
|
||||
/* pfmem */
|
||||
debug ("start address of pfmem is %x\n", start_address);
|
||||
start_address &= PCI_BASE_ADDRESS_MEM_MASK;
|
||||
|
||||
if (ibmphp_find_resource (bus, start_address, &pfmem, PFMEM) < 0) {
|
||||
err ("cannot find corresponding PFMEM resource to remove\n");
|
||||
@ -1325,6 +1325,8 @@ static int unconfigure_boot_device (u8 busno, u8 device, u8 function)
|
||||
} else {
|
||||
/* regular memory */
|
||||
debug ("start address of mem is %x\n", start_address);
|
||||
start_address &= PCI_BASE_ADDRESS_MEM_MASK;
|
||||
|
||||
if (ibmphp_find_resource (bus, start_address, &mem, MEM) < 0) {
|
||||
err ("cannot find corresponding MEM resource to remove\n");
|
||||
return -EIO;
|
||||
@ -1422,9 +1424,9 @@ static int unconfigure_boot_bridge (u8 busno, u8 device, u8 function)
|
||||
/* ????????? DO WE NEED TO WRITE ANYTHING INTO THE PCI CONFIG SPACE BACK ?????????? */
|
||||
} else {
|
||||
/* This is Memory */
|
||||
start_address &= PCI_BASE_ADDRESS_MEM_MASK;
|
||||
if (start_address & PCI_BASE_ADDRESS_MEM_PREFETCH) {
|
||||
/* pfmem */
|
||||
start_address &= PCI_BASE_ADDRESS_MEM_MASK;
|
||||
if (ibmphp_find_resource (bus, start_address, &pfmem, PFMEM) < 0) {
|
||||
err ("cannot find corresponding PFMEM resource to remove\n");
|
||||
return -EINVAL;
|
||||
@ -1436,6 +1438,7 @@ static int unconfigure_boot_bridge (u8 busno, u8 device, u8 function)
|
||||
}
|
||||
} else {
|
||||
/* regular memory */
|
||||
start_address &= PCI_BASE_ADDRESS_MEM_MASK;
|
||||
if (ibmphp_find_resource (bus, start_address, &mem, MEM) < 0) {
|
||||
err ("cannot find corresponding MEM resource to remove\n");
|
||||
return -EINVAL;
|
||||
|
@ -150,7 +150,7 @@ struct hotplug_slot_info {
|
||||
* @name: the name of the slot being registered. This string must
|
||||
* be unique amoung slots registered on this system.
|
||||
* @ops: pointer to the &struct hotplug_slot_ops to be used for this slot
|
||||
* @info: pointer to the &struct hotplug_slot_info for the inital values for
|
||||
* @info: pointer to the &struct hotplug_slot_info for the initial values for
|
||||
* this slot.
|
||||
* @release: called during pci_hp_deregister to free memory allocated in a
|
||||
* hotplug_slot structure.
|
||||
|
@ -90,6 +90,22 @@ static struct hotplug_slot_ops pciehp_hotplug_slot_ops = {
|
||||
.get_cur_bus_speed = get_cur_bus_speed,
|
||||
};
|
||||
|
||||
/**
|
||||
* release_slot - free up the memory used by a slot
|
||||
* @hotplug_slot: slot to free
|
||||
*/
|
||||
static void release_slot(struct hotplug_slot *hotplug_slot)
|
||||
{
|
||||
struct slot *slot = hotplug_slot->private;
|
||||
|
||||
dbg("%s - physical_slot = %s\n", __FUNCTION__, hotplug_slot->name);
|
||||
|
||||
kfree(slot->hotplug_slot->info);
|
||||
kfree(slot->hotplug_slot->name);
|
||||
kfree(slot->hotplug_slot);
|
||||
kfree(slot);
|
||||
}
|
||||
|
||||
static int init_slots(struct controller *ctrl)
|
||||
{
|
||||
struct slot *new_slot;
|
||||
@ -139,7 +155,8 @@ static int init_slots(struct controller *ctrl)
|
||||
|
||||
/* register this slot with the hotplug pci core */
|
||||
new_slot->hotplug_slot->private = new_slot;
|
||||
make_slot_name (new_slot->hotplug_slot->name, SLOT_NAME_SIZE, new_slot);
|
||||
new_slot->hotplug_slot->release = &release_slot;
|
||||
make_slot_name(new_slot->hotplug_slot->name, SLOT_NAME_SIZE, new_slot);
|
||||
new_slot->hotplug_slot->ops = &pciehp_hotplug_slot_ops;
|
||||
|
||||
new_slot->hpc_ops->get_power_status(new_slot, &(new_slot->hotplug_slot->info->power_status));
|
||||
@ -188,10 +205,6 @@ static int cleanup_slots (struct controller * ctrl)
|
||||
while (old_slot) {
|
||||
next_slot = old_slot->next;
|
||||
pci_hp_deregister (old_slot->hotplug_slot);
|
||||
kfree(old_slot->hotplug_slot->info);
|
||||
kfree(old_slot->hotplug_slot->name);
|
||||
kfree(old_slot->hotplug_slot);
|
||||
kfree(old_slot);
|
||||
old_slot = next_slot;
|
||||
}
|
||||
|
||||
|
@ -297,7 +297,7 @@ static int __init init_slots(void)
|
||||
hotplug_slot->ops = &skel_hotplug_slot_ops;
|
||||
|
||||
/*
|
||||
* Initilize the slot info structure with some known
|
||||
* Initialize the slot info structure with some known
|
||||
* good values.
|
||||
*/
|
||||
info->power_status = get_power_status(slot);
|
||||
|
@ -522,7 +522,7 @@ void pci_scan_msi_device(struct pci_dev *dev)
|
||||
* msi_capability_init - configure device's MSI capability structure
|
||||
* @dev: pointer to the pci_dev data structure of MSI device function
|
||||
*
|
||||
* Setup the MSI capability structure of device funtion with a single
|
||||
* Setup the MSI capability structure of device function with a single
|
||||
* MSI vector, regardless of device function is capable of handling
|
||||
* multiple messages. A return of zero indicates the successful setup
|
||||
* of an entry zero with the new MSI vector or non-zero for otherwise.
|
||||
@ -599,7 +599,7 @@ static int msi_capability_init(struct pci_dev *dev)
|
||||
* msix_capability_init - configure device's MSI-X capability
|
||||
* @dev: pointer to the pci_dev data structure of MSI-X device function
|
||||
*
|
||||
* Setup the MSI-X capability structure of device funtion with a
|
||||
* Setup the MSI-X capability structure of device function with a
|
||||
* single MSI-X vector. A return of zero indicates the successful setup of
|
||||
* requested MSI-X entries with allocated vectors or non-zero for otherwise.
|
||||
**/
|
||||
@ -1074,7 +1074,7 @@ void pci_disable_msix(struct pci_dev* dev)
|
||||
* msi_remove_pci_irq_vectors - reclaim MSI(X) vectors to unused state
|
||||
* @dev: pointer to the pci_dev data structure of MSI(X) device function
|
||||
*
|
||||
* Being called during hotplug remove, from which the device funciton
|
||||
* Being called during hotplug remove, from which the device function
|
||||
* is hot-removed. All previous assigned MSI/MSI-X vectors, if
|
||||
* allocated for this device function, are reclaimed to unused state,
|
||||
* which may be used later on.
|
||||
|
@ -19,7 +19,7 @@
|
||||
|
||||
static u32 ctrlset_buf[3] = {0, 0, 0};
|
||||
static u32 global_ctrlsets = 0;
|
||||
u8 OSC_UUID[16] = {0x5B, 0x4D, 0xDB, 0x33, 0xF7, 0x1F, 0x1C, 0x40, 0x96, 0x57, 0x74, 0x41, 0xC0, 0x3D, 0xD7, 0x66};
|
||||
static u8 OSC_UUID[16] = {0x5B, 0x4D, 0xDB, 0x33, 0xF7, 0x1F, 0x1C, 0x40, 0x96, 0x57, 0x74, 0x41, 0xC0, 0x3D, 0xD7, 0x66};
|
||||
|
||||
static acpi_status
|
||||
acpi_query_osc (
|
||||
|
@ -318,6 +318,14 @@ static int pci_device_resume(struct device * dev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void pci_device_shutdown(struct device *dev)
|
||||
{
|
||||
struct pci_dev *pci_dev = to_pci_dev(dev);
|
||||
struct pci_driver *drv = pci_dev->driver;
|
||||
|
||||
if (drv && drv->shutdown)
|
||||
drv->shutdown(pci_dev);
|
||||
}
|
||||
|
||||
#define kobj_to_pci_driver(obj) container_of(obj, struct device_driver, kobj)
|
||||
#define attr_to_driver_attribute(obj) container_of(obj, struct driver_attribute, attr)
|
||||
@ -373,7 +381,7 @@ pci_populate_driver_dir(struct pci_driver *drv)
|
||||
*
|
||||
* Adds the driver structure to the list of registered drivers.
|
||||
* Returns a negative value on error, otherwise 0.
|
||||
* If no error occured, the driver remains registered even if
|
||||
* If no error occurred, the driver remains registered even if
|
||||
* no device was claimed during registration.
|
||||
*/
|
||||
int pci_register_driver(struct pci_driver *drv)
|
||||
@ -385,6 +393,7 @@ int pci_register_driver(struct pci_driver *drv)
|
||||
drv->driver.bus = &pci_bus_type;
|
||||
drv->driver.probe = pci_device_probe;
|
||||
drv->driver.remove = pci_device_remove;
|
||||
drv->driver.shutdown = pci_device_shutdown,
|
||||
drv->driver.owner = drv->owner;
|
||||
drv->driver.kobj.ktype = &pci_driver_kobj_type;
|
||||
pci_init_dynids(&drv->dynids);
|
||||
|
@ -91,6 +91,7 @@ pci_read_config(struct kobject *kobj, char *buf, loff_t off, size_t count)
|
||||
struct pci_dev *dev = to_pci_dev(container_of(kobj,struct device,kobj));
|
||||
unsigned int size = 64;
|
||||
loff_t init_off = off;
|
||||
u8 *data = (u8*) buf;
|
||||
|
||||
/* Several chips lock up trying to read undefined config space */
|
||||
if (capable(CAP_SYS_ADMIN)) {
|
||||
@ -108,30 +109,47 @@ pci_read_config(struct kobject *kobj, char *buf, loff_t off, size_t count)
|
||||
size = count;
|
||||
}
|
||||
|
||||
while (off & 3) {
|
||||
unsigned char val;
|
||||
if ((off & 1) && size) {
|
||||
u8 val;
|
||||
pci_read_config_byte(dev, off, &val);
|
||||
buf[off - init_off] = val;
|
||||
data[off - init_off] = val;
|
||||
off++;
|
||||
if (--size == 0)
|
||||
break;
|
||||
size--;
|
||||
}
|
||||
|
||||
if ((off & 3) && size > 2) {
|
||||
u16 val;
|
||||
pci_read_config_word(dev, off, &val);
|
||||
data[off - init_off] = val & 0xff;
|
||||
data[off - init_off + 1] = (val >> 8) & 0xff;
|
||||
off += 2;
|
||||
size -= 2;
|
||||
}
|
||||
|
||||
while (size > 3) {
|
||||
unsigned int val;
|
||||
u32 val;
|
||||
pci_read_config_dword(dev, off, &val);
|
||||
buf[off - init_off] = val & 0xff;
|
||||
buf[off - init_off + 1] = (val >> 8) & 0xff;
|
||||
buf[off - init_off + 2] = (val >> 16) & 0xff;
|
||||
buf[off - init_off + 3] = (val >> 24) & 0xff;
|
||||
data[off - init_off] = val & 0xff;
|
||||
data[off - init_off + 1] = (val >> 8) & 0xff;
|
||||
data[off - init_off + 2] = (val >> 16) & 0xff;
|
||||
data[off - init_off + 3] = (val >> 24) & 0xff;
|
||||
off += 4;
|
||||
size -= 4;
|
||||
}
|
||||
|
||||
while (size > 0) {
|
||||
unsigned char val;
|
||||
if (size >= 2) {
|
||||
u16 val;
|
||||
pci_read_config_word(dev, off, &val);
|
||||
data[off - init_off] = val & 0xff;
|
||||
data[off - init_off + 1] = (val >> 8) & 0xff;
|
||||
off += 2;
|
||||
size -= 2;
|
||||
}
|
||||
|
||||
if (size > 0) {
|
||||
u8 val;
|
||||
pci_read_config_byte(dev, off, &val);
|
||||
buf[off - init_off] = val;
|
||||
data[off - init_off] = val;
|
||||
off++;
|
||||
--size;
|
||||
}
|
||||
@ -145,6 +163,7 @@ pci_write_config(struct kobject *kobj, char *buf, loff_t off, size_t count)
|
||||
struct pci_dev *dev = to_pci_dev(container_of(kobj,struct device,kobj));
|
||||
unsigned int size = count;
|
||||
loff_t init_off = off;
|
||||
u8 *data = (u8*) buf;
|
||||
|
||||
if (off > dev->cfg_size)
|
||||
return 0;
|
||||
@ -153,25 +172,40 @@ pci_write_config(struct kobject *kobj, char *buf, loff_t off, size_t count)
|
||||
count = size;
|
||||
}
|
||||
|
||||
while (off & 3) {
|
||||
pci_write_config_byte(dev, off, buf[off - init_off]);
|
||||
if ((off & 1) && size) {
|
||||
pci_write_config_byte(dev, off, data[off - init_off]);
|
||||
off++;
|
||||
if (--size == 0)
|
||||
break;
|
||||
size--;
|
||||
}
|
||||
|
||||
if ((off & 3) && size > 2) {
|
||||
u16 val = data[off - init_off];
|
||||
val |= (u16) data[off - init_off + 1] << 8;
|
||||
pci_write_config_word(dev, off, val);
|
||||
off += 2;
|
||||
size -= 2;
|
||||
}
|
||||
|
||||
while (size > 3) {
|
||||
unsigned int val = buf[off - init_off];
|
||||
val |= (unsigned int) buf[off - init_off + 1] << 8;
|
||||
val |= (unsigned int) buf[off - init_off + 2] << 16;
|
||||
val |= (unsigned int) buf[off - init_off + 3] << 24;
|
||||
u32 val = data[off - init_off];
|
||||
val |= (u32) data[off - init_off + 1] << 8;
|
||||
val |= (u32) data[off - init_off + 2] << 16;
|
||||
val |= (u32) data[off - init_off + 3] << 24;
|
||||
pci_write_config_dword(dev, off, val);
|
||||
off += 4;
|
||||
size -= 4;
|
||||
}
|
||||
|
||||
while (size > 0) {
|
||||
pci_write_config_byte(dev, off, buf[off - init_off]);
|
||||
if (size >= 2) {
|
||||
u16 val = data[off - init_off];
|
||||
val |= (u16) data[off - init_off + 1] << 8;
|
||||
pci_write_config_word(dev, off, val);
|
||||
off += 2;
|
||||
size -= 2;
|
||||
}
|
||||
|
||||
if (size) {
|
||||
pci_write_config_byte(dev, off, data[off - init_off]);
|
||||
off++;
|
||||
--size;
|
||||
}
|
||||
|
@ -16,6 +16,7 @@
|
||||
#include <linux/module.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <asm/dma.h> /* isa_dma_bridge_buggy */
|
||||
#include "pci.h"
|
||||
|
||||
|
||||
/**
|
||||
@ -398,10 +399,10 @@ pci_enable_device(struct pci_dev *dev)
|
||||
{
|
||||
int err;
|
||||
|
||||
dev->is_enabled = 1;
|
||||
if ((err = pci_enable_device_bars(dev, (1 << PCI_NUM_RESOURCES) - 1)))
|
||||
return err;
|
||||
pci_fixup_device(pci_fixup_enable, dev);
|
||||
dev->is_enabled = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -427,16 +428,15 @@ pci_disable_device(struct pci_dev *dev)
|
||||
{
|
||||
u16 pci_command;
|
||||
|
||||
dev->is_enabled = 0;
|
||||
dev->is_busmaster = 0;
|
||||
|
||||
pci_read_config_word(dev, PCI_COMMAND, &pci_command);
|
||||
if (pci_command & PCI_COMMAND_MASTER) {
|
||||
pci_command &= ~PCI_COMMAND_MASTER;
|
||||
pci_write_config_word(dev, PCI_COMMAND, pci_command);
|
||||
}
|
||||
dev->is_busmaster = 0;
|
||||
|
||||
pcibios_disable_device(dev);
|
||||
dev->is_enabled = 0;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -748,17 +748,6 @@ pci_set_dma_mask(struct pci_dev *dev, u64 mask)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
pci_dac_set_dma_mask(struct pci_dev *dev, u64 mask)
|
||||
{
|
||||
if (!pci_dac_dma_supported(dev, mask))
|
||||
return -EIO;
|
||||
|
||||
dev->dma_mask = mask;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
|
||||
{
|
||||
@ -821,7 +810,6 @@ EXPORT_SYMBOL(pci_set_master);
|
||||
EXPORT_SYMBOL(pci_set_mwi);
|
||||
EXPORT_SYMBOL(pci_clear_mwi);
|
||||
EXPORT_SYMBOL(pci_set_dma_mask);
|
||||
EXPORT_SYMBOL(pci_dac_set_dma_mask);
|
||||
EXPORT_SYMBOL(pci_set_consistent_dma_mask);
|
||||
EXPORT_SYMBOL(pci_assign_resource);
|
||||
EXPORT_SYMBOL(pci_find_parent_resource);
|
||||
|
@ -9,6 +9,7 @@
|
||||
#include <linux/slab.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/cpumask.h>
|
||||
#include "pci.h"
|
||||
|
||||
#define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
|
||||
#define CARDBUS_RESERVE_BUSNR 3
|
||||
|
@ -15,6 +15,7 @@
|
||||
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/byteorder.h>
|
||||
#include "pci.h"
|
||||
|
||||
static int proc_initialized; /* = 0 */
|
||||
|
||||
|
@ -18,6 +18,7 @@
|
||||
#include <linux/pci.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/delay.h>
|
||||
#include "pci.h"
|
||||
|
||||
/* Deal with broken BIOS'es that neglect to enable passive release,
|
||||
which can cause problems in combination with the 82441FX/PPro MTRRs */
|
||||
@ -328,6 +329,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12,
|
||||
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, quirk_ich4_lpc_acpi );
|
||||
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, quirk_ich4_lpc_acpi );
|
||||
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, quirk_ich4_lpc_acpi );
|
||||
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, quirk_ich4_lpc_acpi );
|
||||
|
||||
/*
|
||||
* VIA ACPI: One IO region pointed to by longword at
|
||||
|
@ -260,7 +260,7 @@ config SCSI_3W_9XXX
|
||||
|
||||
config SCSI_7000FASST
|
||||
tristate "7000FASST SCSI support"
|
||||
depends on ISA && SCSI
|
||||
depends on ISA && SCSI && ISA_DMA_API
|
||||
help
|
||||
This driver supports the Western Digital 7000 SCSI host adapter
|
||||
family. Some information is in the source:
|
||||
@ -295,7 +295,7 @@ config SCSI_AHA152X
|
||||
|
||||
config SCSI_AHA1542
|
||||
tristate "Adaptec AHA1542 support"
|
||||
depends on ISA && SCSI
|
||||
depends on ISA && SCSI && ISA_DMA_API
|
||||
---help---
|
||||
This is support for a SCSI host adapter. It is explained in section
|
||||
3.4 of the SCSI-HOWTO, available from
|
||||
@ -515,7 +515,7 @@ config SCSI_SATA_VITESSE
|
||||
|
||||
config SCSI_BUSLOGIC
|
||||
tristate "BusLogic SCSI support"
|
||||
depends on (PCI || ISA || MCA) && SCSI && (BROKEN || !SPARC64)
|
||||
depends on (PCI || ISA || MCA) && SCSI && ISA_DMA_API
|
||||
---help---
|
||||
This is support for BusLogic MultiMaster and FlashPoint SCSI Host
|
||||
Adapters. Consult the SCSI-HOWTO, available from
|
||||
@ -571,7 +571,7 @@ config SCSI_DTC3280
|
||||
|
||||
config SCSI_EATA
|
||||
tristate "EATA ISA/EISA/PCI (DPT and generic EATA/DMA-compliant boards) support"
|
||||
depends on (ISA || EISA || PCI) && SCSI && (BROKEN || !SPARC64)
|
||||
depends on (ISA || EISA || PCI) && SCSI && ISA_DMA_API
|
||||
---help---
|
||||
This driver supports all EATA/DMA-compliant SCSI host adapters. DPT
|
||||
ISA and all EISA I/O addresses are probed looking for the "EATA"
|
||||
@ -665,7 +665,7 @@ config SCSI_FD_MCS
|
||||
|
||||
config SCSI_GDTH
|
||||
tristate "Intel/ICP (former GDT SCSI Disk Array) RAID Controller support"
|
||||
depends on (ISA || EISA || PCI) && SCSI && (BROKEN || !SPARC64)
|
||||
depends on (ISA || EISA || PCI) && SCSI && ISA_DMA_API
|
||||
---help---
|
||||
Formerly called GDT SCSI Disk Array Controller Support.
|
||||
|
||||
@ -1416,7 +1416,7 @@ config SCSI_T128
|
||||
|
||||
config SCSI_U14_34F
|
||||
tristate "UltraStor 14F/34F support"
|
||||
depends on ISA && SCSI
|
||||
depends on ISA && SCSI && ISA_DMA_API
|
||||
---help---
|
||||
This is support for the UltraStor 14F and 34F SCSI-2 host adapters.
|
||||
The source at <file:drivers/scsi/u14-34f.c> contains some
|
||||
|
@ -431,7 +431,7 @@ nomem:
|
||||
* (2) error, where io->status is a negative errno value. The number
|
||||
* of io->bytes transferred before the error is usually less
|
||||
* than requested, and can be nonzero.
|
||||
* (3) cancelation, a type of error with status -ECONNRESET that
|
||||
* (3) cancellation, a type of error with status -ECONNRESET that
|
||||
* is initiated by usb_sg_cancel().
|
||||
*
|
||||
* When this function returns, all memory allocated through usb_sg_init() or
|
||||
@ -1282,7 +1282,7 @@ static void release_interface(struct device *dev)
|
||||
* bus rwsem; usb device driver probe() methods cannot use this routine.
|
||||
*
|
||||
* Returns zero on success, or else the status code returned by the
|
||||
* underlying call that failed. On succesful completion, each interface
|
||||
* underlying call that failed. On successful completion, each interface
|
||||
* in the original device configuration has been destroyed, and each one
|
||||
* in the new configuration has been probed by all relevant usb device
|
||||
* drivers currently known to the kernel.
|
||||
|
@ -121,7 +121,7 @@ struct urb * usb_get_urb(struct urb *urb)
|
||||
* describing that request to the USB subsystem. Request completion will
|
||||
* be indicated later, asynchronously, by calling the completion handler.
|
||||
* The three types of completion are success, error, and unlink
|
||||
* (a software-induced fault, also called "request cancelation").
|
||||
* (a software-induced fault, also called "request cancellation").
|
||||
*
|
||||
* URBs may be submitted in interrupt context.
|
||||
*
|
||||
@ -170,7 +170,7 @@ struct urb * usb_get_urb(struct urb *urb)
|
||||
* As of Linux 2.6, all USB endpoint transfer queues support depths greater
|
||||
* than one. This was previously a HCD-specific behavior, except for ISO
|
||||
* transfers. Non-isochronous endpoint queues are inactive during cleanup
|
||||
* after faults (transfer errors or cancelation).
|
||||
* after faults (transfer errors or cancellation).
|
||||
*
|
||||
* Reserved Bandwidth Transfers:
|
||||
*
|
||||
@ -395,7 +395,7 @@ int usb_submit_urb(struct urb *urb, int mem_flags)
|
||||
*
|
||||
* This routine cancels an in-progress request. URBs complete only
|
||||
* once per submission, and may be canceled only once per submission.
|
||||
* Successful cancelation means the requests's completion handler will
|
||||
* Successful cancellation means the requests's completion handler will
|
||||
* be called with a status code indicating that the request has been
|
||||
* canceled (rather than any other code) and will quickly be removed
|
||||
* from host controller data structures.
|
||||
|
@ -569,7 +569,7 @@ static const struct usb_cdc_ether_desc ether_desc = {
|
||||
|
||||
/* include the status endpoint if we can, even where it's optional.
|
||||
* use wMaxPacketSize big enough to fit CDC_NOTIFY_SPEED_CHANGE in one
|
||||
* packet, to simplify cancelation; and a big transfer interval, to
|
||||
* packet, to simplify cancellation; and a big transfer interval, to
|
||||
* waste less bandwidth.
|
||||
*
|
||||
* some drivers (like Linux 2.4 cdc-ether!) "need" it to exist even
|
||||
|
@ -275,7 +275,7 @@ static const char *CHIP;
|
||||
*
|
||||
* After opening, configure non-control endpoints. Then use normal
|
||||
* stream read() and write() requests; and maybe ioctl() to get more
|
||||
* precise FIFO status when recovering from cancelation.
|
||||
* precise FIFO status when recovering from cancellation.
|
||||
*/
|
||||
|
||||
static void epio_complete (struct usb_ep *ep, struct usb_request *req)
|
||||
|
@ -705,7 +705,7 @@ void nuke(struct lh7a40x_ep *ep, int status)
|
||||
done(ep, req, status);
|
||||
}
|
||||
|
||||
/* Disable IRQ if EP is enabled (has decriptor) */
|
||||
/* Disable IRQ if EP is enabled (has descriptor) */
|
||||
if (ep->desc)
|
||||
pio_irq_disable(ep_index(ep));
|
||||
}
|
||||
|
@ -240,7 +240,7 @@ struct gs_dev {
|
||||
struct usb_ep *dev_notify_ep; /* address of notify endpoint */
|
||||
struct usb_ep *dev_in_ep; /* address of in endpoint */
|
||||
struct usb_ep *dev_out_ep; /* address of out endpoint */
|
||||
struct usb_endpoint_descriptor /* desciptor of notify ep */
|
||||
struct usb_endpoint_descriptor /* descriptor of notify ep */
|
||||
*dev_notify_ep_desc;
|
||||
struct usb_endpoint_descriptor /* descriptor of in endpoint */
|
||||
*dev_in_ep_desc;
|
||||
|
@ -346,6 +346,22 @@ ehci_reboot (struct notifier_block *self, unsigned long code, void *null)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ehci_port_power (struct ehci_hcd *ehci, int is_on)
|
||||
{
|
||||
unsigned port;
|
||||
|
||||
if (!HCS_PPC (ehci->hcs_params))
|
||||
return;
|
||||
|
||||
ehci_dbg (ehci, "...power%s ports...\n", is_on ? "up" : "down");
|
||||
for (port = HCS_N_PORTS (ehci->hcs_params); port > 0; )
|
||||
(void) ehci_hub_control(ehci_to_hcd(ehci),
|
||||
is_on ? SetPortFeature : ClearPortFeature,
|
||||
USB_PORT_FEAT_POWER,
|
||||
port--, NULL, 0);
|
||||
msleep(20);
|
||||
}
|
||||
|
||||
|
||||
/* called by khubd or root hub init threads */
|
||||
|
||||
@ -362,8 +378,10 @@ static int ehci_hc_reset (struct usb_hcd *hcd)
|
||||
dbg_hcs_params (ehci, "reset");
|
||||
dbg_hcc_params (ehci, "reset");
|
||||
|
||||
/* cache this readonly data; minimize chip reads */
|
||||
ehci->hcs_params = readl (&ehci->caps->hcs_params);
|
||||
|
||||
#ifdef CONFIG_PCI
|
||||
/* EHCI 0.96 and later may have "extended capabilities" */
|
||||
if (hcd->self.controller->bus == &pci_bus_type) {
|
||||
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
|
||||
|
||||
@ -383,9 +401,30 @@ static int ehci_hc_reset (struct usb_hcd *hcd)
|
||||
break;
|
||||
}
|
||||
|
||||
/* optional debug port, normally in the first BAR */
|
||||
temp = pci_find_capability (pdev, 0x0a);
|
||||
if (temp) {
|
||||
pci_read_config_dword(pdev, temp, &temp);
|
||||
temp >>= 16;
|
||||
if ((temp & (3 << 13)) == (1 << 13)) {
|
||||
temp &= 0x1fff;
|
||||
ehci->debug = hcd->regs + temp;
|
||||
temp = readl (&ehci->debug->control);
|
||||
ehci_info (ehci, "debug port %d%s\n",
|
||||
HCS_DEBUG_PORT(ehci->hcs_params),
|
||||
(temp & DBGP_ENABLED)
|
||||
? " IN USE"
|
||||
: "");
|
||||
if (!(temp & DBGP_ENABLED))
|
||||
ehci->debug = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
temp = HCC_EXT_CAPS (readl (&ehci->caps->hcc_params));
|
||||
} else
|
||||
temp = 0;
|
||||
|
||||
/* EHCI 0.96 and later may have "extended capabilities" */
|
||||
while (temp && count--) {
|
||||
u32 cap;
|
||||
|
||||
@ -414,8 +453,7 @@ static int ehci_hc_reset (struct usb_hcd *hcd)
|
||||
ehci_reset (ehci);
|
||||
#endif
|
||||
|
||||
/* cache this readonly data; minimize PCI reads */
|
||||
ehci->hcs_params = readl (&ehci->caps->hcs_params);
|
||||
ehci_port_power (ehci, 0);
|
||||
|
||||
/* at least the Genesys GL880S needs fixup here */
|
||||
temp = HCS_N_CC(ehci->hcs_params) * HCS_N_PCC(ehci->hcs_params);
|
||||
@ -657,16 +695,11 @@ done2:
|
||||
static void ehci_stop (struct usb_hcd *hcd)
|
||||
{
|
||||
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
|
||||
u8 rh_ports, port;
|
||||
|
||||
ehci_dbg (ehci, "stop\n");
|
||||
|
||||
/* Turn off port power on all root hub ports. */
|
||||
rh_ports = HCS_N_PORTS (ehci->hcs_params);
|
||||
for (port = 1; port <= rh_ports; port++)
|
||||
(void) ehci_hub_control(hcd,
|
||||
ClearPortFeature, USB_PORT_FEAT_POWER,
|
||||
port, NULL, 0);
|
||||
ehci_port_power (ehci, 0);
|
||||
|
||||
/* no more interrupts ... */
|
||||
del_timer_sync (&ehci->watchdog);
|
||||
@ -748,7 +781,6 @@ static int ehci_resume (struct usb_hcd *hcd)
|
||||
unsigned port;
|
||||
struct usb_device *root = hcd->self.root_hub;
|
||||
int retval = -EINVAL;
|
||||
int powerup = 0;
|
||||
|
||||
// maybe restore (PCI) FLADJ
|
||||
|
||||
@ -766,8 +798,6 @@ static int ehci_resume (struct usb_hcd *hcd)
|
||||
up (&hcd->self.root_hub->serialize);
|
||||
break;
|
||||
}
|
||||
if ((status & PORT_POWER) == 0)
|
||||
powerup = 1;
|
||||
if (!root->children [port])
|
||||
continue;
|
||||
dbg_port (ehci, __FUNCTION__, port + 1, status);
|
||||
@ -794,16 +824,9 @@ static int ehci_resume (struct usb_hcd *hcd)
|
||||
retval = ehci_start (hcd);
|
||||
|
||||
/* here we "know" root ports should always stay powered;
|
||||
* but some controllers may lost all power.
|
||||
* but some controllers may lose all power.
|
||||
*/
|
||||
if (powerup) {
|
||||
ehci_dbg (ehci, "...powerup ports...\n");
|
||||
for (port = HCS_N_PORTS (ehci->hcs_params); port > 0; )
|
||||
(void) ehci_hub_control(hcd,
|
||||
SetPortFeature, USB_PORT_FEAT_POWER,
|
||||
port--, NULL, 0);
|
||||
msleep(20);
|
||||
}
|
||||
ehci_port_power (ehci, 1);
|
||||
}
|
||||
|
||||
return retval;
|
||||
|
@ -281,6 +281,8 @@ ehci_hub_descriptor (
|
||||
temp = 0x0008; /* per-port overcurrent reporting */
|
||||
if (HCS_PPC (ehci->hcs_params))
|
||||
temp |= 0x0001; /* per-port power control */
|
||||
else
|
||||
temp |= 0x0002; /* no power switching */
|
||||
#if 0
|
||||
// re-enable when we support USB_PORT_FEAT_INDICATOR below.
|
||||
if (HCS_INDICATOR (ehci->hcs_params))
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user