
Merge branch 'acpi-scan' into acpi-pm

Conflicts:
	drivers/acpi/scan.c

The conflict is resolved by moving the just introduced
acpi_device_is_first_physical_node() to bus.c and using
the existing acpi_companion_match() from there.

There will be an additional commit to combine the two.
Committed by Rafael J. Wysocki on 2015-07-29 23:57:51 +02:00 (commit 3431e490b5)
192 changed files with 3017 additions and 2212 deletions
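As an aside, a minimal sketch of the kind of "first physical node" check the resolution above describes (illustrative only, not the merged kernel code; the struct fields follow the upstream ACPI core, and the helper name here is made up):

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/mutex.h>

/*
 * Illustrative sketch: true when @dev is the first physical device attached
 * to the ACPI companion @adev, the property that bus.c checks alongside the
 * existing acpi_companion_match().
 */
static bool example_is_first_physical_node(struct acpi_device *adev,
					   const struct device *dev)
{
	struct acpi_device_physical_node *node;
	bool ret = false;

	mutex_lock(&adev->physical_node_lock);
	node = list_first_entry_or_null(&adev->physical_node_list,
					struct acpi_device_physical_node,
					node);
	if (node && node->dev == dev)
		ret = true;
	mutex_unlock(&adev->physical_node_lock);

	return ret;
}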

View File

@@ -952,6 +952,14 @@ When kbuild executes, the following steps are followed (roughly):
     $(KBUILD_ARFLAGS) set by the top level Makefile to "D" (deterministic
     mode) if this option is supported by $(AR).
+
+    ARCH_CPPFLAGS, ARCH_AFLAGS, ARCH_CFLAGS   Overrides the kbuild defaults
+
+	These variables are appended to the KBUILD_CPPFLAGS,
+	KBUILD_AFLAGS, and KBUILD_CFLAGS, respectively, after the
+	top-level Makefile has set any other flags. This provides a
+	means for an architecture to override the defaults.
+
 --- 6.2 Add prerequisites to archheaders:
     The archheaders: rule is used to generate header files that

View File

@@ -7019,6 +7019,7 @@ F: include/uapi/linux/netfilter/
 F: net/*/netfilter.c
 F: net/*/netfilter/
 F: net/netfilter/
+F: net/bridge/br_netfilter*.c
 
 NETLABEL
 M: Paul Moore <paul@paul-moore.com>

View File

@@ -780,10 +780,11 @@ endif
 include scripts/Makefile.kasan
 include scripts/Makefile.extrawarn
-# Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments
-KBUILD_CPPFLAGS += $(KCPPFLAGS)
-KBUILD_AFLAGS += $(KAFLAGS)
-KBUILD_CFLAGS += $(KCFLAGS)
+# Add any arch overrides and user supplied CPPFLAGS, AFLAGS and CFLAGS as the
+# last assignments
+KBUILD_CPPFLAGS += $(ARCH_CPPFLAGS) $(KCPPFLAGS)
+KBUILD_AFLAGS += $(ARCH_AFLAGS) $(KAFLAGS)
+KBUILD_CFLAGS += $(ARCH_CFLAGS) $(KCFLAGS)
 # Use --build-id when available.
 LDFLAGS_BUILD_ID = $(patsubst -Wl$(comma)%,%,\

View File

@@ -115,6 +115,7 @@ if ISA_ARCOMPACT
 config ARC_CPU_750D
     bool "ARC750D"
+    select ARC_CANT_LLSC
     help
       Support for ARC750 core
@@ -362,7 +363,7 @@ config ARC_CANT_LLSC
 config ARC_HAS_LLSC
     bool "Insn: LLOCK/SCOND (efficient atomic ops)"
     default y
-    depends on !ARC_CPU_750D && !ARC_CANT_LLSC
+    depends on !ARC_CANT_LLSC
 config ARC_HAS_SWAPE
     bool "Insn: SWAPE (endian-swap)"

View File

@@ -49,7 +49,8 @@ endif
 ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
 # Generic build system uses -O2, we want -O3
-cflags-y += -O3
+# Note: No need to add to cflags-y as that happens anyways
+ARCH_CFLAGS += -O3
 endif
 # small data is default for elf32 tool-chain. If not usable, disable it

View File

@@ -12,7 +12,7 @@
 / {
     compatible = "snps,arc";
-    clock-frequency = <75000000>;
+    clock-frequency = <90000000>;
     #address-cells = <1>;
     #size-cells = <1>;

View File

@@ -12,7 +12,7 @@
 / {
     compatible = "snps,arc";
-    clock-frequency = <75000000>;
+    clock-frequency = <90000000>;
     #address-cells = <1>;
     #size-cells = <1>;

View File

@ -50,8 +50,7 @@ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
* done for const @nr, but no code is generated due to gcc \ * done for const @nr, but no code is generated due to gcc \
* const prop. \ * const prop. \
*/ \ */ \
if (__builtin_constant_p(nr)) \ nr &= 0x1f; \
nr &= 0x1f; \
\ \
__asm__ __volatile__( \ __asm__ __volatile__( \
"1: llock %0, [%1] \n" \ "1: llock %0, [%1] \n" \
@ -82,8 +81,7 @@ static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *
\ \
m += nr >> 5; \ m += nr >> 5; \
\ \
if (__builtin_constant_p(nr)) \ nr &= 0x1f; \
nr &= 0x1f; \
\ \
/* \ /* \
* Explicit full memory barrier needed before/after as \ * Explicit full memory barrier needed before/after as \
@ -129,16 +127,13 @@ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
unsigned long temp, flags; \ unsigned long temp, flags; \
m += nr >> 5; \ m += nr >> 5; \
\ \
if (__builtin_constant_p(nr)) \
nr &= 0x1f; \
\
/* \ /* \
* spin lock/unlock provide the needed smp_mb() before/after \ * spin lock/unlock provide the needed smp_mb() before/after \
*/ \ */ \
bitops_lock(flags); \ bitops_lock(flags); \
\ \
temp = *m; \ temp = *m; \
*m = temp c_op (1UL << nr); \ *m = temp c_op (1UL << (nr & 0x1f)); \
\ \
bitops_unlock(flags); \ bitops_unlock(flags); \
} }
@ -149,17 +144,14 @@ static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *
unsigned long old, flags; \ unsigned long old, flags; \
m += nr >> 5; \ m += nr >> 5; \
\ \
if (__builtin_constant_p(nr)) \
nr &= 0x1f; \
\
bitops_lock(flags); \ bitops_lock(flags); \
\ \
old = *m; \ old = *m; \
*m = old c_op (1 << nr); \ *m = old c_op (1UL << (nr & 0x1f)); \
\ \
bitops_unlock(flags); \ bitops_unlock(flags); \
\ \
return (old & (1 << nr)) != 0; \ return (old & (1UL << (nr & 0x1f))) != 0; \
} }
#endif /* CONFIG_ARC_HAS_LLSC */ #endif /* CONFIG_ARC_HAS_LLSC */
@ -174,11 +166,8 @@ static inline void __##op##_bit(unsigned long nr, volatile unsigned long *m) \
unsigned long temp; \ unsigned long temp; \
m += nr >> 5; \ m += nr >> 5; \
\ \
if (__builtin_constant_p(nr)) \
nr &= 0x1f; \
\
temp = *m; \ temp = *m; \
*m = temp c_op (1UL << nr); \ *m = temp c_op (1UL << (nr & 0x1f)); \
} }
#define __TEST_N_BIT_OP(op, c_op, asm_op) \ #define __TEST_N_BIT_OP(op, c_op, asm_op) \
@ -187,13 +176,10 @@ static inline int __test_and_##op##_bit(unsigned long nr, volatile unsigned long
unsigned long old; \ unsigned long old; \
m += nr >> 5; \ m += nr >> 5; \
\ \
if (__builtin_constant_p(nr)) \
nr &= 0x1f; \
\
old = *m; \ old = *m; \
*m = old c_op (1 << nr); \ *m = old c_op (1UL << (nr & 0x1f)); \
\ \
return (old & (1 << nr)) != 0; \ return (old & (1UL << (nr & 0x1f))) != 0; \
} }
#define BIT_OPS(op, c_op, asm_op) \ #define BIT_OPS(op, c_op, asm_op) \
@ -224,10 +210,7 @@ test_bit(unsigned int nr, const volatile unsigned long *addr)
addr += nr >> 5; addr += nr >> 5;
if (__builtin_constant_p(nr)) mask = 1UL << (nr & 0x1f);
nr &= 0x1f;
mask = 1 << nr;
return ((mask & *addr) != 0); return ((mask & *addr) != 0);
} }
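The bitops change above drops the __builtin_constant_p(nr) special case and masks nr into the word unconditionally. A stand-alone user-space sketch (assuming 32-bit words, as on ARC) of why the 1UL << (nr & 0x1f) idiom matters:

#include <stdio.h>

/*
 * Shifting by a count >= the operand width is undefined behaviour in C, so
 * the helpers reduce nr modulo the 32-bit word size; the containing word was
 * already selected by the earlier "m += nr >> 5".
 */
static unsigned long bit_mask(unsigned int nr)
{
	return 1UL << (nr & 0x1f);
}

int main(void)
{
	printf("bit 5  -> mask %#lx\n", bit_mask(5));
	printf("bit 37 -> mask %#lx\n", bit_mask(37)); /* same in-word offset as bit 5 */
	return 0;
}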

View File

@@ -16,12 +16,15 @@
 #include <linux/uaccess.h>
 #include <asm/errno.h>
+#ifdef CONFIG_ARC_HAS_LLSC
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
     \
     __asm__ __volatile__( \
-    "1: ld %1, [%2] \n" \
+    "1: llock %1, [%2] \n" \
     insn "\n" \
-    "2: st %0, [%2] \n" \
+    "2: scond %0, [%2] \n" \
+    " bnz 1b \n" \
     " mov %0, 0 \n" \
     "3: \n" \
     " .section .fixup,\"ax\" \n" \
@@ -39,6 +42,33 @@
     : "r" (uaddr), "r" (oparg), "ir" (-EFAULT) \
     : "cc", "memory")
+#else /* !CONFIG_ARC_HAS_LLSC */
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
+    \
+    __asm__ __volatile__( \
+    "1: ld %1, [%2] \n" \
+    insn "\n" \
+    "2: st %0, [%2] \n" \
+    " mov %0, 0 \n" \
+    "3: \n" \
+    " .section .fixup,\"ax\" \n" \
+    " .align 4 \n" \
+    "4: mov %0, %4 \n" \
+    " b 3b \n" \
+    " .previous \n" \
+    " .section __ex_table,\"a\" \n" \
+    " .align 4 \n" \
+    " .word 1b, 4b \n" \
+    " .word 2b, 4b \n" \
+    " .previous \n" \
+    \
+    : "=&r" (ret), "=&r" (oldval) \
+    : "r" (uaddr), "r" (oparg), "ir" (-EFAULT) \
+    : "cc", "memory")
+#endif
 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
     int op = (encoded_op >> 28) & 7;
@@ -123,11 +153,17 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
     pagefault_disable();
-    /* TBD : can use llock/scond */
     __asm__ __volatile__(
-    "1: ld %0, [%3] \n"
-    " brne %0, %1, 3f \n"
-    "2: st %2, [%3] \n"
+#ifdef CONFIG_ARC_HAS_LLSC
+    "1: llock %0, [%3] \n"
+    " brne %0, %1, 3f \n"
+    "2: scond %2, [%3] \n"
+    " bnz 1b \n"
+#else
+    "1: ld %0, [%3] \n"
+    " brne %0, %1, 3f \n"
+    "2: st %2, [%3] \n"
+#endif
     "3: \n"
     " .section .fixup,\"ax\" \n"
     "4: mov %0, %4 \n"

View File

@@ -106,7 +106,7 @@ struct callee_regs {
     long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
 };
-#define instruction_pointer(regs) ((regs)->ret)
+#define instruction_pointer(regs) (unsigned long)((regs)->ret)
 #define profile_pc(regs) instruction_pointer(regs)
 /* return 1 if user mode or 0 if kernel mode */

View File

@@ -12,7 +12,6 @@
 #include <linux/of.h>
 #include <linux/irqdomain.h>
 #include <linux/irqchip.h>
-#include "../../drivers/irqchip/irqchip.h"
 #include <asm/irq.h>
 /*

View File

@@ -12,7 +12,6 @@
 #include <linux/of.h>
 #include <linux/irqdomain.h>
 #include <linux/irqchip.h>
-#include "../../drivers/irqchip/irqchip.h"
 #include <asm/irq.h>
 /*

View File

@@ -175,7 +175,6 @@ void mcip_init_early_smp(void)
 #include <linux/irqchip.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
-#include "../../drivers/irqchip/irqchip.h"
 /*
  * Set the DEST for @cmn_irq to @cpu_mask (1 bit per core)
@@ -218,11 +217,28 @@ static void idu_irq_unmask(struct irq_data *data)
     raw_spin_unlock_irqrestore(&mcip_lock, flags);
 }
+#ifdef CONFIG_SMP
 static int
-idu_irq_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool f)
+idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
+                     bool force)
 {
+    unsigned long flags;
+    cpumask_t online;
+
+    /* errout if no online cpu per @cpumask */
+    if (!cpumask_and(&online, cpumask, cpu_online_mask))
+        return -EINVAL;
+
+    raw_spin_lock_irqsave(&mcip_lock, flags);
+
+    idu_set_dest(data->hwirq, cpumask_bits(&online)[0]);
+    idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
+
+    raw_spin_unlock_irqrestore(&mcip_lock, flags);
+
     return IRQ_SET_MASK_OK;
 }
+#endif
 static struct irq_chip idu_irq_chip = {
     .name = "MCIP IDU Intc",
@@ -330,8 +346,7 @@ idu_of_init(struct device_node *intc, struct device_node *parent)
     if (!i)
         idu_first_irq = irq;
-    irq_set_handler_data(irq, domain);
-    irq_set_chained_handler(irq, idu_cascade_isr);
+    irq_set_chained_handler_and_data(irq, idu_cascade_isr, domain);
 }
 __mcip_cmd(CMD_IDU_ENABLE, 0);

View File

@@ -142,17 +142,22 @@ static void read_arc_build_cfg_regs(void)
 }
 static const struct cpuinfo_data arc_cpu_tbl[] = {
+#ifdef CONFIG_ISA_ARCOMPACT
     { {0x20, "ARC 600" }, 0x2F},
     { {0x30, "ARC 700" }, 0x33},
     { {0x34, "ARC 700 R4.10"}, 0x34},
     { {0x35, "ARC 700 R4.11"}, 0x35},
-    { {0x50, "ARC HS38" }, 0x51},
+#else
+    { {0x50, "ARC HS38 R2.0"}, 0x51},
+    { {0x52, "ARC HS38 R2.1"}, 0x52},
+#endif
     { {0x00, NULL } }
 };
-#define IS_AVAIL1(v, str) ((v) ? str : "")
-#define IS_USED(cfg) (IS_ENABLED(cfg) ? "" : "(not used) ")
-#define IS_AVAIL2(v, str, cfg) IS_AVAIL1(v, str), IS_AVAIL1(v, IS_USED(cfg))
+#define IS_AVAIL1(v, s) ((v) ? s : "")
+#define IS_USED_RUN(v) ((v) ? "" : "(not used) ")
+#define IS_USED_CFG(cfg) IS_USED_RUN(IS_ENABLED(cfg))
+#define IS_AVAIL2(v, s, cfg) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg))
 static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
 {
@@ -226,7 +231,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
         n += scnprintf(buf + n, len - n, "mpy[opt %d] ", opt);
     }
     n += scnprintf(buf + n, len - n, "%s",
-            IS_USED(CONFIG_ARC_HAS_HW_MPY));
+            IS_USED_CFG(CONFIG_ARC_HAS_HW_MPY));
     }
     n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s%s%s\n",

View File

@@ -58,7 +58,6 @@ static void show_callee_regs(struct callee_regs *cregs)
 static void print_task_path_n_nm(struct task_struct *tsk, char *buf)
 {
-    struct path path;
     char *path_nm = NULL;
     struct mm_struct *mm;
     struct file *exe_file;

View File

@@ -468,10 +468,18 @@ static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
 noinline void slc_op(unsigned long paddr, unsigned long sz, const int op)
 {
 #ifdef CONFIG_ISA_ARCV2
+    /*
+     * SLC is shared between all cores and concurrent aux operations from
+     * multiple cores need to be serialized using a spinlock
+     * A concurrent operation can be silently ignored and/or the old/new
+     * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY loop
+     * below)
+     */
+    static DEFINE_SPINLOCK(lock);
     unsigned long flags;
     unsigned int ctrl;
-    local_irq_save(flags);
+    spin_lock_irqsave(&lock, flags);
     /*
      * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
@@ -504,7 +512,7 @@ noinline void slc_op(unsigned long paddr, unsigned long sz, const int op)
     while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
-    local_irq_restore(flags);
+    spin_unlock_irqrestore(&lock, flags);
 #endif
 }
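The comment added above carries the reasoning: disabling local interrupts only serializes one core, while the SLC aux registers are shared by every core, so a lock is required. A small pthread sketch of the same serialization pattern (user-space stand-in, not kernel code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t slc_lock = PTHREAD_MUTEX_INITIALIZER;
static int slc_busy;			/* models the SLC_CTRL_BUSY flag */

static void slc_op_stub(long cpu)
{
	/* The whole start-op / wait-for-completion sequence runs under one
	 * lock, so a second "core" cannot interleave its own operation. */
	pthread_mutex_lock(&slc_lock);
	slc_busy = 1;
	/* ... program the region registers, start the flush ... */
	slc_busy = 0;			/* "hardware" reports completion */
	pthread_mutex_unlock(&slc_lock);
	printf("cpu%ld: slc op done\n", cpu);
}

static void *worker(void *arg)
{
	slc_op_stub((long)arg);
	return NULL;
}

int main(void)
{
	pthread_t t[2];

	for (long i = 0; i < 2; i++)
		pthread_create(&t[i], NULL, worker, (void *)i);
	for (int i = 0; i < 2; i++)
		pthread_join(t[i], NULL);
	return 0;
}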

View File

@@ -60,8 +60,8 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
     /* This is kernel Virtual address (0x7000_0000 based) */
     kvaddr = ioremap_nocache((unsigned long)paddr, size);
-    if (kvaddr != NULL)
-        memset(kvaddr, 0, size);
+    if (kvaddr == NULL)
+        return NULL;
     /* This is bus address, platform dependent */
     *dma_handle = (dma_addr_t)paddr;

View File

@@ -686,7 +686,8 @@
 &dcan1 {
     status = "ok";
-    pinctrl-names = "default", "sleep";
-    pinctrl-0 = <&dcan1_pins_default>;
+    pinctrl-names = "default", "sleep", "active";
+    pinctrl-0 = <&dcan1_pins_sleep>;
     pinctrl-1 = <&dcan1_pins_sleep>;
+    pinctrl-2 = <&dcan1_pins_default>;
 };

View File

@@ -587,9 +587,10 @@
 &dcan1 {
     status = "ok";
-    pinctrl-names = "default", "sleep";
-    pinctrl-0 = <&dcan1_pins_default>;
+    pinctrl-names = "default", "sleep", "active";
+    pinctrl-0 = <&dcan1_pins_sleep>;
     pinctrl-1 = <&dcan1_pins_sleep>;
+    pinctrl-2 = <&dcan1_pins_default>;
 };
 &qspi {

View File

@ -125,6 +125,13 @@ endif # M68KCLASSIC
if COLDFIRE if COLDFIRE
choice
prompt "ColdFire SoC type"
default M520x
help
Select the type of ColdFire System-on-Chip (SoC) that you want
to build for.
config M5206 config M5206
bool "MCF5206" bool "MCF5206"
depends on !MMU depends on !MMU
@ -174,9 +181,6 @@ config M525x
help help
Freescale (Motorola) Coldfire 5251/5253 processor support. Freescale (Motorola) Coldfire 5251/5253 processor support.
config M527x
bool
config M5271 config M5271
bool "MCF5271" bool "MCF5271"
depends on !MMU depends on !MMU
@ -223,9 +227,6 @@ config M5307
help help
Motorola ColdFire 5307 processor support. Motorola ColdFire 5307 processor support.
config M53xx
bool
config M532x config M532x
bool "MCF532x" bool "MCF532x"
depends on !MMU depends on !MMU
@ -251,9 +252,6 @@ config M5407
help help
Motorola ColdFire 5407 processor support. Motorola ColdFire 5407 processor support.
config M54xx
bool
config M547x config M547x
bool "MCF547x" bool "MCF547x"
select M54xx select M54xx
@ -280,6 +278,17 @@ config M5441x
help help
Freescale Coldfire 54410/54415/54416/54417/54418 processor support. Freescale Coldfire 54410/54415/54416/54417/54418 processor support.
endchoice
config M527x
bool
config M53xx
bool
config M54xx
bool
endif # COLDFIRE endif # COLDFIRE
@ -416,22 +425,18 @@ config HAVE_MBAR
config HAVE_IPSBAR config HAVE_IPSBAR
bool bool
config CLOCK_SET
bool "Enable setting the CPU clock frequency"
depends on COLDFIRE
default n
help
On some CPU's you do not need to know what the core CPU clock
frequency is. On these you can disable clock setting. On some
traditional 68K parts, and on all ColdFire parts you need to set
the appropriate CPU clock frequency. On these devices many of the
onboard peripherals derive their timing from the master CPU clock
frequency.
config CLOCK_FREQ config CLOCK_FREQ
int "Set the core clock frequency" int "Set the core clock frequency"
default "25000000" if M5206
default "54000000" if M5206e
default "166666666" if M520x
default "140000000" if M5249
default "150000000" if M527x || M523x
default "90000000" if M5307
default "50000000" if M5407
default "266000000" if M54xx
default "66666666" default "66666666"
depends on CLOCK_SET depends on COLDFIRE
help help
Define the CPU clock frequency in use. This is the core clock Define the CPU clock frequency in use. This is the core clock
frequency, it may or may not be the same as the external clock frequency, it may or may not be the same as the external clock

View File

@ -1,10 +1,6 @@
# CONFIG_MMU is not set
CONFIG_EXPERIMENTAL=y
CONFIG_LOG_BUF_SHIFT=14 CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set # CONFIG_KALLSYMS is not set
# CONFIG_HOTPLUG is not set
# CONFIG_FUTEX is not set # CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set # CONFIG_EPOLL is not set
# CONFIG_SIGNALFD is not set # CONFIG_SIGNALFD is not set
@ -16,17 +12,12 @@ CONFIG_EXPERT=y
# CONFIG_BLK_DEV_BSG is not set # CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set # CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set # CONFIG_IOSCHED_CFQ is not set
CONFIG_M520x=y # CONFIG_MMU is not set
CONFIG_CLOCK_SET=y
CONFIG_CLOCK_FREQ=166666666
CONFIG_CLOCK_DIV=2
CONFIG_M5208EVB=y
# CONFIG_4KSTACKS is not set # CONFIG_4KSTACKS is not set
CONFIG_RAMBASE=0x40000000 CONFIG_RAMBASE=0x40000000
CONFIG_RAMSIZE=0x2000000 CONFIG_RAMSIZE=0x2000000
CONFIG_VECTORBASE=0x40000000 CONFIG_VECTORBASE=0x40000000
CONFIG_KERNELBASE=0x40020000 CONFIG_KERNELBASE=0x40020000
CONFIG_RAM16BIT=y
CONFIG_BINFMT_FLAT=y CONFIG_BINFMT_FLAT=y
CONFIG_NET=y CONFIG_NET=y
CONFIG_PACKET=y CONFIG_PACKET=y
@ -40,24 +31,19 @@ CONFIG_INET=y
# CONFIG_IPV6 is not set # CONFIG_IPV6 is not set
# CONFIG_FW_LOADER is not set # CONFIG_FW_LOADER is not set
CONFIG_MTD=y CONFIG_MTD=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y CONFIG_MTD_BLOCK=y
CONFIG_MTD_RAM=y CONFIG_MTD_RAM=y
CONFIG_MTD_UCLINUX=y CONFIG_MTD_UCLINUX=y
CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM=y
# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
CONFIG_FEC=y CONFIG_FEC=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT is not set # CONFIG_INPUT is not set
# CONFIG_SERIO is not set # CONFIG_SERIO is not set
# CONFIG_VT is not set # CONFIG_VT is not set
# CONFIG_UNIX98_PTYS is not set
CONFIG_SERIAL_MCF=y CONFIG_SERIAL_MCF=y
CONFIG_SERIAL_MCF_BAUDRATE=115200 CONFIG_SERIAL_MCF_BAUDRATE=115200
CONFIG_SERIAL_MCF_CONSOLE=y CONFIG_SERIAL_MCF_CONSOLE=y
# CONFIG_UNIX98_PTYS is not set
# CONFIG_HW_RANDOM is not set # CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set # CONFIG_HWMON is not set
# CONFIG_USB_SUPPORT is not set # CONFIG_USB_SUPPORT is not set
@ -68,8 +54,6 @@ CONFIG_EXT2_FS=y
CONFIG_ROMFS_FS=y CONFIG_ROMFS_FS=y
CONFIG_ROMFS_BACKED_BY_MTD=y CONFIG_ROMFS_BACKED_BY_MTD=y
# CONFIG_NETWORK_FILESYSTEMS is not set # CONFIG_NETWORK_FILESYSTEMS is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_FULLDEBUG=y
CONFIG_BOOTPARAM=y CONFIG_BOOTPARAM=y
CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0" CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0"
CONFIG_FULLDEBUG=y

View File

@ -1,10 +1,6 @@
# CONFIG_MMU is not set
CONFIG_EXPERIMENTAL=y
CONFIG_LOG_BUF_SHIFT=14 CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set # CONFIG_KALLSYMS is not set
# CONFIG_HOTPLUG is not set
# CONFIG_FUTEX is not set # CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set # CONFIG_EPOLL is not set
# CONFIG_SIGNALFD is not set # CONFIG_SIGNALFD is not set
@ -16,10 +12,8 @@ CONFIG_EXPERT=y
# CONFIG_BLK_DEV_BSG is not set # CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set # CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set # CONFIG_IOSCHED_CFQ is not set
# CONFIG_MMU is not set
CONFIG_M5249=y CONFIG_M5249=y
CONFIG_CLOCK_SET=y
CONFIG_CLOCK_FREQ=140000000
CONFIG_CLOCK_DIV=2
CONFIG_M5249C3=y CONFIG_M5249C3=y
CONFIG_RAMBASE=0x00000000 CONFIG_RAMBASE=0x00000000
CONFIG_RAMSIZE=0x00800000 CONFIG_RAMSIZE=0x00800000
@ -38,23 +32,18 @@ CONFIG_INET=y
# CONFIG_IPV6 is not set # CONFIG_IPV6 is not set
# CONFIG_FW_LOADER is not set # CONFIG_FW_LOADER is not set
CONFIG_MTD=y CONFIG_MTD=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y CONFIG_MTD_BLOCK=y
CONFIG_MTD_RAM=y CONFIG_MTD_RAM=y
CONFIG_MTD_UCLINUX=y CONFIG_MTD_UCLINUX=y
CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM=y
# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
CONFIG_PPP=y CONFIG_PPP=y
# CONFIG_INPUT is not set # CONFIG_INPUT is not set
# CONFIG_SERIO is not set # CONFIG_SERIO is not set
# CONFIG_VT is not set # CONFIG_VT is not set
# CONFIG_UNIX98_PTYS is not set
CONFIG_SERIAL_MCF=y CONFIG_SERIAL_MCF=y
CONFIG_SERIAL_MCF_CONSOLE=y CONFIG_SERIAL_MCF_CONSOLE=y
# CONFIG_UNIX98_PTYS is not set
# CONFIG_HWMON is not set # CONFIG_HWMON is not set
# CONFIG_USB_SUPPORT is not set # CONFIG_USB_SUPPORT is not set
CONFIG_EXT2_FS=y CONFIG_EXT2_FS=y
@ -62,7 +51,5 @@ CONFIG_EXT2_FS=y
CONFIG_ROMFS_FS=y CONFIG_ROMFS_FS=y
CONFIG_ROMFS_BACKED_BY_MTD=y CONFIG_ROMFS_BACKED_BY_MTD=y
# CONFIG_NETWORK_FILESYSTEMS is not set # CONFIG_NETWORK_FILESYSTEMS is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_BOOTPARAM=y CONFIG_BOOTPARAM=y
CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0" CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0"
# CONFIG_CRC32 is not set

View File

@ -1,10 +1,6 @@
# CONFIG_MMU is not set
CONFIG_EXPERIMENTAL=y
CONFIG_LOG_BUF_SHIFT=14 CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set # CONFIG_KALLSYMS is not set
# CONFIG_HOTPLUG is not set
# CONFIG_FUTEX is not set # CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set # CONFIG_EPOLL is not set
# CONFIG_SIGNALFD is not set # CONFIG_SIGNALFD is not set
@ -16,8 +12,8 @@ CONFIG_EXPERT=y
# CONFIG_BLK_DEV_BSG is not set # CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set # CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set # CONFIG_IOSCHED_CFQ is not set
# CONFIG_MMU is not set
CONFIG_M5272=y CONFIG_M5272=y
CONFIG_CLOCK_SET=y
CONFIG_M5272C3=y CONFIG_M5272C3=y
CONFIG_RAMBASE=0x00000000 CONFIG_RAMBASE=0x00000000
CONFIG_RAMSIZE=0x00800000 CONFIG_RAMSIZE=0x00800000
@ -36,23 +32,18 @@ CONFIG_INET=y
# CONFIG_IPV6 is not set # CONFIG_IPV6 is not set
# CONFIG_FW_LOADER is not set # CONFIG_FW_LOADER is not set
CONFIG_MTD=y CONFIG_MTD=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y CONFIG_MTD_BLOCK=y
CONFIG_MTD_RAM=y CONFIG_MTD_RAM=y
CONFIG_MTD_UCLINUX=y CONFIG_MTD_UCLINUX=y
CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM=y
# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
CONFIG_FEC=y CONFIG_FEC=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT is not set # CONFIG_INPUT is not set
# CONFIG_SERIO is not set # CONFIG_SERIO is not set
# CONFIG_VT is not set # CONFIG_VT is not set
# CONFIG_UNIX98_PTYS is not set
CONFIG_SERIAL_MCF=y CONFIG_SERIAL_MCF=y
CONFIG_SERIAL_MCF_CONSOLE=y CONFIG_SERIAL_MCF_CONSOLE=y
# CONFIG_UNIX98_PTYS is not set
# CONFIG_HWMON is not set # CONFIG_HWMON is not set
# CONFIG_USB_SUPPORT is not set # CONFIG_USB_SUPPORT is not set
CONFIG_EXT2_FS=y CONFIG_EXT2_FS=y
@ -61,6 +52,5 @@ CONFIG_EXT2_FS=y
CONFIG_ROMFS_FS=y CONFIG_ROMFS_FS=y
CONFIG_ROMFS_BACKED_BY_MTD=y CONFIG_ROMFS_BACKED_BY_MTD=y
# CONFIG_NETWORK_FILESYSTEMS is not set # CONFIG_NETWORK_FILESYSTEMS is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_BOOTPARAM=y CONFIG_BOOTPARAM=y
CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0" CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0"

View File

@ -1,10 +1,6 @@
# CONFIG_MMU is not set
CONFIG_EXPERIMENTAL=y
CONFIG_LOG_BUF_SHIFT=14 CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set # CONFIG_KALLSYMS is not set
# CONFIG_HOTPLUG is not set
# CONFIG_FUTEX is not set # CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set # CONFIG_EPOLL is not set
# CONFIG_SIGNALFD is not set # CONFIG_SIGNALFD is not set
@ -16,11 +12,8 @@ CONFIG_EXPERT=y
# CONFIG_BLK_DEV_BSG is not set # CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set # CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set # CONFIG_IOSCHED_CFQ is not set
# CONFIG_MMU is not set
CONFIG_M5275=y CONFIG_M5275=y
CONFIG_CLOCK_SET=y
CONFIG_CLOCK_FREQ=150000000
CONFIG_CLOCK_DIV=2
CONFIG_M5275EVB=y
# CONFIG_4KSTACKS is not set # CONFIG_4KSTACKS is not set
CONFIG_RAMBASE=0x00000000 CONFIG_RAMBASE=0x00000000
CONFIG_RAMSIZE=0x00000000 CONFIG_RAMSIZE=0x00000000
@ -39,24 +32,19 @@ CONFIG_INET=y
# CONFIG_IPV6 is not set # CONFIG_IPV6 is not set
# CONFIG_FW_LOADER is not set # CONFIG_FW_LOADER is not set
CONFIG_MTD=y CONFIG_MTD=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y CONFIG_MTD_BLOCK=y
CONFIG_MTD_RAM=y CONFIG_MTD_RAM=y
CONFIG_MTD_UCLINUX=y CONFIG_MTD_UCLINUX=y
CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM=y
# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
CONFIG_FEC=y CONFIG_FEC=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
CONFIG_PPP=y CONFIG_PPP=y
# CONFIG_INPUT is not set # CONFIG_INPUT is not set
# CONFIG_SERIO is not set # CONFIG_SERIO is not set
# CONFIG_VT is not set # CONFIG_VT is not set
# CONFIG_UNIX98_PTYS is not set
CONFIG_SERIAL_MCF=y CONFIG_SERIAL_MCF=y
CONFIG_SERIAL_MCF_CONSOLE=y CONFIG_SERIAL_MCF_CONSOLE=y
# CONFIG_UNIX98_PTYS is not set
# CONFIG_HWMON is not set # CONFIG_HWMON is not set
# CONFIG_USB_SUPPORT is not set # CONFIG_USB_SUPPORT is not set
CONFIG_EXT2_FS=y CONFIG_EXT2_FS=y
@ -65,8 +53,5 @@ CONFIG_EXT2_FS=y
CONFIG_ROMFS_FS=y CONFIG_ROMFS_FS=y
CONFIG_ROMFS_BACKED_BY_MTD=y CONFIG_ROMFS_BACKED_BY_MTD=y
# CONFIG_NETWORK_FILESYSTEMS is not set # CONFIG_NETWORK_FILESYSTEMS is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_BOOTPARAM=y CONFIG_BOOTPARAM=y
CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0" CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0"
# CONFIG_CRC32 is not set

View File

@ -1,10 +1,6 @@
# CONFIG_MMU is not set
CONFIG_EXPERIMENTAL=y
CONFIG_LOG_BUF_SHIFT=14 CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set # CONFIG_KALLSYMS is not set
# CONFIG_HOTPLUG is not set
# CONFIG_FUTEX is not set # CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set # CONFIG_EPOLL is not set
# CONFIG_SIGNALFD is not set # CONFIG_SIGNALFD is not set
@ -16,10 +12,8 @@ CONFIG_EXPERT=y
# CONFIG_BLK_DEV_BSG is not set # CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set # CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set # CONFIG_IOSCHED_CFQ is not set
# CONFIG_MMU is not set
CONFIG_M5307=y CONFIG_M5307=y
CONFIG_CLOCK_SET=y
CONFIG_CLOCK_FREQ=90000000
CONFIG_CLOCK_DIV=2
CONFIG_M5307C3=y CONFIG_M5307C3=y
CONFIG_RAMBASE=0x00000000 CONFIG_RAMBASE=0x00000000
CONFIG_RAMSIZE=0x00800000 CONFIG_RAMSIZE=0x00800000
@ -38,16 +32,11 @@ CONFIG_INET=y
# CONFIG_IPV6 is not set # CONFIG_IPV6 is not set
# CONFIG_FW_LOADER is not set # CONFIG_FW_LOADER is not set
CONFIG_MTD=y CONFIG_MTD=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y CONFIG_MTD_BLOCK=y
CONFIG_MTD_RAM=y CONFIG_MTD_RAM=y
CONFIG_MTD_UCLINUX=y CONFIG_MTD_UCLINUX=y
CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM=y
# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
CONFIG_PPP=y CONFIG_PPP=y
CONFIG_SLIP=y CONFIG_SLIP=y
CONFIG_SLIP_COMPRESSED=y CONFIG_SLIP_COMPRESSED=y
@ -56,21 +45,17 @@ CONFIG_SLIP_COMPRESSED=y
# CONFIG_INPUT_MOUSE is not set # CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set # CONFIG_SERIO is not set
# CONFIG_VT is not set # CONFIG_VT is not set
# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_MCF=y CONFIG_SERIAL_MCF=y
CONFIG_SERIAL_MCF_CONSOLE=y CONFIG_SERIAL_MCF_CONSOLE=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set # CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set # CONFIG_HWMON is not set
# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set # CONFIG_USB_SUPPORT is not set
CONFIG_EXT2_FS=y CONFIG_EXT2_FS=y
# CONFIG_DNOTIFY is not set # CONFIG_DNOTIFY is not set
CONFIG_ROMFS_FS=y CONFIG_ROMFS_FS=y
CONFIG_ROMFS_BACKED_BY_MTD=y CONFIG_ROMFS_BACKED_BY_MTD=y
# CONFIG_NETWORK_FILESYSTEMS is not set # CONFIG_NETWORK_FILESYSTEMS is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_FULLDEBUG=y
CONFIG_BOOTPARAM=y CONFIG_BOOTPARAM=y
CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0" CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0"
# CONFIG_CRC32 is not set CONFIG_FULLDEBUG=y

View File

@ -1,10 +1,6 @@
# CONFIG_MMU is not set
CONFIG_EXPERIMENTAL=y
CONFIG_LOG_BUF_SHIFT=14 CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set # CONFIG_KALLSYMS is not set
# CONFIG_HOTPLUG is not set
# CONFIG_FUTEX is not set # CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set # CONFIG_EPOLL is not set
# CONFIG_SIGNALFD is not set # CONFIG_SIGNALFD is not set
@ -17,9 +13,8 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set # CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set # CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set # CONFIG_IOSCHED_CFQ is not set
# CONFIG_MMU is not set
CONFIG_M5407=y CONFIG_M5407=y
CONFIG_CLOCK_SET=y
CONFIG_CLOCK_FREQ=50000000
CONFIG_M5407C3=y CONFIG_M5407C3=y
CONFIG_RAMBASE=0x00000000 CONFIG_RAMBASE=0x00000000
CONFIG_RAMSIZE=0x00000000 CONFIG_RAMSIZE=0x00000000
@ -38,22 +33,17 @@ CONFIG_INET=y
# CONFIG_IPV6 is not set # CONFIG_IPV6 is not set
# CONFIG_FW_LOADER is not set # CONFIG_FW_LOADER is not set
CONFIG_MTD=y CONFIG_MTD=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y CONFIG_MTD_BLOCK=y
CONFIG_MTD_RAM=y CONFIG_MTD_RAM=y
CONFIG_MTD_UCLINUX=y CONFIG_MTD_UCLINUX=y
CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM=y
# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
CONFIG_PPP=y CONFIG_PPP=y
# CONFIG_INPUT is not set # CONFIG_INPUT is not set
# CONFIG_VT is not set # CONFIG_VT is not set
# CONFIG_UNIX98_PTYS is not set
CONFIG_SERIAL_MCF=y CONFIG_SERIAL_MCF=y
CONFIG_SERIAL_MCF_CONSOLE=y CONFIG_SERIAL_MCF_CONSOLE=y
# CONFIG_UNIX98_PTYS is not set
# CONFIG_HW_RANDOM is not set # CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set # CONFIG_HWMON is not set
# CONFIG_USB_SUPPORT is not set # CONFIG_USB_SUPPORT is not set
@ -63,8 +53,5 @@ CONFIG_EXT2_FS=y
CONFIG_ROMFS_FS=y CONFIG_ROMFS_FS=y
CONFIG_ROMFS_BACKED_BY_MTD=y CONFIG_ROMFS_BACKED_BY_MTD=y
# CONFIG_NETWORK_FILESYSTEMS is not set # CONFIG_NETWORK_FILESYSTEMS is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_BOOTPARAM=y CONFIG_BOOTPARAM=y
CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0" CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0"
# CONFIG_CRC32 is not set

View File

@ -1,11 +1,7 @@
CONFIG_EXPERIMENTAL=y
# CONFIG_SWAP is not set # CONFIG_SWAP is not set
CONFIG_LOG_BUF_SHIFT=14 CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_SYSCTL_SYSCALL=y CONFIG_SYSCTL_SYSCALL=y
# CONFIG_KALLSYMS is not set # CONFIG_KALLSYMS is not set
# CONFIG_HOTPLUG is not set
# CONFIG_FUTEX is not set # CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set # CONFIG_EPOLL is not set
# CONFIG_SIGNALFD is not set # CONFIG_SIGNALFD is not set
@ -20,19 +16,16 @@ CONFIG_MODULES=y
# CONFIG_IOSCHED_DEADLINE is not set # CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set # CONFIG_IOSCHED_CFQ is not set
CONFIG_COLDFIRE=y CONFIG_COLDFIRE=y
CONFIG_M547x=y
CONFIG_CLOCK_SET=y
CONFIG_CLOCK_FREQ=266000000
# CONFIG_4KSTACKS is not set # CONFIG_4KSTACKS is not set
CONFIG_RAMBASE=0x0 CONFIG_RAMBASE=0x0
CONFIG_RAMSIZE=0x2000000 CONFIG_RAMSIZE=0x2000000
CONFIG_VECTORBASE=0x0 CONFIG_VECTORBASE=0x0
CONFIG_MBAR=0xff000000 CONFIG_MBAR=0xff000000
CONFIG_KERNELBASE=0x20000 CONFIG_KERNELBASE=0x20000
CONFIG_PCI=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_FW_LOADER is not set # CONFIG_FW_LOADER is not set
CONFIG_MTD=y CONFIG_MTD=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y CONFIG_MTD_CFI=y
CONFIG_MTD_JEDECPROBE=y CONFIG_MTD_JEDECPROBE=y

View File

@@ -19,7 +19,7 @@
  * in any case new boards come along from time to time that have yet
  * another different clocking frequency.
  */
-#ifdef CONFIG_CLOCK_SET
+#ifdef CONFIG_CLOCK_FREQ
 #define MCF_CLK CONFIG_CLOCK_FREQ
 #else
 #error "Don't know what your ColdFire CPU clock frequency is??"

View File

@@ -413,7 +413,8 @@ static inline void isa_delay(void)
 #define writew(val, addr) out_le16((addr), (val))
 #endif /* CONFIG_ATARI_ROM_ISA */
-#if !defined(CONFIG_ISA) && !defined(CONFIG_ATARI_ROM_ISA)
+#if !defined(CONFIG_ISA) && !defined(CONFIG_ATARI_ROM_ISA) && \
+    !(defined(CONFIG_PCI) && defined(CONFIG_COLDFIRE))
 /*
  * We need to define dummy functions for GENERIC_IOMAP support.
  */

View File

@@ -57,7 +57,10 @@ union ctlreg0 {
     unsigned long lap  : 1; /* Low-address-protection control */
     unsigned long      : 4;
     unsigned long edat : 1; /* Enhanced-DAT-enablement control */
-    unsigned long      : 23;
+    unsigned long      : 4;
+    unsigned long afp  : 1; /* AFP-register control */
+    unsigned long vx   : 1; /* Vector enablement control */
+    unsigned long      : 17;
     };
 };

View File

@@ -87,7 +87,15 @@ struct sf_raw_sample {
 } __packed;
 /* Perf hardware reserve and release functions */
+#ifdef CONFIG_PERF_EVENTS
 int perf_reserve_sampling(void);
 void perf_release_sampling(void);
+#else /* CONFIG_PERF_EVENTS */
+static inline int perf_reserve_sampling(void)
+{
+	return 0;
+}
+static inline void perf_release_sampling(void) {}
+#endif /* CONFIG_PERF_EVENTS */
 #endif /* _ASM_S390_PERF_EVENT_H */
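The hunk above is the usual compiled-out stub pattern: when the option is off, callers get static inline no-ops instead of needing #ifdefs of their own. A generic, self-contained illustration (FEATURE_ENABLED is a made-up stand-in, not a real Kconfig symbol):

#include <stdio.h>

#define FEATURE_ENABLED 0	/* flip to 1 to use the real declarations */

#if FEATURE_ENABLED
int feature_reserve(void);	/* provided by the feature's own .c file */
void feature_release(void);
#else
static inline int feature_reserve(void) { return 0; }	/* always "succeeds" */
static inline void feature_release(void) { }		/* nothing to undo */
#endif

int main(void)
{
	if (feature_reserve() == 0)
		puts("reserved (or feature compiled out)");
	feature_release();
	return 0;
}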

View File

@ -21,6 +21,7 @@
#include <asm/nmi.h> #include <asm/nmi.h>
#include <asm/crw.h> #include <asm/crw.h>
#include <asm/switch_to.h> #include <asm/switch_to.h>
#include <asm/ctl_reg.h>
struct mcck_struct { struct mcck_struct {
int kill_task; int kill_task;
@ -129,26 +130,30 @@ static int notrace s390_revalidate_registers(struct mci *mci)
} else } else
asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area)); asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));
asm volatile( if (!MACHINE_HAS_VX) {
" ld 0,0(%0)\n" /* Revalidate floating point registers */
" ld 1,8(%0)\n" asm volatile(
" ld 2,16(%0)\n" " ld 0,0(%0)\n"
" ld 3,24(%0)\n" " ld 1,8(%0)\n"
" ld 4,32(%0)\n" " ld 2,16(%0)\n"
" ld 5,40(%0)\n" " ld 3,24(%0)\n"
" ld 6,48(%0)\n" " ld 4,32(%0)\n"
" ld 7,56(%0)\n" " ld 5,40(%0)\n"
" ld 8,64(%0)\n" " ld 6,48(%0)\n"
" ld 9,72(%0)\n" " ld 7,56(%0)\n"
" ld 10,80(%0)\n" " ld 8,64(%0)\n"
" ld 11,88(%0)\n" " ld 9,72(%0)\n"
" ld 12,96(%0)\n" " ld 10,80(%0)\n"
" ld 13,104(%0)\n" " ld 11,88(%0)\n"
" ld 14,112(%0)\n" " ld 12,96(%0)\n"
" ld 15,120(%0)\n" " ld 13,104(%0)\n"
: : "a" (fpt_save_area)); " ld 14,112(%0)\n"
/* Revalidate vector registers */ " ld 15,120(%0)\n"
if (MACHINE_HAS_VX && current->thread.vxrs) { : : "a" (fpt_save_area));
} else {
/* Revalidate vector registers */
union ctlreg0 cr0;
if (!mci->vr) { if (!mci->vr) {
/* /*
* Vector registers can't be restored and therefore * Vector registers can't be restored and therefore
@ -156,8 +161,12 @@ static int notrace s390_revalidate_registers(struct mci *mci)
*/ */
kill_task = 1; kill_task = 1;
} }
cr0.val = S390_lowcore.cregs_save_area[0];
cr0.afp = cr0.vx = 1;
__ctl_load(cr0.val, 0, 0);
restore_vx_regs((__vector128 *) restore_vx_regs((__vector128 *)
S390_lowcore.vector_save_area_addr); &S390_lowcore.vector_save_area);
__ctl_load(S390_lowcore.cregs_save_area[0], 0, 0);
} }
/* Revalidate access registers */ /* Revalidate access registers */
asm volatile( asm volatile(

View File

@@ -163,7 +163,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
 asmlinkage void execve_tail(void)
 {
     current->thread.fp_regs.fpc = 0;
-    asm volatile("sfpc %0,%0" : : "d" (0));
+    asm volatile("sfpc %0" : : "d" (0));
 }
 /*

View File

@@ -270,6 +270,8 @@ ENTRY(_sclp_print_early)
     jno .Lesa2
     ahi %r15,-80
     stmh %r6,%r15,96(%r15) # store upper register halves
+    basr %r13,0
+    lmh %r0,%r15,.Lzeroes-.(%r13) # clear upper register halves
 .Lesa2:
     lr %r10,%r2 # save string pointer
     lhi %r2,0
@@ -291,6 +293,8 @@
 .Lesa3:
     lm %r6,%r15,120(%r15) # restore registers
     br %r14
+.Lzeroes:
+    .fill 64,4,0
 .LwritedataS4:
     .long 0x00760005 # SCLP command for write data

View File

@@ -16,6 +16,7 @@
 #include <linux/fs.h>
 #include <linux/module.h>
 #include <asm/processor.h>
+#include <asm/perf_event.h>
 #include "../../../drivers/oprofile/oprof.h"

View File

@@ -25,36 +25,9 @@
 #if IS_ENABLED(CONFIG_INTEL_PMC_IPC)
-/*
- * intel_pmc_ipc_simple_command
- * @cmd: command
- * @sub: sub type
- */
 int intel_pmc_ipc_simple_command(int cmd, int sub);
-/*
- * intel_pmc_ipc_raw_cmd
- * @cmd: command
- * @sub: sub type
- * @in: input data
- * @inlen: input length in bytes
- * @out: output data
- * @outlen: output length in dwords
- * @sptr: data writing to SPTR register
- * @dptr: data writing to DPTR register
- */
 int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen,
         u32 *out, u32 outlen, u32 dptr, u32 sptr);
-/*
- * intel_pmc_ipc_command
- * @cmd: command
- * @sub: sub type
- * @in: input data
- * @inlen: input length in bytes
- * @out: output data
- * @outlen: output length in dwords
- */
 int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen,
         u32 *out, u32 outlen);

View File

@@ -604,6 +604,8 @@ struct kvm_arch {
     bool iommu_noncoherent;
 #define __KVM_HAVE_ARCH_NONCOHERENT_DMA
     atomic_t noncoherent_dma_count;
+#define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
+    atomic_t assigned_device_count;
     struct kvm_pic *vpic;
     struct kvm_ioapic *vioapic;
     struct kvm_pit *vpit;

View File

@@ -108,6 +108,8 @@
 #define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE (1 << 4)
 /* Support for a virtual guest idle state is available */
 #define HV_X64_GUEST_IDLE_STATE_AVAILABLE (1 << 5)
+/* Guest crash data handler available */
+#define HV_X64_GUEST_CRASH_MSR_AVAILABLE (1 << 10)
 /*
  * Implementation recommendations. Indicates which behaviors the hypervisor

View File

@@ -98,6 +98,8 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
         best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
     vcpu->arch.eager_fpu = use_eager_fpu() || guest_cpuid_has_mpx(vcpu);
+    if (vcpu->arch.eager_fpu)
+        kvm_x86_ops->fpu_activate(vcpu);
     /*
      * The existing code assumes virtual address is 48-bit in the canonical

View File

@@ -200,6 +200,7 @@ int kvm_assign_device(struct kvm *kvm, struct pci_dev *pdev)
         goto out_unmap;
     }
+    kvm_arch_start_assignment(kvm);
     pci_set_dev_assigned(pdev);
     dev_info(&pdev->dev, "kvm assign device\n");
@@ -224,6 +225,7 @@ int kvm_deassign_device(struct kvm *kvm, struct pci_dev *pdev)
     iommu_detach_device(domain, &pdev->dev);
     pci_clear_dev_assigned(pdev);
+    kvm_arch_end_assignment(kvm);
     dev_info(&pdev->dev, "kvm deassign device\n");

View File

@@ -2479,6 +2479,14 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
     return 0;
 }
+static bool kvm_is_mmio_pfn(pfn_t pfn)
+{
+    if (pfn_valid(pfn))
+        return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
+
+    return true;
+}
+
 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
             unsigned pte_access, int level,
             gfn_t gfn, pfn_t pfn, bool speculative,
@@ -2506,7 +2514,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
         spte |= PT_PAGE_SIZE_MASK;
     if (tdp_enabled)
         spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
-            kvm_is_reserved_pfn(pfn));
+            kvm_is_mmio_pfn(pfn));
     if (host_writable)
         spte |= SPTE_HOST_WRITEABLE;

View File

@ -865,6 +865,64 @@ static void svm_disable_lbrv(struct vcpu_svm *svm)
set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0); set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
} }
#define MTRR_TYPE_UC_MINUS 7
#define MTRR2PROTVAL_INVALID 0xff
static u8 mtrr2protval[8];
static u8 fallback_mtrr_type(int mtrr)
{
/*
* WT and WP aren't always available in the host PAT. Treat
* them as UC and UC- respectively. Everything else should be
* there.
*/
switch (mtrr)
{
case MTRR_TYPE_WRTHROUGH:
return MTRR_TYPE_UNCACHABLE;
case MTRR_TYPE_WRPROT:
return MTRR_TYPE_UC_MINUS;
default:
BUG();
}
}
static void build_mtrr2protval(void)
{
int i;
u64 pat;
for (i = 0; i < 8; i++)
mtrr2protval[i] = MTRR2PROTVAL_INVALID;
/* Ignore the invalid MTRR types. */
mtrr2protval[2] = 0;
mtrr2protval[3] = 0;
/*
* Use host PAT value to figure out the mapping from guest MTRR
* values to nested page table PAT/PCD/PWT values. We do not
* want to change the host PAT value every time we enter the
* guest.
*/
rdmsrl(MSR_IA32_CR_PAT, pat);
for (i = 0; i < 8; i++) {
u8 mtrr = pat >> (8 * i);
if (mtrr2protval[mtrr] == MTRR2PROTVAL_INVALID)
mtrr2protval[mtrr] = __cm_idx2pte(i);
}
for (i = 0; i < 8; i++) {
if (mtrr2protval[i] == MTRR2PROTVAL_INVALID) {
u8 fallback = fallback_mtrr_type(i);
mtrr2protval[i] = mtrr2protval[fallback];
BUG_ON(mtrr2protval[i] == MTRR2PROTVAL_INVALID);
}
}
}
static __init int svm_hardware_setup(void) static __init int svm_hardware_setup(void)
{ {
int cpu; int cpu;
@ -931,6 +989,7 @@ static __init int svm_hardware_setup(void)
} else } else
kvm_disable_tdp(); kvm_disable_tdp();
build_mtrr2protval();
return 0; return 0;
err: err:
@ -1085,6 +1144,39 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
return target_tsc - tsc; return target_tsc - tsc;
} }
static void svm_set_guest_pat(struct vcpu_svm *svm, u64 *g_pat)
{
struct kvm_vcpu *vcpu = &svm->vcpu;
/* Unlike Intel, AMD takes the guest's CR0.CD into account.
*
* AMD does not have IPAT. To emulate it for the case of guests
* with no assigned devices, just set everything to WB. If guests
* have assigned devices, however, we cannot force WB for RAM
* pages only, so use the guest PAT directly.
*/
if (!kvm_arch_has_assigned_device(vcpu->kvm))
*g_pat = 0x0606060606060606;
else
*g_pat = vcpu->arch.pat;
}
static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
{
u8 mtrr;
/*
* 1. MMIO: trust guest MTRR, so same as item 3.
* 2. No passthrough: always map as WB, and force guest PAT to WB as well
* 3. Passthrough: can't guarantee the result, try to trust guest.
*/
if (!is_mmio && !kvm_arch_has_assigned_device(vcpu->kvm))
return 0;
mtrr = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
return mtrr2protval[mtrr];
}
static void init_vmcb(struct vcpu_svm *svm, bool init_event) static void init_vmcb(struct vcpu_svm *svm, bool init_event)
{ {
struct vmcb_control_area *control = &svm->vmcb->control; struct vmcb_control_area *control = &svm->vmcb->control;
@ -1180,6 +1272,7 @@ static void init_vmcb(struct vcpu_svm *svm, bool init_event)
clr_cr_intercept(svm, INTERCEPT_CR3_READ); clr_cr_intercept(svm, INTERCEPT_CR3_READ);
clr_cr_intercept(svm, INTERCEPT_CR3_WRITE); clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
save->g_pat = svm->vcpu.arch.pat; save->g_pat = svm->vcpu.arch.pat;
svm_set_guest_pat(svm, &save->g_pat);
save->cr3 = 0; save->cr3 = 0;
save->cr4 = 0; save->cr4 = 0;
} }
@ -3254,6 +3347,16 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
case MSR_VM_IGNNE: case MSR_VM_IGNNE:
vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
break; break;
case MSR_IA32_CR_PAT:
if (npt_enabled) {
if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
return 1;
vcpu->arch.pat = data;
svm_set_guest_pat(svm, &svm->vmcb->save.g_pat);
mark_dirty(svm->vmcb, VMCB_NPT);
break;
}
/* fall through */
default: default:
return kvm_set_msr_common(vcpu, msr); return kvm_set_msr_common(vcpu, msr);
} }
@ -4088,11 +4191,6 @@ static bool svm_has_high_real_mode_segbase(void)
return true; return true;
} }
static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
{
return 0;
}
static void svm_cpuid_update(struct kvm_vcpu *vcpu) static void svm_cpuid_update(struct kvm_vcpu *vcpu)
{ {
} }
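The MTRR-to-PAT table built above works by slicing the host IA32_PAT MSR into its eight one-byte entries. A user-space sketch of that decoding (the sample value is the architectural power-on default PAT and is used here only as an example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t pat = 0x0007040600070406ULL;	/* example MSR value */
	static const char * const type[8] = {
		"UC", "WC", "rsvd", "rsvd", "WT", "WP", "WB", "UC-"
	};

	/* Same extraction as "u8 mtrr = pat >> (8 * i)" in the code above:
	 * each PAn field is one byte holding a memory type 0..7. */
	for (int i = 0; i < 8; i++) {
		uint8_t entry = (uint8_t)(pat >> (8 * i));
		printf("PA%d = %u (%s)\n", i, entry, type[entry & 0x7]);
	}
	return 0;
}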

View File

@@ -8632,22 +8632,17 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
     u64 ipat = 0;
     /* For VT-d and EPT combination
-     * 1. MMIO: always map as UC
+     * 1. MMIO: guest may want to apply WC, trust it.
      * 2. EPT with VT-d:
      *   a. VT-d without snooping control feature: can't guarantee the
-     *      result, try to trust guest.
+     *      result, try to trust guest. So the same as item 1.
      *   b. VT-d with snooping control feature: snooping control feature of
      *      VT-d engine can guarantee the cache correctness. Just set it
      *      to WB to keep consistent with host. So the same as item 3.
      * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep
      *    consistent with host MTRR
      */
-    if (is_mmio) {
-        cache = MTRR_TYPE_UNCACHABLE;
-        goto exit;
-    }
-    if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
+    if (!is_mmio && !kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
         ipat = VMX_EPT_IPAT_BIT;
         cache = MTRR_TYPE_WRBACK;
         goto exit;

View File

@ -3157,8 +3157,7 @@ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
cpuid_count(XSTATE_CPUID, index, cpuid_count(XSTATE_CPUID, index,
&size, &offset, &ecx, &edx); &size, &offset, &ecx, &edx);
memcpy(dest, src + offset, size); memcpy(dest, src + offset, size);
} else }
WARN_ON_ONCE(1);
valid -= feature; valid -= feature;
} }
@ -7315,11 +7314,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
vcpu = kvm_x86_ops->vcpu_create(kvm, id); vcpu = kvm_x86_ops->vcpu_create(kvm, id);
/*
* Activate fpu unconditionally in case the guest needs eager FPU. It will be
* deactivated soon if it doesn't.
*/
kvm_x86_ops->fpu_activate(vcpu);
return vcpu; return vcpu;
} }
@ -8218,6 +8212,24 @@ bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
kvm_x86_ops->interrupt_allowed(vcpu); kvm_x86_ops->interrupt_allowed(vcpu);
} }
void kvm_arch_start_assignment(struct kvm *kvm)
{
atomic_inc(&kvm->arch.assigned_device_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);
void kvm_arch_end_assignment(struct kvm *kvm)
{
atomic_dec(&kvm->arch.assigned_device_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_end_assignment);
bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
return atomic_read(&kvm->arch.assigned_device_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device);
void kvm_arch_register_noncoherent_dma(struct kvm *kvm) void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{ {
atomic_inc(&kvm->arch.noncoherent_dma_count); atomic_inc(&kvm->arch.noncoherent_dma_count);

View File

@@ -51,7 +51,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
     unsigned long idx = BIO_POOL_NONE;
     unsigned inline_vecs;
-    if (!bs) {
+    if (!bs || !bs->bio_integrity_pool) {
         bip = kmalloc(sizeof(struct bio_integrity_payload) +
                 sizeof(struct bio_vec) * nr_vecs, gfp_mask);
         inline_vecs = nr_vecs;
@@ -104,7 +104,7 @@ void bio_integrity_free(struct bio *bio)
         kfree(page_address(bip->bip_vec->bv_page) +
                 bip->bip_vec->bv_offset);
-    if (bs) {
+    if (bs && bs->bio_integrity_pool) {
         if (bip->bip_slab != BIO_POOL_NONE)
             bvec_free(bs->bvec_integrity_pool, bip->bip_vec,
                     bip->bip_slab);

View File

@ -29,6 +29,14 @@
#define MAX_KEY_LEN 100 #define MAX_KEY_LEN 100
/*
* blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
* blkcg_pol_register_mutex nests outside of it and synchronizes entire
* policy [un]register operations including cgroup file additions /
* removals. Putting cgroup file registration outside blkcg_pol_mutex
* allows grabbing it from cgroup callbacks.
*/
static DEFINE_MUTEX(blkcg_pol_register_mutex);
static DEFINE_MUTEX(blkcg_pol_mutex); static DEFINE_MUTEX(blkcg_pol_mutex);
struct blkcg blkcg_root; struct blkcg blkcg_root;
@ -38,6 +46,8 @@ struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS]; static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
static LIST_HEAD(all_blkcgs); /* protected by blkcg_pol_mutex */
static bool blkcg_policy_enabled(struct request_queue *q, static bool blkcg_policy_enabled(struct request_queue *q,
const struct blkcg_policy *pol) const struct blkcg_policy *pol)
{ {
@ -453,20 +463,7 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
struct blkcg_gq *blkg; struct blkcg_gq *blkg;
int i; int i;
/* mutex_lock(&blkcg_pol_mutex);
* XXX: We invoke cgroup_add/rm_cftypes() under blkcg_pol_mutex
* which ends up putting cgroup's internal cgroup_tree_mutex under
* it; however, cgroup_tree_mutex is nested above cgroup file
* active protection and grabbing blkcg_pol_mutex from a cgroup
* file operation creates a possible circular dependency. cgroup
* internal locking is planned to go through further simplification
* and this issue should go away soon. For now, let's trylock
* blkcg_pol_mutex and restart the write on failure.
*
* http://lkml.kernel.org/g/5363C04B.4010400@oracle.com
*/
if (!mutex_trylock(&blkcg_pol_mutex))
return restart_syscall();
spin_lock_irq(&blkcg->lock); spin_lock_irq(&blkcg->lock);
/* /*
@ -822,8 +819,17 @@ static void blkcg_css_free(struct cgroup_subsys_state *css)
{ {
struct blkcg *blkcg = css_to_blkcg(css); struct blkcg *blkcg = css_to_blkcg(css);
if (blkcg != &blkcg_root) mutex_lock(&blkcg_pol_mutex);
list_del(&blkcg->all_blkcgs_node);
mutex_unlock(&blkcg_pol_mutex);
if (blkcg != &blkcg_root) {
int i;
for (i = 0; i < BLKCG_MAX_POLS; i++)
kfree(blkcg->pd[i]);
kfree(blkcg); kfree(blkcg);
}
} }
static struct cgroup_subsys_state * static struct cgroup_subsys_state *
@ -833,6 +839,8 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
struct cgroup_subsys_state *ret; struct cgroup_subsys_state *ret;
int i; int i;
mutex_lock(&blkcg_pol_mutex);
if (!parent_css) { if (!parent_css) {
blkcg = &blkcg_root; blkcg = &blkcg_root;
goto done; goto done;
@ -875,14 +883,17 @@ done:
#ifdef CONFIG_CGROUP_WRITEBACK #ifdef CONFIG_CGROUP_WRITEBACK
INIT_LIST_HEAD(&blkcg->cgwb_list); INIT_LIST_HEAD(&blkcg->cgwb_list);
#endif #endif
list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);
mutex_unlock(&blkcg_pol_mutex);
return &blkcg->css; return &blkcg->css;
free_pd_blkcg: free_pd_blkcg:
for (i--; i >= 0; i--) for (i--; i >= 0; i--)
kfree(blkcg->pd[i]); kfree(blkcg->pd[i]);
free_blkcg: free_blkcg:
kfree(blkcg); kfree(blkcg);
mutex_unlock(&blkcg_pol_mutex);
return ret; return ret;
} }
@ -1037,10 +1048,8 @@ int blkcg_activate_policy(struct request_queue *q,
const struct blkcg_policy *pol) const struct blkcg_policy *pol)
{ {
LIST_HEAD(pds); LIST_HEAD(pds);
LIST_HEAD(cpds);
struct blkcg_gq *blkg; struct blkcg_gq *blkg;
struct blkg_policy_data *pd, *nd; struct blkg_policy_data *pd, *nd;
struct blkcg_policy_data *cpd, *cnd;
int cnt = 0, ret; int cnt = 0, ret;
if (blkcg_policy_enabled(q, pol)) if (blkcg_policy_enabled(q, pol))
@ -1053,10 +1062,7 @@ int blkcg_activate_policy(struct request_queue *q,
cnt++; cnt++;
spin_unlock_irq(q->queue_lock); spin_unlock_irq(q->queue_lock);
/* /* allocate per-blkg policy data for all existing blkgs */
* Allocate per-blkg and per-blkcg policy data
* for all existing blkgs.
*/
while (cnt--) { while (cnt--) {
pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node); pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
if (!pd) { if (!pd) {
@ -1064,15 +1070,6 @@ int blkcg_activate_policy(struct request_queue *q,
goto out_free; goto out_free;
} }
list_add_tail(&pd->alloc_node, &pds); list_add_tail(&pd->alloc_node, &pds);
if (!pol->cpd_size)
continue;
cpd = kzalloc_node(pol->cpd_size, GFP_KERNEL, q->node);
if (!cpd) {
ret = -ENOMEM;
goto out_free;
}
list_add_tail(&cpd->alloc_node, &cpds);
} }
/* /*
@ -1082,32 +1079,17 @@ int blkcg_activate_policy(struct request_queue *q,
spin_lock_irq(q->queue_lock); spin_lock_irq(q->queue_lock);
list_for_each_entry(blkg, &q->blkg_list, q_node) { list_for_each_entry(blkg, &q->blkg_list, q_node) {
if (WARN_ON(list_empty(&pds)) || if (WARN_ON(list_empty(&pds))) {
WARN_ON(pol->cpd_size && list_empty(&cpds))) {
/* umm... this shouldn't happen, just abort */ /* umm... this shouldn't happen, just abort */
ret = -ENOMEM; ret = -ENOMEM;
goto out_unlock; goto out_unlock;
} }
cpd = list_first_entry(&cpds, struct blkcg_policy_data,
alloc_node);
list_del_init(&cpd->alloc_node);
pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node); pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
list_del_init(&pd->alloc_node); list_del_init(&pd->alloc_node);
/* grab blkcg lock too while installing @pd on @blkg */ /* grab blkcg lock too while installing @pd on @blkg */
spin_lock(&blkg->blkcg->lock); spin_lock(&blkg->blkcg->lock);
if (!pol->cpd_size)
goto no_cpd;
if (!blkg->blkcg->pd[pol->plid]) {
/* Per-policy per-blkcg data */
blkg->blkcg->pd[pol->plid] = cpd;
cpd->plid = pol->plid;
pol->cpd_init_fn(blkg->blkcg);
} else { /* must free it as it has already been extracted */
kfree(cpd);
}
no_cpd:
blkg->pd[pol->plid] = pd; blkg->pd[pol->plid] = pd;
pd->blkg = blkg; pd->blkg = blkg;
pd->plid = pol->plid; pd->plid = pol->plid;
@ -1124,8 +1106,6 @@ out_free:
blk_queue_bypass_end(q); blk_queue_bypass_end(q);
list_for_each_entry_safe(pd, nd, &pds, alloc_node) list_for_each_entry_safe(pd, nd, &pds, alloc_node)
kfree(pd); kfree(pd);
list_for_each_entry_safe(cpd, cnd, &cpds, alloc_node)
kfree(cpd);
return ret; return ret;
} }
EXPORT_SYMBOL_GPL(blkcg_activate_policy); EXPORT_SYMBOL_GPL(blkcg_activate_policy);
@ -1162,8 +1142,6 @@ void blkcg_deactivate_policy(struct request_queue *q,
kfree(blkg->pd[pol->plid]); kfree(blkg->pd[pol->plid]);
blkg->pd[pol->plid] = NULL; blkg->pd[pol->plid] = NULL;
kfree(blkg->blkcg->pd[pol->plid]);
blkg->blkcg->pd[pol->plid] = NULL;
spin_unlock(&blkg->blkcg->lock); spin_unlock(&blkg->blkcg->lock);
} }
@ -1182,11 +1160,13 @@ EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
*/ */
int blkcg_policy_register(struct blkcg_policy *pol) int blkcg_policy_register(struct blkcg_policy *pol)
{ {
struct blkcg *blkcg;
int i, ret; int i, ret;
if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data))) if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
return -EINVAL; return -EINVAL;
mutex_lock(&blkcg_pol_register_mutex);
mutex_lock(&blkcg_pol_mutex); mutex_lock(&blkcg_pol_mutex);
/* find an empty slot */ /* find an empty slot */
@ -1195,19 +1175,49 @@ int blkcg_policy_register(struct blkcg_policy *pol)
if (!blkcg_policy[i]) if (!blkcg_policy[i])
break; break;
if (i >= BLKCG_MAX_POLS) if (i >= BLKCG_MAX_POLS)
goto out_unlock; goto err_unlock;
/* register and update blkgs */ /* register @pol */
pol->plid = i; pol->plid = i;
blkcg_policy[i] = pol; blkcg_policy[pol->plid] = pol;
/* allocate and install cpd's */
if (pol->cpd_size) {
list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
struct blkcg_policy_data *cpd;
cpd = kzalloc(pol->cpd_size, GFP_KERNEL);
if (!cpd) {
mutex_unlock(&blkcg_pol_mutex);
goto err_free_cpds;
}
blkcg->pd[pol->plid] = cpd;
cpd->plid = pol->plid;
pol->cpd_init_fn(blkcg);
}
}
mutex_unlock(&blkcg_pol_mutex);
/* everything is in place, add intf files for the new policy */ /* everything is in place, add intf files for the new policy */
if (pol->cftypes) if (pol->cftypes)
WARN_ON(cgroup_add_legacy_cftypes(&blkio_cgrp_subsys, WARN_ON(cgroup_add_legacy_cftypes(&blkio_cgrp_subsys,
pol->cftypes)); pol->cftypes));
ret = 0; mutex_unlock(&blkcg_pol_register_mutex);
out_unlock: return 0;
err_free_cpds:
if (pol->cpd_size) {
list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
kfree(blkcg->pd[pol->plid]);
blkcg->pd[pol->plid] = NULL;
}
}
blkcg_policy[pol->plid] = NULL;
err_unlock:
mutex_unlock(&blkcg_pol_mutex); mutex_unlock(&blkcg_pol_mutex);
mutex_unlock(&blkcg_pol_register_mutex);
return ret; return ret;
} }
EXPORT_SYMBOL_GPL(blkcg_policy_register); EXPORT_SYMBOL_GPL(blkcg_policy_register);
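The register path above now allocates per-blkcg policy data (cpd) for every cgroup already on all_blkcgs and frees it all again on failure or at unregistration. A minimal userspace sketch of that allocate-for-all-then-roll-back pattern; the names (struct group, policy_register, NGROUPS) are illustrative and not from the patch:

#include <stdio.h>
#include <stdlib.h>

#define NGROUPS 4

struct group {
	void *pd;			/* per-policy data, like blkcg->pd[plid] */
};

static struct group groups[NGROUPS];	/* stands in for the all_blkcgs list */

static int policy_register(size_t pd_size)
{
	int i;

	/* allocate policy data for every pre-existing group */
	for (i = 0; i < NGROUPS; i++) {
		groups[i].pd = calloc(1, pd_size);
		if (!groups[i].pd)
			goto rollback;
	}
	return 0;

rollback:
	/* free whatever was already allocated, like err_free_cpds above */
	while (--i >= 0) {
		free(groups[i].pd);
		groups[i].pd = NULL;
	}
	return -1;
}

int main(void)
{
	printf("register: %d\n", policy_register(64));
	return 0;
}

Keeping both the allocation and the rollback under one lock (blkcg_pol_mutex in the patch) is what lets blkcg_css_alloc() and blkcg_css_free() add and remove cgroups from all_blkcgs without racing against a policy being registered.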
@ -1220,7 +1230,9 @@ EXPORT_SYMBOL_GPL(blkcg_policy_register);
*/ */
void blkcg_policy_unregister(struct blkcg_policy *pol) void blkcg_policy_unregister(struct blkcg_policy *pol)
{ {
mutex_lock(&blkcg_pol_mutex); struct blkcg *blkcg;
mutex_lock(&blkcg_pol_register_mutex);
if (WARN_ON(blkcg_policy[pol->plid] != pol)) if (WARN_ON(blkcg_policy[pol->plid] != pol))
goto out_unlock; goto out_unlock;
@ -1229,9 +1241,19 @@ void blkcg_policy_unregister(struct blkcg_policy *pol)
if (pol->cftypes) if (pol->cftypes)
cgroup_rm_cftypes(pol->cftypes); cgroup_rm_cftypes(pol->cftypes);
/* unregister and update blkgs */ /* remove cpds and unregister */
mutex_lock(&blkcg_pol_mutex);
if (pol->cpd_size) {
list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
kfree(blkcg->pd[pol->plid]);
blkcg->pd[pol->plid] = NULL;
}
}
blkcg_policy[pol->plid] = NULL; blkcg_policy[pol->plid] = NULL;
out_unlock:
mutex_unlock(&blkcg_pol_mutex); mutex_unlock(&blkcg_pol_mutex);
out_unlock:
mutex_unlock(&blkcg_pol_register_mutex);
} }
EXPORT_SYMBOL_GPL(blkcg_policy_unregister); EXPORT_SYMBOL_GPL(blkcg_policy_unregister);

View File

@ -3370,7 +3370,7 @@ EXPORT_SYMBOL(blk_post_runtime_resume);
int __init blk_dev_init(void) int __init blk_dev_init(void)
{ {
BUILD_BUG_ON(__REQ_NR_BITS > 8 * BUILD_BUG_ON(__REQ_NR_BITS > 8 *
sizeof(((struct request *)0)->cmd_flags)); FIELD_SIZEOF(struct request, cmd_flags));
/* used for unplugging and affects IO latency/throughput - HIGHPRI */ /* used for unplugging and affects IO latency/throughput - HIGHPRI */
kblockd_workqueue = alloc_workqueue("kblockd", kblockd_workqueue = alloc_workqueue("kblockd",
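FIELD_SIZEOF(struct request, cmd_flags) is just a more readable spelling of the open-coded sizeof on the removed line. A standalone check; struct request_like is a stand-in, not the real struct request:

#include <stdio.h>

#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))

struct request_like {			/* stand-in for struct request */
	unsigned long long cmd_flags;
};

int main(void)
{
	/* both forms evaluate the member size without needing an object */
	printf("%zu %zu\n",
	       sizeof(((struct request_like *)0)->cmd_flags),
	       FIELD_SIZEOF(struct request_like, cmd_flags));
	return 0;
}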

View File

@ -1998,7 +1998,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
goto err_hctxs; goto err_hctxs;
setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q); setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30000); blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
q->nr_queues = nr_cpu_ids; q->nr_queues = nr_cpu_ids;
q->nr_hw_queues = set->nr_hw_queues; q->nr_hw_queues = set->nr_hw_queues;
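blk_queue_rq_timeout() takes jiffies, so the literal 30000 only meant 30 seconds on HZ=1000 kernels; 30 * HZ is 30 seconds for any HZ. A quick standalone check, with example HZ values:

#include <stdio.h>

int main(void)
{
	const int hz_values[] = { 100, 250, 300, 1000 };
	unsigned int i;

	for (i = 0; i < sizeof(hz_values) / sizeof(hz_values[0]); i++)
		printf("HZ=%4d: 30000 jiffies = %3d s, 30 * HZ jiffies = 30 s\n",
		       hz_values[i], 30000 / hz_values[i]);
	return 0;
}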

View File

@ -24,7 +24,7 @@ acpi-y += nvs.o
# Power management related files # Power management related files
acpi-y += wakeup.o acpi-y += wakeup.o
acpi-$(CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT) += sleep.o acpi-$(CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT) += sleep.o
acpi-y += device_pm.o acpi-y += device_sysfs.o device_pm.o
acpi-$(CONFIG_ACPI_SLEEP) += proc.o acpi-$(CONFIG_ACPI_SLEEP) += proc.o

View File

@ -423,6 +423,413 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
acpi_evaluate_ost(handle, type, ost_code, NULL); acpi_evaluate_ost(handle, type, ost_code, NULL);
} }
static void acpi_device_notify(acpi_handle handle, u32 event, void *data)
{
struct acpi_device *device = data;
device->driver->ops.notify(device, event);
}
static void acpi_device_notify_fixed(void *data)
{
struct acpi_device *device = data;
/* Fixed hardware devices have no handles */
acpi_device_notify(NULL, ACPI_FIXED_HARDWARE_EVENT, device);
}
static u32 acpi_device_fixed_event(void *data)
{
acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_device_notify_fixed, data);
return ACPI_INTERRUPT_HANDLED;
}
static int acpi_device_install_notify_handler(struct acpi_device *device)
{
acpi_status status;
if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
status =
acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
acpi_device_fixed_event,
device);
else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
status =
acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
acpi_device_fixed_event,
device);
else
status = acpi_install_notify_handler(device->handle,
ACPI_DEVICE_NOTIFY,
acpi_device_notify,
device);
if (ACPI_FAILURE(status))
return -EINVAL;
return 0;
}
static void acpi_device_remove_notify_handler(struct acpi_device *device)
{
if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
acpi_device_fixed_event);
else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
acpi_device_fixed_event);
else
acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
acpi_device_notify);
}
/* --------------------------------------------------------------------------
Device Matching
-------------------------------------------------------------------------- */
/**
* acpi_device_is_first_physical_node - Is given dev first physical node
* @adev: ACPI companion device
* @dev: Physical device to check
*
* Function checks whether the given @dev is the first physical device attached to
* the ACPI companion device. This distinction is needed in some cases
* where the same companion device is shared between many physical devices.
*
* Note that the caller has to provide a valid @adev pointer.
*/
bool acpi_device_is_first_physical_node(struct acpi_device *adev,
const struct device *dev)
{
bool ret = false;
mutex_lock(&adev->physical_node_lock);
if (!list_empty(&adev->physical_node_list)) {
const struct acpi_device_physical_node *node;
node = list_first_entry(&adev->physical_node_list,
struct acpi_device_physical_node, node);
ret = node->dev == dev;
}
mutex_unlock(&adev->physical_node_lock);
return ret;
}
/*
* acpi_companion_match() - Can we match via ACPI companion device
* @dev: Device in question
*
* Check if the given device has an ACPI companion and if that companion has
* a valid list of PNP IDs, and if the device is the first (primary) physical
* device associated with it. Return the companion pointer if that's the case
* or NULL otherwise.
*
* If multiple physical devices are attached to a single ACPI companion, we need
* to be careful. The usage scenario for this kind of relationship is that all
* of the physical devices in question use resources provided by the ACPI
* companion. A typical case is an MFD device where all the sub-devices share
* the parent's ACPI companion. In such cases we can only allow the primary
* (first) physical device to be matched with the help of the companion's PNP
* IDs.
*
* Additional physical devices sharing the ACPI companion can still use
* resources available from it but they will be matched normally using functions
* provided by their bus types (and analogously for their modalias).
*/
struct acpi_device *acpi_companion_match(const struct device *dev)
{
struct acpi_device *adev;
struct mutex *physical_node_lock;
adev = ACPI_COMPANION(dev);
if (!adev)
return NULL;
if (list_empty(&adev->pnp.ids))
return NULL;
physical_node_lock = &adev->physical_node_lock;
mutex_lock(physical_node_lock);
if (list_empty(&adev->physical_node_list)) {
adev = NULL;
} else {
const struct acpi_device_physical_node *node;
node = list_first_entry(&adev->physical_node_list,
struct acpi_device_physical_node, node);
if (node->dev != dev)
adev = NULL;
}
mutex_unlock(physical_node_lock);
return adev;
}
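A small userspace model of the rule acpi_companion_match() enforces: when several physical devices share one ACPI companion, only the first entry on the companion's physical-node list is allowed to match through it. The struct and device names below are illustrative only:

#include <stdio.h>

struct phys_dev {
	const char *name;
};

struct companion {			/* stands in for struct acpi_device */
	struct phys_dev *nodes[4];	/* physical_node_list, in attach order */
	int nr_nodes;
};

/* only the first attached physical device is matched via the companion */
static struct companion *companion_match(struct companion *c,
					 struct phys_dev *dev)
{
	if (!c->nr_nodes || c->nodes[0] != dev)
		return NULL;
	return c;
}

int main(void)
{
	struct phys_dev mfd_core = { "mfd-core" }, mfd_cell = { "mfd-cell" };
	struct companion adev = { { &mfd_core, &mfd_cell }, 2 };

	printf("%s matches: %s\n", mfd_core.name,
	       companion_match(&adev, &mfd_core) ? "yes" : "no");
	printf("%s matches: %s\n", mfd_cell.name,
	       companion_match(&adev, &mfd_cell) ? "yes" : "no");
	return 0;
}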
/**
* acpi_of_match_device - Match device object using the "compatible" property.
* @adev: ACPI device object to match.
* @of_match_table: List of device IDs to match against.
*
* If @adev has ACPI_DT_NAMESPACE_HID in its list of identifiers and a _DSD
* object with the "compatible" property, use that property to match against
* the given list of identifiers.
*/
static bool acpi_of_match_device(struct acpi_device *adev,
const struct of_device_id *of_match_table)
{
const union acpi_object *of_compatible, *obj;
int i, nval;
if (!adev)
return false;
of_compatible = adev->data.of_compatible;
if (!of_match_table || !of_compatible)
return false;
if (of_compatible->type == ACPI_TYPE_PACKAGE) {
nval = of_compatible->package.count;
obj = of_compatible->package.elements;
} else { /* Must be ACPI_TYPE_STRING. */
nval = 1;
obj = of_compatible;
}
/* Now we can look for the driver DT compatible strings */
for (i = 0; i < nval; i++, obj++) {
const struct of_device_id *id;
for (id = of_match_table; id->compatible[0]; id++)
if (!strcasecmp(obj->string.pointer, id->compatible))
return true;
}
return false;
}
static bool __acpi_match_device_cls(const struct acpi_device_id *id,
struct acpi_hardware_id *hwid)
{
int i, msk, byte_shift;
char buf[3];
if (!id->cls)
return false;
/* Apply class-code bitmask, before checking each class-code byte */
for (i = 1; i <= 3; i++) {
byte_shift = 8 * (3 - i);
msk = (id->cls_msk >> byte_shift) & 0xFF;
if (!msk)
continue;
sprintf(buf, "%02x", (id->cls >> byte_shift) & msk);
if (strncmp(buf, &hwid->id[(i - 1) * 2], 2))
return false;
}
return true;
}
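A standalone re-implementation of the byte-wise class matching above; the class code 0x0106xx and the masks are example values (roughly a SATA controller class), not taken from the patch:

#include <stdio.h>
#include <string.h>

static int match_cls(unsigned int cls, unsigned int cls_msk, const char *hwid)
{
	char buf[3];
	int i, msk, byte_shift;

	for (i = 1; i <= 3; i++) {
		byte_shift = 8 * (3 - i);
		msk = (cls_msk >> byte_shift) & 0xFF;
		if (!msk)
			continue;		/* this byte is "don't care" */
		sprintf(buf, "%02x", (cls >> byte_shift) & msk);
		if (strncmp(buf, &hwid[(i - 1) * 2], 2))
			return 0;
	}
	return 1;
}

int main(void)
{
	/* hwid "010601" as the class string; first mask out the last byte */
	printf("%d\n", match_cls(0x010600, 0xffff00, "010601"));	/* 1 */
	printf("%d\n", match_cls(0x010600, 0xffffff, "010601"));	/* 0 */
	return 0;
}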
static const struct acpi_device_id *__acpi_match_device(
struct acpi_device *device,
const struct acpi_device_id *ids,
const struct of_device_id *of_ids)
{
const struct acpi_device_id *id;
struct acpi_hardware_id *hwid;
/*
* If the device is not present, it is unnecessary to load device
* driver for it.
*/
if (!device || !device->status.present)
return NULL;
list_for_each_entry(hwid, &device->pnp.ids, list) {
/* First, check the ACPI/PNP IDs provided by the caller. */
for (id = ids; id->id[0] || id->cls; id++) {
if (id->id[0] && !strcmp((char *) id->id, hwid->id))
return id;
else if (id->cls && __acpi_match_device_cls(id, hwid))
return id;
}
/*
* Next, check ACPI_DT_NAMESPACE_HID and try to match the
* "compatible" property if found.
*
* The id returned by the below is not valid, but the only
* caller passing non-NULL of_ids here is only interested in
* whether or not the return value is NULL.
*/
if (!strcmp(ACPI_DT_NAMESPACE_HID, hwid->id)
&& acpi_of_match_device(device, of_ids))
return id;
}
return NULL;
}
/**
* acpi_match_device - Match a struct device against a given list of ACPI IDs
* @ids: Array of struct acpi_device_id object to match against.
* @dev: The device structure to match.
*
* Check if @dev has a valid ACPI handle and if there is a struct acpi_device
* object for that handle and use that object to match against a given list of
* device IDs.
*
* Return a pointer to the first matching ID on success or %NULL on failure.
*/
const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
const struct device *dev)
{
return __acpi_match_device(acpi_companion_match(dev), ids, NULL);
}
EXPORT_SYMBOL_GPL(acpi_match_device);
int acpi_match_device_ids(struct acpi_device *device,
const struct acpi_device_id *ids)
{
return __acpi_match_device(device, ids, NULL) ? 0 : -ENOENT;
}
EXPORT_SYMBOL(acpi_match_device_ids);
bool acpi_driver_match_device(struct device *dev,
const struct device_driver *drv)
{
if (!drv->acpi_match_table)
return acpi_of_match_device(ACPI_COMPANION(dev),
drv->of_match_table);
return !!__acpi_match_device(acpi_companion_match(dev),
drv->acpi_match_table, drv->of_match_table);
}
EXPORT_SYMBOL_GPL(acpi_driver_match_device);
/* --------------------------------------------------------------------------
ACPI Driver Management
-------------------------------------------------------------------------- */
/**
* acpi_bus_register_driver - register a driver with the ACPI bus
* @driver: driver being registered
*
* Registers a driver with the ACPI bus. Searches the namespace for all
* devices that match the driver's criteria and binds. Returns zero for
* success or a negative error status for failure.
*/
int acpi_bus_register_driver(struct acpi_driver *driver)
{
int ret;
if (acpi_disabled)
return -ENODEV;
driver->drv.name = driver->name;
driver->drv.bus = &acpi_bus_type;
driver->drv.owner = driver->owner;
ret = driver_register(&driver->drv);
return ret;
}
EXPORT_SYMBOL(acpi_bus_register_driver);
/**
* acpi_bus_unregister_driver - unregisters a driver with the ACPI bus
* @driver: driver to unregister
*
* Unregisters a driver with the ACPI bus. Searches the namespace for all
* devices that match the driver's criteria and unbinds.
*/
void acpi_bus_unregister_driver(struct acpi_driver *driver)
{
driver_unregister(&driver->drv);
}
EXPORT_SYMBOL(acpi_bus_unregister_driver);
/* --------------------------------------------------------------------------
ACPI Bus operations
-------------------------------------------------------------------------- */
static int acpi_bus_match(struct device *dev, struct device_driver *drv)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
struct acpi_driver *acpi_drv = to_acpi_driver(drv);
return acpi_dev->flags.match_driver
&& !acpi_match_device_ids(acpi_dev, acpi_drv->ids);
}
static int acpi_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
return __acpi_device_uevent_modalias(to_acpi_device(dev), env);
}
static int acpi_device_probe(struct device *dev)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver);
int ret;
if (acpi_dev->handler && !acpi_is_pnp_device(acpi_dev))
return -EINVAL;
if (!acpi_drv->ops.add)
return -ENOSYS;
ret = acpi_drv->ops.add(acpi_dev);
if (ret)
return ret;
acpi_dev->driver = acpi_drv;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Driver [%s] successfully bound to device [%s]\n",
acpi_drv->name, acpi_dev->pnp.bus_id));
if (acpi_drv->ops.notify) {
ret = acpi_device_install_notify_handler(acpi_dev);
if (ret) {
if (acpi_drv->ops.remove)
acpi_drv->ops.remove(acpi_dev);
acpi_dev->driver = NULL;
acpi_dev->driver_data = NULL;
return ret;
}
}
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found driver [%s] for device [%s]\n",
acpi_drv->name, acpi_dev->pnp.bus_id));
get_device(dev);
return 0;
}
static int acpi_device_remove(struct device * dev)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
struct acpi_driver *acpi_drv = acpi_dev->driver;
if (acpi_drv) {
if (acpi_drv->ops.notify)
acpi_device_remove_notify_handler(acpi_dev);
if (acpi_drv->ops.remove)
acpi_drv->ops.remove(acpi_dev);
}
acpi_dev->driver = NULL;
acpi_dev->driver_data = NULL;
put_device(dev);
return 0;
}
struct bus_type acpi_bus_type = {
.name = "acpi",
.match = acpi_bus_match,
.probe = acpi_device_probe,
.remove = acpi_device_remove,
.uevent = acpi_device_uevent,
};
/* -------------------------------------------------------------------------- /* --------------------------------------------------------------------------
Initialization/Cleanup Initialization/Cleanup
-------------------------------------------------------------------------- */ -------------------------------------------------------------------------- */
@ -661,7 +1068,9 @@ static int __init acpi_bus_init(void)
*/ */
acpi_root_dir = proc_mkdir(ACPI_BUS_FILE_ROOT, NULL); acpi_root_dir = proc_mkdir(ACPI_BUS_FILE_ROOT, NULL);
return 0; result = bus_register(&acpi_bus_type);
if (!result)
return 0;
/* Mimic structured exception handling */ /* Mimic structured exception handling */
error1: error1:

drivers/acpi/device_sysfs.c (new file, 521 lines)
View File

@ -0,0 +1,521 @@
/*
* drivers/acpi/device_sysfs.c - ACPI device sysfs attributes and modalias.
*
* Copyright (C) 2015, Intel Corp.
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
* Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/nls.h>
#include "internal.h"
/**
* create_pnp_modalias - Create hid/cid(s) string for modalias and uevent
* @acpi_dev: ACPI device object.
* @modalias: Buffer to print into.
* @size: Size of the buffer.
*
* Creates hid/cid(s) string needed for modalias and uevent
* e.g. on a device with hid:IBM0001 and cid:ACPI0001 you get:
* char *modalias: "acpi:IBM0001:ACPI0001"
* Return: 0: no _HID and no _CID
* -EINVAL: output error
* -ENOMEM: output is truncated
*/
static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias,
int size)
{
int len;
int count;
struct acpi_hardware_id *id;
/*
* Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should
* be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the
* device's list.
*/
count = 0;
list_for_each_entry(id, &acpi_dev->pnp.ids, list)
if (strcmp(id->id, ACPI_DT_NAMESPACE_HID))
count++;
if (!count)
return 0;
len = snprintf(modalias, size, "acpi:");
if (len <= 0)
return len;
size -= len;
list_for_each_entry(id, &acpi_dev->pnp.ids, list) {
if (!strcmp(id->id, ACPI_DT_NAMESPACE_HID))
continue;
count = snprintf(&modalias[len], size, "%s:", id->id);
if (count < 0)
return -EINVAL;
if (count >= size)
return -ENOMEM;
len += count;
size -= count;
}
modalias[len] = '\0';
return len;
}
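A standalone sketch of the string create_pnp_modalias() builds: "acpi:" followed by each HID/CID, each terminated by a colon. The IBM0001/ACPI0001 IDs come from the comment above; the buffer handling here is simplified and skips the truncation checks:

#include <stdio.h>

int main(void)
{
	const char *ids[] = { "IBM0001", "ACPI0001" };	/* HID then CID */
	char modalias[128];
	unsigned int i;
	int len;

	len = snprintf(modalias, sizeof(modalias), "acpi:");
	for (i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
		len += snprintf(modalias + len, sizeof(modalias) - len,
				"%s:", ids[i]);

	printf("%s\n", modalias);	/* prints: acpi:IBM0001:ACPI0001: */
	return 0;
}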
/**
* create_of_modalias - Creates DT compatible string for modalias and uevent
* @acpi_dev: ACPI device object.
* @modalias: Buffer to print into.
* @size: Size of the buffer.
*
* Expose DT compatible modalias as of:NnameTCcompatible. This function should
* only be called for devices having ACPI_DT_NAMESPACE_HID in their list of
* ACPI/PNP IDs.
*/
static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias,
int size)
{
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
const union acpi_object *of_compatible, *obj;
int len, count;
int i, nval;
char *c;
acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
/* DT strings are all in lower case */
for (c = buf.pointer; *c != '\0'; c++)
*c = tolower(*c);
len = snprintf(modalias, size, "of:N%sT", (char *)buf.pointer);
ACPI_FREE(buf.pointer);
if (len <= 0)
return len;
of_compatible = acpi_dev->data.of_compatible;
if (of_compatible->type == ACPI_TYPE_PACKAGE) {
nval = of_compatible->package.count;
obj = of_compatible->package.elements;
} else { /* Must be ACPI_TYPE_STRING. */
nval = 1;
obj = of_compatible;
}
for (i = 0; i < nval; i++, obj++) {
count = snprintf(&modalias[len], size, "C%s",
obj->string.pointer);
if (count < 0)
return -EINVAL;
if (count >= size)
return -ENOMEM;
len += count;
size -= count;
}
modalias[len] = '\0';
return len;
}
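And the corresponding of: form from create_of_modalias(): the single ACPI object name is lowercased after "N", then every "compatible" entry is appended with a "C" prefix. The node name and compatible strings below are made up for illustration:

#include <ctype.h>
#include <stdio.h>

int main(void)
{
	char name[] = "DEV0";			/* illustrative ACPI object name */
	const char *compat[] = { "vendor,sensor-a", "vendor,sensor" };
	char modalias[128], *c;
	unsigned int i;
	int len;

	for (c = name; *c; c++)			/* DT strings are all lower case */
		*c = tolower((unsigned char)*c);

	len = snprintf(modalias, sizeof(modalias), "of:N%sT", name);
	for (i = 0; i < sizeof(compat) / sizeof(compat[0]); i++)
		len += snprintf(modalias + len, sizeof(modalias) - len,
				"C%s", compat[i]);

	printf("%s\n", modalias);  /* of:Ndev0TCvendor,sensor-aCvendor,sensor */
	return 0;
}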
int __acpi_device_uevent_modalias(struct acpi_device *adev,
struct kobj_uevent_env *env)
{
int len;
if (!adev)
return -ENODEV;
if (list_empty(&adev->pnp.ids))
return 0;
if (add_uevent_var(env, "MODALIAS="))
return -ENOMEM;
len = create_pnp_modalias(adev, &env->buf[env->buflen - 1],
sizeof(env->buf) - env->buflen);
if (len < 0)
return len;
env->buflen += len;
if (!adev->data.of_compatible)
return 0;
if (len > 0 && add_uevent_var(env, "MODALIAS="))
return -ENOMEM;
len = create_of_modalias(adev, &env->buf[env->buflen - 1],
sizeof(env->buf) - env->buflen);
if (len < 0)
return len;
env->buflen += len;
return 0;
}
/**
* acpi_device_uevent_modalias - uevent modalias for ACPI-enumerated devices.
*
* Create the uevent modalias field for ACPI-enumerated devices.
*
* Because other buses do not support ACPI HIDs & CIDs, e.g. for a device with
* hid:IBM0001 and cid:ACPI0001 you get: "acpi:IBM0001:ACPI0001".
*/
int acpi_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env)
{
return __acpi_device_uevent_modalias(acpi_companion_match(dev), env);
}
EXPORT_SYMBOL_GPL(acpi_device_uevent_modalias);
static int __acpi_device_modalias(struct acpi_device *adev, char *buf, int size)
{
int len, count;
if (!adev)
return -ENODEV;
if (list_empty(&adev->pnp.ids))
return 0;
len = create_pnp_modalias(adev, buf, size - 1);
if (len < 0) {
return len;
} else if (len > 0) {
buf[len++] = '\n';
size -= len;
}
if (!adev->data.of_compatible)
return len;
count = create_of_modalias(adev, buf + len, size - 1);
if (count < 0) {
return count;
} else if (count > 0) {
len += count;
buf[len++] = '\n';
}
return len;
}
/**
* acpi_device_modalias - modalias sysfs attribute for ACPI-enumerated devices.
*
* Create the modalias sysfs attribute for ACPI-enumerated devices.
*
* Because other buses do not support ACPI HIDs & CIDs, e.g. for a device with
* hid:IBM0001 and cid:ACPI0001 you get: "acpi:IBM0001:ACPI0001".
*/
int acpi_device_modalias(struct device *dev, char *buf, int size)
{
return __acpi_device_modalias(acpi_companion_match(dev), buf, size);
}
EXPORT_SYMBOL_GPL(acpi_device_modalias);
static ssize_t
acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, char *buf) {
return __acpi_device_modalias(to_acpi_device(dev), buf, 1024);
}
static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
static ssize_t real_power_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct acpi_device *adev = to_acpi_device(dev);
int state;
int ret;
ret = acpi_device_get_power(adev, &state);
if (ret)
return ret;
return sprintf(buf, "%s\n", acpi_power_state_string(state));
}
static DEVICE_ATTR(real_power_state, 0444, real_power_state_show, NULL);
static ssize_t power_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct acpi_device *adev = to_acpi_device(dev);
return sprintf(buf, "%s\n", acpi_power_state_string(adev->power.state));
}
static DEVICE_ATTR(power_state, 0444, power_state_show, NULL);
static ssize_t
acpi_eject_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
{
struct acpi_device *acpi_device = to_acpi_device(d);
acpi_object_type not_used;
acpi_status status;
if (!count || buf[0] != '1')
return -EINVAL;
if ((!acpi_device->handler || !acpi_device->handler->hotplug.enabled)
&& !acpi_device->driver)
return -ENODEV;
status = acpi_get_type(acpi_device->handle, &not_used);
if (ACPI_FAILURE(status) || !acpi_device->flags.ejectable)
return -ENODEV;
get_device(&acpi_device->dev);
status = acpi_hotplug_schedule(acpi_device, ACPI_OST_EC_OSPM_EJECT);
if (ACPI_SUCCESS(status))
return count;
put_device(&acpi_device->dev);
acpi_evaluate_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT,
ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
return status == AE_NO_MEMORY ? -ENOMEM : -EAGAIN;
}
static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store);
static ssize_t
acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf) {
struct acpi_device *acpi_dev = to_acpi_device(dev);
return sprintf(buf, "%s\n", acpi_device_hid(acpi_dev));
}
static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL);
static ssize_t acpi_device_uid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
return sprintf(buf, "%s\n", acpi_dev->pnp.unique_id);
}
static DEVICE_ATTR(uid, 0444, acpi_device_uid_show, NULL);
static ssize_t acpi_device_adr_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
return sprintf(buf, "0x%08x\n",
(unsigned int)(acpi_dev->pnp.bus_address));
}
static DEVICE_ATTR(adr, 0444, acpi_device_adr_show, NULL);
static ssize_t
acpi_device_path_show(struct device *dev, struct device_attribute *attr, char *buf) {
struct acpi_device *acpi_dev = to_acpi_device(dev);
struct acpi_buffer path = {ACPI_ALLOCATE_BUFFER, NULL};
int result;
result = acpi_get_name(acpi_dev->handle, ACPI_FULL_PATHNAME, &path);
if (result)
goto end;
result = sprintf(buf, "%s\n", (char*)path.pointer);
kfree(path.pointer);
end:
return result;
}
static DEVICE_ATTR(path, 0444, acpi_device_path_show, NULL);
/* sysfs file that shows description text from the ACPI _STR method */
static ssize_t description_show(struct device *dev,
struct device_attribute *attr,
char *buf) {
struct acpi_device *acpi_dev = to_acpi_device(dev);
int result;
if (acpi_dev->pnp.str_obj == NULL)
return 0;
/*
* The _STR object contains a Unicode identifier for a device.
* We need to convert to utf-8 so it can be displayed.
*/
result = utf16s_to_utf8s(
(wchar_t *)acpi_dev->pnp.str_obj->buffer.pointer,
acpi_dev->pnp.str_obj->buffer.length,
UTF16_LITTLE_ENDIAN, buf,
PAGE_SIZE);
buf[result++] = '\n';
return result;
}
static DEVICE_ATTR(description, 0444, description_show, NULL);
static ssize_t
acpi_device_sun_show(struct device *dev, struct device_attribute *attr,
char *buf) {
struct acpi_device *acpi_dev = to_acpi_device(dev);
acpi_status status;
unsigned long long sun;
status = acpi_evaluate_integer(acpi_dev->handle, "_SUN", NULL, &sun);
if (ACPI_FAILURE(status))
return -ENODEV;
return sprintf(buf, "%llu\n", sun);
}
static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL);
static ssize_t status_show(struct device *dev, struct device_attribute *attr,
char *buf) {
struct acpi_device *acpi_dev = to_acpi_device(dev);
acpi_status status;
unsigned long long sta;
status = acpi_evaluate_integer(acpi_dev->handle, "_STA", NULL, &sta);
if (ACPI_FAILURE(status))
return -ENODEV;
return sprintf(buf, "%llu\n", sta);
}
static DEVICE_ATTR_RO(status);
/**
* acpi_device_setup_files - Create sysfs attributes of an ACPI device.
* @dev: ACPI device object.
*/
int acpi_device_setup_files(struct acpi_device *dev)
{
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
acpi_status status;
int result = 0;
/*
* Devices gotten from FADT don't have a "path" attribute
*/
if (dev->handle) {
result = device_create_file(&dev->dev, &dev_attr_path);
if (result)
goto end;
}
if (!list_empty(&dev->pnp.ids)) {
result = device_create_file(&dev->dev, &dev_attr_hid);
if (result)
goto end;
result = device_create_file(&dev->dev, &dev_attr_modalias);
if (result)
goto end;
}
/*
* If device has _STR, 'description' file is created
*/
if (acpi_has_method(dev->handle, "_STR")) {
status = acpi_evaluate_object(dev->handle, "_STR",
NULL, &buffer);
if (ACPI_FAILURE(status))
buffer.pointer = NULL;
dev->pnp.str_obj = buffer.pointer;
result = device_create_file(&dev->dev, &dev_attr_description);
if (result)
goto end;
}
if (dev->pnp.type.bus_address)
result = device_create_file(&dev->dev, &dev_attr_adr);
if (dev->pnp.unique_id)
result = device_create_file(&dev->dev, &dev_attr_uid);
if (acpi_has_method(dev->handle, "_SUN")) {
result = device_create_file(&dev->dev, &dev_attr_sun);
if (result)
goto end;
}
if (acpi_has_method(dev->handle, "_STA")) {
result = device_create_file(&dev->dev, &dev_attr_status);
if (result)
goto end;
}
/*
* If device has _EJ0, 'eject' file is created that is used to trigger
* hot-removal function from userland.
*/
if (acpi_has_method(dev->handle, "_EJ0")) {
result = device_create_file(&dev->dev, &dev_attr_eject);
if (result)
return result;
}
if (dev->flags.power_manageable) {
result = device_create_file(&dev->dev, &dev_attr_power_state);
if (result)
return result;
if (dev->power.flags.power_resources)
result = device_create_file(&dev->dev,
&dev_attr_real_power_state);
}
end:
return result;
}
/**
* acpi_device_remove_files - Remove sysfs attributes of an ACPI device.
* @dev: ACPI device object.
*/
void acpi_device_remove_files(struct acpi_device *dev)
{
if (dev->flags.power_manageable) {
device_remove_file(&dev->dev, &dev_attr_power_state);
if (dev->power.flags.power_resources)
device_remove_file(&dev->dev,
&dev_attr_real_power_state);
}
/*
* If device has _STR, remove 'description' file
*/
if (acpi_has_method(dev->handle, "_STR")) {
kfree(dev->pnp.str_obj);
device_remove_file(&dev->dev, &dev_attr_description);
}
/*
* If device has _EJ0, remove 'eject' file.
*/
if (acpi_has_method(dev->handle, "_EJ0"))
device_remove_file(&dev->dev, &dev_attr_eject);
if (acpi_has_method(dev->handle, "_SUN"))
device_remove_file(&dev->dev, &dev_attr_sun);
if (dev->pnp.unique_id)
device_remove_file(&dev->dev, &dev_attr_uid);
if (dev->pnp.type.bus_address)
device_remove_file(&dev->dev, &dev_attr_adr);
device_remove_file(&dev->dev, &dev_attr_modalias);
device_remove_file(&dev->dev, &dev_attr_hid);
if (acpi_has_method(dev->handle, "_STA"))
device_remove_file(&dev->dev, &dev_attr_status);
if (dev->handle)
device_remove_file(&dev->dev, &dev_attr_path);
}

View File

@ -93,6 +93,8 @@ int acpi_device_add(struct acpi_device *device,
void (*release)(struct device *)); void (*release)(struct device *));
void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
int type, unsigned long long sta); int type, unsigned long long sta);
int acpi_device_setup_files(struct acpi_device *dev);
void acpi_device_remove_files(struct acpi_device *dev);
void acpi_device_add_finalize(struct acpi_device *device); void acpi_device_add_finalize(struct acpi_device *device);
void acpi_free_pnp_ids(struct acpi_device_pnp *pnp); void acpi_free_pnp_ids(struct acpi_device_pnp *pnp);
bool acpi_device_is_present(struct acpi_device *adev); bool acpi_device_is_present(struct acpi_device *adev);
@ -100,6 +102,13 @@ bool acpi_device_is_battery(struct acpi_device *adev);
bool acpi_device_is_first_physical_node(struct acpi_device *adev, bool acpi_device_is_first_physical_node(struct acpi_device *adev,
const struct device *dev); const struct device *dev);
/* --------------------------------------------------------------------------
Device Matching and Notification
-------------------------------------------------------------------------- */
struct acpi_device *acpi_companion_match(const struct device *dev);
int __acpi_device_uevent_modalias(struct acpi_device *adev,
struct kobj_uevent_env *env);
/* -------------------------------------------------------------------------- /* --------------------------------------------------------------------------
Power Resource Power Resource
-------------------------------------------------------------------------- */ -------------------------------------------------------------------------- */

View File

@ -193,6 +193,7 @@ static bool acpi_decode_space(struct resource_win *win,
u8 iodec = attr->granularity == 0xfff ? ACPI_DECODE_10 : ACPI_DECODE_16; u8 iodec = attr->granularity == 0xfff ? ACPI_DECODE_10 : ACPI_DECODE_16;
bool wp = addr->info.mem.write_protect; bool wp = addr->info.mem.write_protect;
u64 len = attr->address_length; u64 len = attr->address_length;
u64 start, end, offset = 0;
struct resource *res = &win->res; struct resource *res = &win->res;
/* /*
@ -204,9 +205,6 @@ static bool acpi_decode_space(struct resource_win *win,
pr_debug("ACPI: Invalid address space min_addr_fix %d, max_addr_fix %d, len %llx\n", pr_debug("ACPI: Invalid address space min_addr_fix %d, max_addr_fix %d, len %llx\n",
addr->min_address_fixed, addr->max_address_fixed, len); addr->min_address_fixed, addr->max_address_fixed, len);
res->start = attr->minimum;
res->end = attr->maximum;
/* /*
* For bridges that translate addresses across the bridge, * For bridges that translate addresses across the bridge,
* translation_offset is the offset that must be added to the * translation_offset is the offset that must be added to the
@ -214,12 +212,22 @@ static bool acpi_decode_space(struct resource_win *win,
* primary side. Non-bridge devices must list 0 for all Address * primary side. Non-bridge devices must list 0 for all Address
* Translation offset bits. * Translation offset bits.
*/ */
if (addr->producer_consumer == ACPI_PRODUCER) { if (addr->producer_consumer == ACPI_PRODUCER)
res->start += attr->translation_offset; offset = attr->translation_offset;
res->end += attr->translation_offset; else if (attr->translation_offset)
} else if (attr->translation_offset) {
pr_debug("ACPI: translation_offset(%lld) is invalid for non-bridge device.\n", pr_debug("ACPI: translation_offset(%lld) is invalid for non-bridge device.\n",
attr->translation_offset); attr->translation_offset);
start = attr->minimum + offset;
end = attr->maximum + offset;
win->offset = offset;
res->start = start;
res->end = end;
if (sizeof(resource_size_t) < sizeof(u64) &&
(offset != win->offset || start != res->start || end != res->end)) {
pr_warn("acpi resource window ([%#llx-%#llx] ignored, not CPU addressable)\n",
attr->minimum, attr->maximum);
return false;
} }
switch (addr->resource_type) { switch (addr->resource_type) {
@ -236,8 +244,6 @@ static bool acpi_decode_space(struct resource_win *win,
return false; return false;
} }
win->offset = attr->translation_offset;
if (addr->producer_consumer == ACPI_PRODUCER) if (addr->producer_consumer == ACPI_PRODUCER)
res->flags |= IORESOURCE_WINDOW; res->flags |= IORESOURCE_WINDOW;
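A standalone model of the new truncation check above: if resource_size_t is narrower than u64 (32-bit resource addresses), a window that does not fit is reported and dropped rather than silently wrapped. The 32-bit typedef and the example window are illustrative:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t resource_size_t;	/* illustrative 32-bit configuration */

int main(void)
{
	uint64_t start = 0x100000000ULL;	/* window above 4 GiB */
	uint64_t end   = 0x1ffffffffULL;
	resource_size_t res_start = (resource_size_t)start;
	resource_size_t res_end   = (resource_size_t)end;

	if (sizeof(resource_size_t) < sizeof(uint64_t) &&
	    (res_start != start || res_end != end))
		printf("window [%#llx-%#llx] ignored, not CPU addressable\n",
		       (unsigned long long)start, (unsigned long long)end);
	return 0;
}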

View File

@ -115,278 +115,6 @@ int acpi_scan_add_handler_with_hotplug(struct acpi_scan_handler *handler,
return 0; return 0;
} }
/**
* create_pnp_modalias - Create hid/cid(s) string for modalias and uevent
* @acpi_dev: ACPI device object.
* @modalias: Buffer to print into.
* @size: Size of the buffer.
*
* Creates hid/cid(s) string needed for modalias and uevent
* e.g. on a device with hid:IBM0001 and cid:ACPI0001 you get:
* char *modalias: "acpi:IBM0001:ACPI0001"
* Return: 0: no _HID and no _CID
* -EINVAL: output error
* -ENOMEM: output is truncated
*/
static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias,
int size)
{
int len;
int count;
struct acpi_hardware_id *id;
/*
* Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should
* be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the
* device's list.
*/
count = 0;
list_for_each_entry(id, &acpi_dev->pnp.ids, list)
if (strcmp(id->id, ACPI_DT_NAMESPACE_HID))
count++;
if (!count)
return 0;
len = snprintf(modalias, size, "acpi:");
if (len <= 0)
return len;
size -= len;
list_for_each_entry(id, &acpi_dev->pnp.ids, list) {
if (!strcmp(id->id, ACPI_DT_NAMESPACE_HID))
continue;
count = snprintf(&modalias[len], size, "%s:", id->id);
if (count < 0)
return -EINVAL;
if (count >= size)
return -ENOMEM;
len += count;
size -= count;
}
modalias[len] = '\0';
return len;
}
/**
* create_of_modalias - Creates DT compatible string for modalias and uevent
* @acpi_dev: ACPI device object.
* @modalias: Buffer to print into.
* @size: Size of the buffer.
*
* Expose DT compatible modalias as of:NnameTCcompatible. This function should
* only be called for devices having ACPI_DT_NAMESPACE_HID in their list of
* ACPI/PNP IDs.
*/
static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias,
int size)
{
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
const union acpi_object *of_compatible, *obj;
int len, count;
int i, nval;
char *c;
acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
/* DT strings are all in lower case */
for (c = buf.pointer; *c != '\0'; c++)
*c = tolower(*c);
len = snprintf(modalias, size, "of:N%sT", (char *)buf.pointer);
ACPI_FREE(buf.pointer);
if (len <= 0)
return len;
of_compatible = acpi_dev->data.of_compatible;
if (of_compatible->type == ACPI_TYPE_PACKAGE) {
nval = of_compatible->package.count;
obj = of_compatible->package.elements;
} else { /* Must be ACPI_TYPE_STRING. */
nval = 1;
obj = of_compatible;
}
for (i = 0; i < nval; i++, obj++) {
count = snprintf(&modalias[len], size, "C%s",
obj->string.pointer);
if (count < 0)
return -EINVAL;
if (count >= size)
return -ENOMEM;
len += count;
size -= count;
}
modalias[len] = '\0';
return len;
}
/**
* acpi_device_is_first_physical_node - Is given dev first physical node
* @adev: ACPI companion device
* @dev: Physical device to check
*
* Function checks if given @dev is the first physical devices attached to
* the ACPI companion device. This distinction is needed in some cases
* where the same companion device is shared between many physical devices.
*
* Note that the caller have to provide valid @adev pointer.
*/
bool acpi_device_is_first_physical_node(struct acpi_device *adev,
const struct device *dev)
{
bool ret = false;
mutex_lock(&adev->physical_node_lock);
if (!list_empty(&adev->physical_node_list)) {
const struct acpi_device_physical_node *node;
node = list_first_entry(&adev->physical_node_list,
struct acpi_device_physical_node, node);
ret = node->dev == dev;
}
mutex_unlock(&adev->physical_node_lock);
return ret;
}
/*
* acpi_companion_match() - Can we match via ACPI companion device
* @dev: Device in question
*
* Check if the given device has an ACPI companion and if that companion has
* a valid list of PNP IDs, and if the device is the first (primary) physical
* device associated with it. Return the companion pointer if that's the case
* or NULL otherwise.
*
* If multiple physical devices are attached to a single ACPI companion, we need
* to be careful. The usage scenario for this kind of relationship is that all
* of the physical devices in question use resources provided by the ACPI
* companion. A typical case is an MFD device where all the sub-devices share
* the parent's ACPI companion. In such cases we can only allow the primary
* (first) physical device to be matched with the help of the companion's PNP
* IDs.
*
* Additional physical devices sharing the ACPI companion can still use
* resources available from it but they will be matched normally using functions
* provided by their bus types (and analogously for their modalias).
*/
static struct acpi_device *acpi_companion_match(const struct device *dev)
{
struct acpi_device *adev;
adev = ACPI_COMPANION(dev);
if (!adev)
return NULL;
if (list_empty(&adev->pnp.ids))
return NULL;
return acpi_device_is_first_physical_node(adev, dev) ? adev : NULL;
}
static int __acpi_device_uevent_modalias(struct acpi_device *adev,
struct kobj_uevent_env *env)
{
int len;
if (!adev)
return -ENODEV;
if (list_empty(&adev->pnp.ids))
return 0;
if (add_uevent_var(env, "MODALIAS="))
return -ENOMEM;
len = create_pnp_modalias(adev, &env->buf[env->buflen - 1],
sizeof(env->buf) - env->buflen);
if (len < 0)
return len;
env->buflen += len;
if (!adev->data.of_compatible)
return 0;
if (len > 0 && add_uevent_var(env, "MODALIAS="))
return -ENOMEM;
len = create_of_modalias(adev, &env->buf[env->buflen - 1],
sizeof(env->buf) - env->buflen);
if (len < 0)
return len;
env->buflen += len;
return 0;
}
/*
* Creates uevent modalias field for ACPI enumerated devices.
* Because the other buses does not support ACPI HIDs & CIDs.
* e.g. for a device with hid:IBM0001 and cid:ACPI0001 you get:
* "acpi:IBM0001:ACPI0001"
*/
int acpi_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env)
{
return __acpi_device_uevent_modalias(acpi_companion_match(dev), env);
}
EXPORT_SYMBOL_GPL(acpi_device_uevent_modalias);
static int __acpi_device_modalias(struct acpi_device *adev, char *buf, int size)
{
int len, count;
if (!adev)
return -ENODEV;
if (list_empty(&adev->pnp.ids))
return 0;
len = create_pnp_modalias(adev, buf, size - 1);
if (len < 0) {
return len;
} else if (len > 0) {
buf[len++] = '\n';
size -= len;
}
if (!adev->data.of_compatible)
return len;
count = create_of_modalias(adev, buf + len, size - 1);
if (count < 0) {
return count;
} else if (count > 0) {
len += count;
buf[len++] = '\n';
}
return len;
}
/*
* Creates modalias sysfs attribute for ACPI enumerated devices.
* Because the other buses does not support ACPI HIDs & CIDs.
* e.g. for a device with hid:IBM0001 and cid:ACPI0001 you get:
* "acpi:IBM0001:ACPI0001"
*/
int acpi_device_modalias(struct device *dev, char *buf, int size)
{
return __acpi_device_modalias(acpi_companion_match(dev), buf, size);
}
EXPORT_SYMBOL_GPL(acpi_device_modalias);
static ssize_t
acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, char *buf) {
return __acpi_device_modalias(to_acpi_device(dev), buf, 1024);
}
static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent) bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent)
{ {
struct acpi_device_physical_node *pn; struct acpi_device_physical_node *pn;
@ -715,423 +443,6 @@ void acpi_device_hotplug(struct acpi_device *adev, u32 src)
unlock_device_hotplug(); unlock_device_hotplug();
} }
static ssize_t real_power_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct acpi_device *adev = to_acpi_device(dev);
int state;
int ret;
ret = acpi_device_get_power(adev, &state);
if (ret)
return ret;
return sprintf(buf, "%s\n", acpi_power_state_string(state));
}
static DEVICE_ATTR(real_power_state, 0444, real_power_state_show, NULL);
static ssize_t power_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct acpi_device *adev = to_acpi_device(dev);
return sprintf(buf, "%s\n", acpi_power_state_string(adev->power.state));
}
static DEVICE_ATTR(power_state, 0444, power_state_show, NULL);
static ssize_t
acpi_eject_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
{
struct acpi_device *acpi_device = to_acpi_device(d);
acpi_object_type not_used;
acpi_status status;
if (!count || buf[0] != '1')
return -EINVAL;
if ((!acpi_device->handler || !acpi_device->handler->hotplug.enabled)
&& !acpi_device->driver)
return -ENODEV;
status = acpi_get_type(acpi_device->handle, &not_used);
if (ACPI_FAILURE(status) || !acpi_device->flags.ejectable)
return -ENODEV;
get_device(&acpi_device->dev);
status = acpi_hotplug_schedule(acpi_device, ACPI_OST_EC_OSPM_EJECT);
if (ACPI_SUCCESS(status))
return count;
put_device(&acpi_device->dev);
acpi_evaluate_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT,
ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
return status == AE_NO_MEMORY ? -ENOMEM : -EAGAIN;
}
static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store);
static ssize_t
acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf) {
struct acpi_device *acpi_dev = to_acpi_device(dev);
return sprintf(buf, "%s\n", acpi_device_hid(acpi_dev));
}
static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL);
static ssize_t acpi_device_uid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
return sprintf(buf, "%s\n", acpi_dev->pnp.unique_id);
}
static DEVICE_ATTR(uid, 0444, acpi_device_uid_show, NULL);
static ssize_t acpi_device_adr_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
return sprintf(buf, "0x%08x\n",
(unsigned int)(acpi_dev->pnp.bus_address));
}
static DEVICE_ATTR(adr, 0444, acpi_device_adr_show, NULL);
static ssize_t
acpi_device_path_show(struct device *dev, struct device_attribute *attr, char *buf) {
struct acpi_device *acpi_dev = to_acpi_device(dev);
struct acpi_buffer path = {ACPI_ALLOCATE_BUFFER, NULL};
int result;
result = acpi_get_name(acpi_dev->handle, ACPI_FULL_PATHNAME, &path);
if (result)
goto end;
result = sprintf(buf, "%s\n", (char*)path.pointer);
kfree(path.pointer);
end:
return result;
}
static DEVICE_ATTR(path, 0444, acpi_device_path_show, NULL);
/* sysfs file that shows description text from the ACPI _STR method */
static ssize_t description_show(struct device *dev,
struct device_attribute *attr,
char *buf) {
struct acpi_device *acpi_dev = to_acpi_device(dev);
int result;
if (acpi_dev->pnp.str_obj == NULL)
return 0;
/*
* The _STR object contains a Unicode identifier for a device.
* We need to convert to utf-8 so it can be displayed.
*/
result = utf16s_to_utf8s(
(wchar_t *)acpi_dev->pnp.str_obj->buffer.pointer,
acpi_dev->pnp.str_obj->buffer.length,
UTF16_LITTLE_ENDIAN, buf,
PAGE_SIZE);
buf[result++] = '\n';
return result;
}
static DEVICE_ATTR(description, 0444, description_show, NULL);
static ssize_t
acpi_device_sun_show(struct device *dev, struct device_attribute *attr,
char *buf) {
struct acpi_device *acpi_dev = to_acpi_device(dev);
acpi_status status;
unsigned long long sun;
status = acpi_evaluate_integer(acpi_dev->handle, "_SUN", NULL, &sun);
if (ACPI_FAILURE(status))
return -ENODEV;
return sprintf(buf, "%llu\n", sun);
}
static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL);
static ssize_t status_show(struct device *dev, struct device_attribute *attr,
char *buf) {
struct acpi_device *acpi_dev = to_acpi_device(dev);
acpi_status status;
unsigned long long sta;
status = acpi_evaluate_integer(acpi_dev->handle, "_STA", NULL, &sta);
if (ACPI_FAILURE(status))
return -ENODEV;
return sprintf(buf, "%llu\n", sta);
}
static DEVICE_ATTR_RO(status);
static int acpi_device_setup_files(struct acpi_device *dev)
{
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
acpi_status status;
int result = 0;
/*
* Devices gotten from FADT don't have a "path" attribute
*/
if (dev->handle) {
result = device_create_file(&dev->dev, &dev_attr_path);
if (result)
goto end;
}
if (!list_empty(&dev->pnp.ids)) {
result = device_create_file(&dev->dev, &dev_attr_hid);
if (result)
goto end;
result = device_create_file(&dev->dev, &dev_attr_modalias);
if (result)
goto end;
}
/*
* If device has _STR, 'description' file is created
*/
if (acpi_has_method(dev->handle, "_STR")) {
status = acpi_evaluate_object(dev->handle, "_STR",
NULL, &buffer);
if (ACPI_FAILURE(status))
buffer.pointer = NULL;
dev->pnp.str_obj = buffer.pointer;
result = device_create_file(&dev->dev, &dev_attr_description);
if (result)
goto end;
}
if (dev->pnp.type.bus_address)
result = device_create_file(&dev->dev, &dev_attr_adr);
if (dev->pnp.unique_id)
result = device_create_file(&dev->dev, &dev_attr_uid);
if (acpi_has_method(dev->handle, "_SUN")) {
result = device_create_file(&dev->dev, &dev_attr_sun);
if (result)
goto end;
}
if (acpi_has_method(dev->handle, "_STA")) {
result = device_create_file(&dev->dev, &dev_attr_status);
if (result)
goto end;
}
/*
* If device has _EJ0, 'eject' file is created that is used to trigger
* hot-removal function from userland.
*/
if (acpi_has_method(dev->handle, "_EJ0")) {
result = device_create_file(&dev->dev, &dev_attr_eject);
if (result)
return result;
}
if (dev->flags.power_manageable) {
result = device_create_file(&dev->dev, &dev_attr_power_state);
if (result)
return result;
if (dev->power.flags.power_resources)
result = device_create_file(&dev->dev,
&dev_attr_real_power_state);
}
end:
return result;
}
static void acpi_device_remove_files(struct acpi_device *dev)
{
if (dev->flags.power_manageable) {
device_remove_file(&dev->dev, &dev_attr_power_state);
if (dev->power.flags.power_resources)
device_remove_file(&dev->dev,
&dev_attr_real_power_state);
}
/*
* If device has _STR, remove 'description' file
*/
if (acpi_has_method(dev->handle, "_STR")) {
kfree(dev->pnp.str_obj);
device_remove_file(&dev->dev, &dev_attr_description);
}
/*
* If device has _EJ0, remove 'eject' file.
*/
if (acpi_has_method(dev->handle, "_EJ0"))
device_remove_file(&dev->dev, &dev_attr_eject);
if (acpi_has_method(dev->handle, "_SUN"))
device_remove_file(&dev->dev, &dev_attr_sun);
if (dev->pnp.unique_id)
device_remove_file(&dev->dev, &dev_attr_uid);
if (dev->pnp.type.bus_address)
device_remove_file(&dev->dev, &dev_attr_adr);
device_remove_file(&dev->dev, &dev_attr_modalias);
device_remove_file(&dev->dev, &dev_attr_hid);
if (acpi_has_method(dev->handle, "_STA"))
device_remove_file(&dev->dev, &dev_attr_status);
if (dev->handle)
device_remove_file(&dev->dev, &dev_attr_path);
}
/* --------------------------------------------------------------------------
ACPI Bus operations
-------------------------------------------------------------------------- */
/**
* acpi_of_match_device - Match device object using the "compatible" property.
* @adev: ACPI device object to match.
* @of_match_table: List of device IDs to match against.
*
* If @dev has an ACPI companion which has ACPI_DT_NAMESPACE_HID in its list of
* identifiers and a _DSD object with the "compatible" property, use that
* property to match against the given list of identifiers.
*/
static bool acpi_of_match_device(struct acpi_device *adev,
const struct of_device_id *of_match_table)
{
const union acpi_object *of_compatible, *obj;
int i, nval;
if (!adev)
return false;
of_compatible = adev->data.of_compatible;
if (!of_match_table || !of_compatible)
return false;
if (of_compatible->type == ACPI_TYPE_PACKAGE) {
nval = of_compatible->package.count;
obj = of_compatible->package.elements;
} else { /* Must be ACPI_TYPE_STRING. */
nval = 1;
obj = of_compatible;
}
/* Now we can look for the driver DT compatible strings */
for (i = 0; i < nval; i++, obj++) {
const struct of_device_id *id;
for (id = of_match_table; id->compatible[0]; id++)
if (!strcasecmp(obj->string.pointer, id->compatible))
return true;
}
return false;
}
static bool __acpi_match_device_cls(const struct acpi_device_id *id,
struct acpi_hardware_id *hwid)
{
int i, msk, byte_shift;
char buf[3];
if (!id->cls)
return false;
/* Apply class-code bitmask, before checking each class-code byte */
for (i = 1; i <= 3; i++) {
byte_shift = 8 * (3 - i);
msk = (id->cls_msk >> byte_shift) & 0xFF;
if (!msk)
continue;
sprintf(buf, "%02x", (id->cls >> byte_shift) & msk);
if (strncmp(buf, &hwid->id[(i - 1) * 2], 2))
return false;
}
return true;
}
static const struct acpi_device_id *__acpi_match_device(
struct acpi_device *device,
const struct acpi_device_id *ids,
const struct of_device_id *of_ids)
{
const struct acpi_device_id *id;
struct acpi_hardware_id *hwid;
/*
* If the device is not present, it is unnecessary to load device
* driver for it.
*/
if (!device || !device->status.present)
return NULL;
list_for_each_entry(hwid, &device->pnp.ids, list) {
/* First, check the ACPI/PNP IDs provided by the caller. */
for (id = ids; id->id[0] || id->cls; id++) {
if (id->id[0] && !strcmp((char *) id->id, hwid->id))
return id;
else if (id->cls && __acpi_match_device_cls(id, hwid))
return id;
}
/*
* Next, check ACPI_DT_NAMESPACE_HID and try to match the
* "compatible" property if found.
*
* The id returned by the below is not valid, but the only
* caller passing non-NULL of_ids here is only interested in
* whether or not the return value is NULL.
*/
if (!strcmp(ACPI_DT_NAMESPACE_HID, hwid->id)
&& acpi_of_match_device(device, of_ids))
return id;
}
return NULL;
}
/**
* acpi_match_device - Match a struct device against a given list of ACPI IDs
* @ids: Array of struct acpi_device_id object to match against.
* @dev: The device structure to match.
*
* Check if @dev has a valid ACPI handle and if there is a struct acpi_device
* object for that handle and use that object to match against a given list of
* device IDs.
*
* Return a pointer to the first matching ID on success or %NULL on failure.
*/
const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
const struct device *dev)
{
return __acpi_match_device(acpi_companion_match(dev), ids, NULL);
}
EXPORT_SYMBOL_GPL(acpi_match_device);
int acpi_match_device_ids(struct acpi_device *device,
const struct acpi_device_id *ids)
{
return __acpi_match_device(device, ids, NULL) ? 0 : -ENOENT;
}
EXPORT_SYMBOL(acpi_match_device_ids);
bool acpi_driver_match_device(struct device *dev,
const struct device_driver *drv)
{
if (!drv->acpi_match_table)
return acpi_of_match_device(ACPI_COMPANION(dev),
drv->of_match_table);
return !!__acpi_match_device(acpi_companion_match(dev),
drv->acpi_match_table, drv->of_match_table);
}
EXPORT_SYMBOL_GPL(acpi_driver_match_device);
static void acpi_free_power_resources_lists(struct acpi_device *device) static void acpi_free_power_resources_lists(struct acpi_device *device)
{ {
int i; int i;
@ -1158,144 +469,6 @@ static void acpi_device_release(struct device *dev)
kfree(acpi_dev); kfree(acpi_dev);
} }
static int acpi_bus_match(struct device *dev, struct device_driver *drv)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
struct acpi_driver *acpi_drv = to_acpi_driver(drv);
return acpi_dev->flags.match_driver
&& !acpi_match_device_ids(acpi_dev, acpi_drv->ids);
}
static int acpi_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
return __acpi_device_uevent_modalias(to_acpi_device(dev), env);
}
static void acpi_device_notify(acpi_handle handle, u32 event, void *data)
{
struct acpi_device *device = data;
device->driver->ops.notify(device, event);
}
static void acpi_device_notify_fixed(void *data)
{
struct acpi_device *device = data;
/* Fixed hardware devices have no handles */
acpi_device_notify(NULL, ACPI_FIXED_HARDWARE_EVENT, device);
}
static u32 acpi_device_fixed_event(void *data)
{
acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_device_notify_fixed, data);
return ACPI_INTERRUPT_HANDLED;
}
static int acpi_device_install_notify_handler(struct acpi_device *device)
{
acpi_status status;
if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
status =
acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
acpi_device_fixed_event,
device);
else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
status =
acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
acpi_device_fixed_event,
device);
else
status = acpi_install_notify_handler(device->handle,
ACPI_DEVICE_NOTIFY,
acpi_device_notify,
device);
if (ACPI_FAILURE(status))
return -EINVAL;
return 0;
}
static void acpi_device_remove_notify_handler(struct acpi_device *device)
{
if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
acpi_device_fixed_event);
else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
acpi_device_fixed_event);
else
acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
acpi_device_notify);
}
static int acpi_device_probe(struct device *dev)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver);
int ret;
if (acpi_dev->handler && !acpi_is_pnp_device(acpi_dev))
return -EINVAL;
if (!acpi_drv->ops.add)
return -ENOSYS;
ret = acpi_drv->ops.add(acpi_dev);
if (ret)
return ret;
acpi_dev->driver = acpi_drv;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Driver [%s] successfully bound to device [%s]\n",
acpi_drv->name, acpi_dev->pnp.bus_id));
if (acpi_drv->ops.notify) {
ret = acpi_device_install_notify_handler(acpi_dev);
if (ret) {
if (acpi_drv->ops.remove)
acpi_drv->ops.remove(acpi_dev);
acpi_dev->driver = NULL;
acpi_dev->driver_data = NULL;
return ret;
}
}
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found driver [%s] for device [%s]\n",
acpi_drv->name, acpi_dev->pnp.bus_id));
get_device(dev);
return 0;
}
static int acpi_device_remove(struct device * dev)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
struct acpi_driver *acpi_drv = acpi_dev->driver;
if (acpi_drv) {
if (acpi_drv->ops.notify)
acpi_device_remove_notify_handler(acpi_dev);
if (acpi_drv->ops.remove)
acpi_drv->ops.remove(acpi_dev);
}
acpi_dev->driver = NULL;
acpi_dev->driver_data = NULL;
put_device(dev);
return 0;
}
struct bus_type acpi_bus_type = {
.name = "acpi",
.match = acpi_bus_match,
.probe = acpi_device_probe,
.remove = acpi_device_remove,
.uevent = acpi_device_uevent,
};
static void acpi_device_del(struct acpi_device *device) static void acpi_device_del(struct acpi_device *device)
{ {
mutex_lock(&acpi_device_lock); mutex_lock(&acpi_device_lock);
@ -1542,47 +715,6 @@ struct acpi_device *acpi_get_next_child(struct device *dev,
return next == head ? NULL : list_entry(next, struct acpi_device, node); return next == head ? NULL : list_entry(next, struct acpi_device, node);
} }
/* --------------------------------------------------------------------------
Driver Management
-------------------------------------------------------------------------- */
/**
* acpi_bus_register_driver - register a driver with the ACPI bus
* @driver: driver being registered
*
* Registers a driver with the ACPI bus. Searches the namespace for all
* devices that match the driver's criteria and binds. Returns zero for
* success or a negative error status for failure.
*/
int acpi_bus_register_driver(struct acpi_driver *driver)
{
int ret;
if (acpi_disabled)
return -ENODEV;
driver->drv.name = driver->name;
driver->drv.bus = &acpi_bus_type;
driver->drv.owner = driver->owner;
ret = driver_register(&driver->drv);
return ret;
}
EXPORT_SYMBOL(acpi_bus_register_driver);
/**
* acpi_bus_unregister_driver - unregisters a driver with the ACPI bus
* @driver: driver to unregister
*
* Unregisters a driver with the ACPI bus. Searches the namespace for all
* devices that match the driver's criteria and unbinds.
*/
void acpi_bus_unregister_driver(struct acpi_driver *driver)
{
driver_unregister(&driver->drv);
}
EXPORT_SYMBOL(acpi_bus_unregister_driver);
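For reference, a minimal registration sketch using the two helpers documented above; the foo_* table, callbacks, and names are hypothetical placeholders:
	#include <linux/module.h>
	#include <linux/acpi.h>

	static const struct acpi_device_id foo_ids[] = {
		{ "ABCD0001", 0 },	/* hypothetical _HID */
		{ }
	};

	static int foo_add(struct acpi_device *adev)
	{
		return 0;	/* claim the device */
	}

	static int foo_remove(struct acpi_device *adev)
	{
		return 0;
	}

	static struct acpi_driver foo_driver = {
		.name	= "foo",
		.class	= "foo",
		.ids	= foo_ids,
		.ops	= {
			.add	= foo_add,
			.remove	= foo_remove,
		},
	};

	static int __init foo_init(void)
	{
		return acpi_bus_register_driver(&foo_driver);
	}
	module_init(foo_init);

	static void __exit foo_exit(void)
	{
		acpi_bus_unregister_driver(&foo_driver);
	}
	module_exit(foo_exit);
	MODULE_LICENSE("GPL");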
/* -------------------------------------------------------------------------- /* --------------------------------------------------------------------------
Device Enumeration Device Enumeration
-------------------------------------------------------------------------- */ -------------------------------------------------------------------------- */
@ -2758,12 +1890,6 @@ int __init acpi_scan_init(void)
{ {
int result; int result;
result = bus_register(&acpi_bus_type);
if (result) {
/* We don't want to quit even if we failed to add suspend/resume */
printk(KERN_ERR PREFIX "Could not register bus type\n");
}
acpi_pci_root_init(); acpi_pci_root_init();
acpi_pci_link_init(); acpi_pci_link_init();
acpi_processor_init(); acpi_processor_init();

View File

@ -2108,8 +2108,17 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
goto out_free_disk; goto out_free_disk;
add_disk(ns->disk); add_disk(ns->disk);
if (ns->ms) if (ns->ms) {
revalidate_disk(ns->disk); struct block_device *bd = bdget_disk(ns->disk, 0);
if (!bd)
return;
if (blkdev_get(bd, FMODE_READ, NULL)) {
bdput(bd);
return;
}
blkdev_reread_part(bd);
blkdev_put(bd, FMODE_READ);
}
return; return;
out_free_disk: out_free_disk:
kfree(disk); kfree(disk);

View File

@ -129,8 +129,9 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev,
device_initialize(&chip->dev); device_initialize(&chip->dev);
chip->cdev.owner = chip->pdev->driver->owner;
cdev_init(&chip->cdev, &tpm_fops); cdev_init(&chip->cdev, &tpm_fops);
chip->cdev.owner = chip->pdev->driver->owner;
chip->cdev.kobj.parent = &chip->dev.kobj;
return chip; return chip;
} }

View File

@ -233,6 +233,14 @@ static int crb_acpi_add(struct acpi_device *device)
return -ENODEV; return -ENODEV;
} }
/* At least some versions of AMI BIOS have a bug where the TPM2 table
* reports a zero address for the control area, in which case we must fail.
*/
if (!buf->control_area_pa) {
dev_err(dev, "TPM2 ACPI table has a zero address for the control area\n");
return -EINVAL;
}
if (buf->hdr.length < sizeof(struct acpi_tpm2)) { if (buf->hdr.length < sizeof(struct acpi_tpm2)) {
dev_err(dev, "TPM2 ACPI table has wrong size"); dev_err(dev, "TPM2 ACPI table has wrong size");
return -EINVAL; return -EINVAL;

View File

@ -169,6 +169,15 @@ struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
} }
EXPORT_SYMBOL_GPL(get_governor_parent_kobj); EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
{
struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
return policy && !policy_is_inactive(policy) ?
policy->freq_table : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
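With the helper above now returning NULL for inactive policies as well, a caller can walk the returned table as in this minimal sketch (foo_dump_freqs is an illustrative name, assuming only that cpu is a valid CPU number):
	#include <linux/cpufreq.h>
	#include <linux/kernel.h>

	static void foo_dump_freqs(unsigned int cpu)
	{
		struct cpufreq_frequency_table *table;
		int i;

		table = cpufreq_frequency_get_table(cpu);
		if (!table)
			return;		/* no policy for this CPU, or policy inactive */

		for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
			if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
				continue;
			pr_info("cpu%u: %u kHz\n", cpu, table[i].frequency);
		}
	}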
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{ {
u64 idle_time; u64 idle_time;
@ -1132,6 +1141,7 @@ static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
down_write(&policy->rwsem); down_write(&policy->rwsem);
policy->cpu = cpu; policy->cpu = cpu;
policy->governor = NULL;
up_write(&policy->rwsem); up_write(&policy->rwsem);
} }

View File

@ -297,15 +297,6 @@ int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
} }
EXPORT_SYMBOL_GPL(cpufreq_table_validate_and_show); EXPORT_SYMBOL_GPL(cpufreq_table_validate_and_show);
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
return policy ? policy->freq_table : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>"); MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION("CPUfreq frequency table helpers"); MODULE_DESCRIPTION("CPUfreq frequency table helpers");
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");

View File

@ -112,7 +112,12 @@ int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
static void enter_freeze_proper(struct cpuidle_driver *drv, static void enter_freeze_proper(struct cpuidle_driver *drv,
struct cpuidle_device *dev, int index) struct cpuidle_device *dev, int index)
{ {
tick_freeze(); /*
* trace_suspend_resume() called by tick_freeze() for the last CPU
* executing it contains RCU usage regarded as invalid in the idle
* context, so tell RCU about that.
*/
RCU_NONIDLE(tick_freeze());
/* /*
* The state used here cannot be a "coupled" one, because the "coupled" * The state used here cannot be a "coupled" one, because the "coupled"
* cpuidle mechanism enables interrupts and doing that with timekeeping * cpuidle mechanism enables interrupts and doing that with timekeeping
@ -122,7 +127,7 @@ static void enter_freeze_proper(struct cpuidle_driver *drv,
WARN_ON(!irqs_disabled()); WARN_ON(!irqs_disabled());
/* /*
* timekeeping_resume() that will be called by tick_unfreeze() for the * timekeeping_resume() that will be called by tick_unfreeze() for the
* last CPU executing it calls functions containing RCU read-side * first CPU executing it calls functions containing RCU read-side
* critical sections, so tell RCU about that. * critical sections, so tell RCU about that.
*/ */
RCU_NONIDLE(tick_unfreeze()); RCU_NONIDLE(tick_unfreeze());

View File

@ -494,8 +494,9 @@ out:
static int ccm4309_aes_nx_encrypt(struct aead_request *req) static int ccm4309_aes_nx_encrypt(struct aead_request *req)
{ {
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
struct nx_gcm_rctx *rctx = aead_request_ctx(req);
struct blkcipher_desc desc; struct blkcipher_desc desc;
u8 *iv = nx_ctx->priv.ccm.iv; u8 *iv = rctx->iv;
iv[0] = 3; iv[0] = 3;
memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3); memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
@ -525,8 +526,9 @@ static int ccm_aes_nx_encrypt(struct aead_request *req)
static int ccm4309_aes_nx_decrypt(struct aead_request *req) static int ccm4309_aes_nx_decrypt(struct aead_request *req)
{ {
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
struct nx_gcm_rctx *rctx = aead_request_ctx(req);
struct blkcipher_desc desc; struct blkcipher_desc desc;
u8 *iv = nx_ctx->priv.ccm.iv; u8 *iv = rctx->iv;
iv[0] = 3; iv[0] = 3;
memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3); memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);

View File

@ -72,7 +72,7 @@ static int ctr3686_aes_nx_set_key(struct crypto_tfm *tfm,
if (key_len < CTR_RFC3686_NONCE_SIZE) if (key_len < CTR_RFC3686_NONCE_SIZE)
return -EINVAL; return -EINVAL;
memcpy(nx_ctx->priv.ctr.iv, memcpy(nx_ctx->priv.ctr.nonce,
in_key + key_len - CTR_RFC3686_NONCE_SIZE, in_key + key_len - CTR_RFC3686_NONCE_SIZE,
CTR_RFC3686_NONCE_SIZE); CTR_RFC3686_NONCE_SIZE);
@ -131,14 +131,15 @@ static int ctr3686_aes_nx_crypt(struct blkcipher_desc *desc,
unsigned int nbytes) unsigned int nbytes)
{ {
struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
u8 *iv = nx_ctx->priv.ctr.iv; u8 iv[16];
memcpy(iv, nx_ctx->priv.ctr.nonce, CTR_RFC3686_IV_SIZE);
memcpy(iv + CTR_RFC3686_NONCE_SIZE, memcpy(iv + CTR_RFC3686_NONCE_SIZE,
desc->info, CTR_RFC3686_IV_SIZE); desc->info, CTR_RFC3686_IV_SIZE);
iv[12] = iv[13] = iv[14] = 0; iv[12] = iv[13] = iv[14] = 0;
iv[15] = 1; iv[15] = 1;
desc->info = nx_ctx->priv.ctr.iv; desc->info = iv;
return ctr_aes_nx_crypt(desc, dst, src, nbytes); return ctr_aes_nx_crypt(desc, dst, src, nbytes);
} }

View File

@ -317,6 +317,7 @@ out:
static int gcm_aes_nx_crypt(struct aead_request *req, int enc) static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
{ {
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
struct nx_gcm_rctx *rctx = aead_request_ctx(req);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct blkcipher_desc desc; struct blkcipher_desc desc;
unsigned int nbytes = req->cryptlen; unsigned int nbytes = req->cryptlen;
@ -326,7 +327,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
spin_lock_irqsave(&nx_ctx->lock, irq_flags); spin_lock_irqsave(&nx_ctx->lock, irq_flags);
desc.info = nx_ctx->priv.gcm.iv; desc.info = rctx->iv;
/* initialize the counter */ /* initialize the counter */
*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1; *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
@ -424,8 +425,8 @@ out:
static int gcm_aes_nx_encrypt(struct aead_request *req) static int gcm_aes_nx_encrypt(struct aead_request *req)
{ {
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); struct nx_gcm_rctx *rctx = aead_request_ctx(req);
char *iv = nx_ctx->priv.gcm.iv; char *iv = rctx->iv;
memcpy(iv, req->iv, 12); memcpy(iv, req->iv, 12);
@ -434,8 +435,8 @@ static int gcm_aes_nx_encrypt(struct aead_request *req)
static int gcm_aes_nx_decrypt(struct aead_request *req) static int gcm_aes_nx_decrypt(struct aead_request *req)
{ {
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); struct nx_gcm_rctx *rctx = aead_request_ctx(req);
char *iv = nx_ctx->priv.gcm.iv; char *iv = rctx->iv;
memcpy(iv, req->iv, 12); memcpy(iv, req->iv, 12);
@ -445,7 +446,8 @@ static int gcm_aes_nx_decrypt(struct aead_request *req)
static int gcm4106_aes_nx_encrypt(struct aead_request *req) static int gcm4106_aes_nx_encrypt(struct aead_request *req)
{ {
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
char *iv = nx_ctx->priv.gcm.iv; struct nx_gcm_rctx *rctx = aead_request_ctx(req);
char *iv = rctx->iv;
char *nonce = nx_ctx->priv.gcm.nonce; char *nonce = nx_ctx->priv.gcm.nonce;
memcpy(iv, nonce, NX_GCM4106_NONCE_LEN); memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
@ -457,7 +459,8 @@ static int gcm4106_aes_nx_encrypt(struct aead_request *req)
static int gcm4106_aes_nx_decrypt(struct aead_request *req) static int gcm4106_aes_nx_decrypt(struct aead_request *req)
{ {
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
char *iv = nx_ctx->priv.gcm.iv; struct nx_gcm_rctx *rctx = aead_request_ctx(req);
char *iv = rctx->iv;
char *nonce = nx_ctx->priv.gcm.nonce; char *nonce = nx_ctx->priv.gcm.nonce;
memcpy(iv, nonce, NX_GCM4106_NONCE_LEN); memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);

View File

@ -42,6 +42,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc,
unsigned int key_len) unsigned int key_len)
{ {
struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc); struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
switch (key_len) { switch (key_len) {
case AES_KEYSIZE_128: case AES_KEYSIZE_128:
@ -51,7 +52,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc,
return -EINVAL; return -EINVAL;
} }
memcpy(nx_ctx->priv.xcbc.key, in_key, key_len); memcpy(csbcpb->cpb.aes_xcbc.key, in_key, key_len);
return 0; return 0;
} }
@ -148,32 +149,29 @@ out:
return rc; return rc;
} }
static int nx_xcbc_init(struct shash_desc *desc) static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_tfm *tfm)
{ {
struct xcbc_state *sctx = shash_desc_ctx(desc); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct nx_sg *out_sg; int err;
int len;
err = nx_crypto_ctx_aes_xcbc_init(tfm);
if (err)
return err;
nx_ctx_init(nx_ctx, HCOP_FC_AES); nx_ctx_init(nx_ctx, HCOP_FC_AES);
memset(sctx, 0, sizeof *sctx);
NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128); NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC; csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;
memcpy(csbcpb->cpb.aes_xcbc.key, nx_ctx->priv.xcbc.key, AES_BLOCK_SIZE); return 0;
memset(nx_ctx->priv.xcbc.key, 0, sizeof *nx_ctx->priv.xcbc.key); }
len = AES_BLOCK_SIZE; static int nx_xcbc_init(struct shash_desc *desc)
out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, {
&len, nx_ctx->ap->sglen); struct xcbc_state *sctx = shash_desc_ctx(desc);
if (len != AES_BLOCK_SIZE) memset(sctx, 0, sizeof *sctx);
return -EINVAL;
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
return 0; return 0;
} }
@ -186,6 +184,7 @@ static int nx_xcbc_update(struct shash_desc *desc,
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct nx_sg *in_sg; struct nx_sg *in_sg;
struct nx_sg *out_sg;
u32 to_process = 0, leftover, total; u32 to_process = 0, leftover, total;
unsigned int max_sg_len; unsigned int max_sg_len;
unsigned long irq_flags; unsigned long irq_flags;
@ -213,6 +212,17 @@ static int nx_xcbc_update(struct shash_desc *desc,
max_sg_len = min_t(u64, max_sg_len, max_sg_len = min_t(u64, max_sg_len,
nx_ctx->ap->databytelen/NX_PAGE_SIZE); nx_ctx->ap->databytelen/NX_PAGE_SIZE);
data_len = AES_BLOCK_SIZE;
out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
&len, nx_ctx->ap->sglen);
if (data_len != AES_BLOCK_SIZE) {
rc = -EINVAL;
goto out;
}
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
do { do {
to_process = total - to_process; to_process = total - to_process;
to_process = to_process & ~(AES_BLOCK_SIZE - 1); to_process = to_process & ~(AES_BLOCK_SIZE - 1);
@ -235,8 +245,10 @@ static int nx_xcbc_update(struct shash_desc *desc,
(u8 *) sctx->buffer, (u8 *) sctx->buffer,
&data_len, &data_len,
max_sg_len); max_sg_len);
if (data_len != sctx->count) if (data_len != sctx->count) {
return -EINVAL; rc = -EINVAL;
goto out;
}
} }
data_len = to_process - sctx->count; data_len = to_process - sctx->count;
@ -245,8 +257,10 @@ static int nx_xcbc_update(struct shash_desc *desc,
&data_len, &data_len,
max_sg_len); max_sg_len);
if (data_len != to_process - sctx->count) if (data_len != to_process - sctx->count) {
return -EINVAL; rc = -EINVAL;
goto out;
}
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
sizeof(struct nx_sg); sizeof(struct nx_sg);
@ -325,15 +339,19 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer, in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
&len, nx_ctx->ap->sglen); &len, nx_ctx->ap->sglen);
if (len != sctx->count) if (len != sctx->count) {
return -EINVAL; rc = -EINVAL;
goto out;
}
len = AES_BLOCK_SIZE; len = AES_BLOCK_SIZE;
out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
nx_ctx->ap->sglen); nx_ctx->ap->sglen);
if (len != AES_BLOCK_SIZE) if (len != AES_BLOCK_SIZE) {
return -EINVAL; rc = -EINVAL;
goto out;
}
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
@ -372,7 +390,7 @@ struct shash_alg nx_shash_aes_xcbc_alg = {
.cra_blocksize = AES_BLOCK_SIZE, .cra_blocksize = AES_BLOCK_SIZE,
.cra_module = THIS_MODULE, .cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx), .cra_ctxsize = sizeof(struct nx_crypto_ctx),
.cra_init = nx_crypto_ctx_aes_xcbc_init, .cra_init = nx_crypto_ctx_aes_xcbc_init2,
.cra_exit = nx_crypto_ctx_exit, .cra_exit = nx_crypto_ctx_exit,
} }
}; };

View File

@ -29,34 +29,28 @@
#include "nx.h" #include "nx.h"
static int nx_sha256_init(struct shash_desc *desc) static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
{ {
struct sha256_state *sctx = shash_desc_ctx(desc); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); int err;
struct nx_sg *out_sg;
int len; err = nx_crypto_ctx_sha_init(tfm);
u32 max_sg_len; if (err)
return err;
nx_ctx_init(nx_ctx, HCOP_FC_SHA); nx_ctx_init(nx_ctx, HCOP_FC_SHA);
memset(sctx, 0, sizeof *sctx);
nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256]; nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256];
NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256); NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);
max_sg_len = min_t(u64, nx_ctx->ap->sglen, return 0;
nx_driver.of.max_sg_len/sizeof(struct nx_sg)); }
max_sg_len = min_t(u64, max_sg_len,
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
len = SHA256_DIGEST_SIZE; static int nx_sha256_init(struct shash_desc *desc) {
out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, struct sha256_state *sctx = shash_desc_ctx(desc);
&len, max_sg_len);
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
if (len != SHA256_DIGEST_SIZE) memset(sctx, 0, sizeof *sctx);
return -EINVAL;
sctx->state[0] = __cpu_to_be32(SHA256_H0); sctx->state[0] = __cpu_to_be32(SHA256_H0);
sctx->state[1] = __cpu_to_be32(SHA256_H1); sctx->state[1] = __cpu_to_be32(SHA256_H1);
@ -78,6 +72,7 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct nx_sg *in_sg; struct nx_sg *in_sg;
struct nx_sg *out_sg;
u64 to_process = 0, leftover, total; u64 to_process = 0, leftover, total;
unsigned long irq_flags; unsigned long irq_flags;
int rc = 0; int rc = 0;
@ -108,6 +103,16 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
max_sg_len = min_t(u64, max_sg_len, max_sg_len = min_t(u64, max_sg_len,
nx_ctx->ap->databytelen/NX_PAGE_SIZE); nx_ctx->ap->databytelen/NX_PAGE_SIZE);
data_len = SHA256_DIGEST_SIZE;
out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
&data_len, max_sg_len);
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
if (data_len != SHA256_DIGEST_SIZE) {
rc = -EINVAL;
goto out;
}
do { do {
/* /*
* to_process: the SHA256_BLOCK_SIZE data chunk to process in * to_process: the SHA256_BLOCK_SIZE data chunk to process in
@ -282,7 +287,7 @@ struct shash_alg nx_shash_sha256_alg = {
.cra_blocksize = SHA256_BLOCK_SIZE, .cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE, .cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx), .cra_ctxsize = sizeof(struct nx_crypto_ctx),
.cra_init = nx_crypto_ctx_sha_init, .cra_init = nx_crypto_ctx_sha256_init,
.cra_exit = nx_crypto_ctx_exit, .cra_exit = nx_crypto_ctx_exit,
} }
}; };

View File

@ -28,34 +28,29 @@
#include "nx.h" #include "nx.h"
static int nx_sha512_init(struct shash_desc *desc) static int nx_crypto_ctx_sha512_init(struct crypto_tfm *tfm)
{ {
struct sha512_state *sctx = shash_desc_ctx(desc); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); int err;
struct nx_sg *out_sg;
int len; err = nx_crypto_ctx_sha_init(tfm);
u32 max_sg_len; if (err)
return err;
nx_ctx_init(nx_ctx, HCOP_FC_SHA); nx_ctx_init(nx_ctx, HCOP_FC_SHA);
memset(sctx, 0, sizeof *sctx);
nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512]; nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512];
NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512); NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512);
max_sg_len = min_t(u64, nx_ctx->ap->sglen, return 0;
nx_driver.of.max_sg_len/sizeof(struct nx_sg)); }
max_sg_len = min_t(u64, max_sg_len,
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
len = SHA512_DIGEST_SIZE; static int nx_sha512_init(struct shash_desc *desc)
out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, {
&len, max_sg_len); struct sha512_state *sctx = shash_desc_ctx(desc);
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
if (len != SHA512_DIGEST_SIZE) memset(sctx, 0, sizeof *sctx);
return -EINVAL;
sctx->state[0] = __cpu_to_be64(SHA512_H0); sctx->state[0] = __cpu_to_be64(SHA512_H0);
sctx->state[1] = __cpu_to_be64(SHA512_H1); sctx->state[1] = __cpu_to_be64(SHA512_H1);
@ -77,6 +72,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct nx_sg *in_sg; struct nx_sg *in_sg;
struct nx_sg *out_sg;
u64 to_process, leftover = 0, total; u64 to_process, leftover = 0, total;
unsigned long irq_flags; unsigned long irq_flags;
int rc = 0; int rc = 0;
@ -107,6 +103,16 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
max_sg_len = min_t(u64, max_sg_len, max_sg_len = min_t(u64, max_sg_len,
nx_ctx->ap->databytelen/NX_PAGE_SIZE); nx_ctx->ap->databytelen/NX_PAGE_SIZE);
data_len = SHA512_DIGEST_SIZE;
out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
&data_len, max_sg_len);
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
if (data_len != SHA512_DIGEST_SIZE) {
rc = -EINVAL;
goto out;
}
do { do {
/* /*
* to_process: the SHA512_BLOCK_SIZE data chunk to process in * to_process: the SHA512_BLOCK_SIZE data chunk to process in
@ -288,7 +294,7 @@ struct shash_alg nx_shash_sha512_alg = {
.cra_blocksize = SHA512_BLOCK_SIZE, .cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE, .cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx), .cra_ctxsize = sizeof(struct nx_crypto_ctx),
.cra_init = nx_crypto_ctx_sha_init, .cra_init = nx_crypto_ctx_sha512_init,
.cra_exit = nx_crypto_ctx_exit, .cra_exit = nx_crypto_ctx_exit,
} }
}; };

View File

@ -713,12 +713,15 @@ static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
/* entry points from the crypto tfm initializers */ /* entry points from the crypto tfm initializers */
int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm) int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm)
{ {
crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
sizeof(struct nx_ccm_rctx));
return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES, return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
NX_MODE_AES_CCM); NX_MODE_AES_CCM);
} }
int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm) int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm)
{ {
crypto_aead_set_reqsize(tfm, sizeof(struct nx_gcm_rctx));
return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES, return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES,
NX_MODE_AES_GCM); NX_MODE_AES_GCM);
} }
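The reqsize/aead_request_ctx() pattern used here is generic to AEAD drivers that need per-request scratch state; a minimal sketch with a hypothetical foo driver (not the nx code itself) could look like this:
	#include <linux/string.h>
	#include <crypto/aead.h>
	#include <crypto/internal/aead.h>

	struct foo_rctx {
		u8 iv[16];	/* per-request IV, safe under concurrent requests */
	};

	static int foo_aead_init(struct crypto_aead *tfm)
	{
		/* Reserve room for struct foo_rctx inside every aead_request. */
		crypto_aead_set_reqsize(tfm, sizeof(struct foo_rctx));
		return 0;
	}

	static int foo_encrypt(struct aead_request *req)
	{
		struct foo_rctx *rctx = aead_request_ctx(req);

		memcpy(rctx->iv, req->iv, sizeof(rctx->iv));
		/* ... use rctx->iv instead of a tfm-wide buffer ... */
		return 0;
	}
Keeping the IV in the request context rather than in the transform-wide private data is what removes the reentrancy hazard the surrounding changes address.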

View File

@ -2,6 +2,8 @@
#ifndef __NX_H__ #ifndef __NX_H__
#define __NX_H__ #define __NX_H__
#include <crypto/ctr.h>
#define NX_NAME "nx-crypto" #define NX_NAME "nx-crypto"
#define NX_STRING "IBM Power7+ Nest Accelerator Crypto Driver" #define NX_STRING "IBM Power7+ Nest Accelerator Crypto Driver"
#define NX_VERSION "1.0" #define NX_VERSION "1.0"
@ -91,8 +93,11 @@ struct nx_crypto_driver {
#define NX_GCM4106_NONCE_LEN (4) #define NX_GCM4106_NONCE_LEN (4)
#define NX_GCM_CTR_OFFSET (12) #define NX_GCM_CTR_OFFSET (12)
struct nx_gcm_priv { struct nx_gcm_rctx {
u8 iv[16]; u8 iv[16];
};
struct nx_gcm_priv {
u8 iauth_tag[16]; u8 iauth_tag[16];
u8 nonce[NX_GCM4106_NONCE_LEN]; u8 nonce[NX_GCM4106_NONCE_LEN];
}; };
@ -100,8 +105,11 @@ struct nx_gcm_priv {
#define NX_CCM_AES_KEY_LEN (16) #define NX_CCM_AES_KEY_LEN (16)
#define NX_CCM4309_AES_KEY_LEN (19) #define NX_CCM4309_AES_KEY_LEN (19)
#define NX_CCM4309_NONCE_LEN (3) #define NX_CCM4309_NONCE_LEN (3)
struct nx_ccm_priv { struct nx_ccm_rctx {
u8 iv[16]; u8 iv[16];
};
struct nx_ccm_priv {
u8 b0[16]; u8 b0[16];
u8 iauth_tag[16]; u8 iauth_tag[16];
u8 oauth_tag[16]; u8 oauth_tag[16];
@ -113,7 +121,7 @@ struct nx_xcbc_priv {
}; };
struct nx_ctr_priv { struct nx_ctr_priv {
u8 iv[16]; u8 nonce[CTR_RFC3686_NONCE_SIZE];
}; };
struct nx_crypto_ctx { struct nx_crypto_ctx {

View File

@ -536,9 +536,6 @@ static int omap_des_crypt_dma_stop(struct omap_des_dev *dd)
dmaengine_terminate_all(dd->dma_lch_in); dmaengine_terminate_all(dd->dma_lch_in);
dmaengine_terminate_all(dd->dma_lch_out); dmaengine_terminate_all(dd->dma_lch_out);
dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
return err; return err;
} }

View File

@ -88,7 +88,7 @@ void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *
struct ib_ah *ah; struct ib_ah *ah;
struct ib_mad_send_wr_private *mad_send_wr; struct ib_mad_send_wr_private *mad_send_wr;
if (device->node_type == RDMA_NODE_IB_SWITCH) if (rdma_cap_ib_switch(device))
port_priv = ib_get_agent_port(device, 0); port_priv = ib_get_agent_port(device, 0);
else else
port_priv = ib_get_agent_port(device, port_num); port_priv = ib_get_agent_port(device, port_num);
@ -122,7 +122,7 @@ void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *
memcpy(send_buf->mad, mad_hdr, resp_mad_len); memcpy(send_buf->mad, mad_hdr, resp_mad_len);
send_buf->ah = ah; send_buf->ah = ah;
if (device->node_type == RDMA_NODE_IB_SWITCH) { if (rdma_cap_ib_switch(device)) {
mad_send_wr = container_of(send_buf, mad_send_wr = container_of(send_buf,
struct ib_mad_send_wr_private, struct ib_mad_send_wr_private,
send_buf); send_buf);

View File

@ -169,6 +169,7 @@ struct cm_device {
struct ib_device *ib_device; struct ib_device *ib_device;
struct device *device; struct device *device;
u8 ack_delay; u8 ack_delay;
int going_down;
struct cm_port *port[0]; struct cm_port *port[0];
}; };
@ -805,6 +806,11 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{ {
int wait_time; int wait_time;
unsigned long flags; unsigned long flags;
struct cm_device *cm_dev;
cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
if (!cm_dev)
return;
spin_lock_irqsave(&cm.lock, flags); spin_lock_irqsave(&cm.lock, flags);
cm_cleanup_timewait(cm_id_priv->timewait_info); cm_cleanup_timewait(cm_id_priv->timewait_info);
@ -818,8 +824,14 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
*/ */
cm_id_priv->id.state = IB_CM_TIMEWAIT; cm_id_priv->id.state = IB_CM_TIMEWAIT;
wait_time = cm_convert_to_ms(cm_id_priv->av.timeout); wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);
queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
msecs_to_jiffies(wait_time)); /* Check if the device started its remove_one */
spin_lock_irq(&cm.lock);
if (!cm_dev->going_down)
queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
msecs_to_jiffies(wait_time));
spin_unlock_irq(&cm.lock);
cm_id_priv->timewait_info = NULL; cm_id_priv->timewait_info = NULL;
} }
@ -3305,6 +3317,11 @@ static int cm_establish(struct ib_cm_id *cm_id)
struct cm_work *work; struct cm_work *work;
unsigned long flags; unsigned long flags;
int ret = 0; int ret = 0;
struct cm_device *cm_dev;
cm_dev = ib_get_client_data(cm_id->device, &cm_client);
if (!cm_dev)
return -ENODEV;
work = kmalloc(sizeof *work, GFP_ATOMIC); work = kmalloc(sizeof *work, GFP_ATOMIC);
if (!work) if (!work)
@ -3343,7 +3360,17 @@ static int cm_establish(struct ib_cm_id *cm_id)
work->remote_id = cm_id->remote_id; work->remote_id = cm_id->remote_id;
work->mad_recv_wc = NULL; work->mad_recv_wc = NULL;
work->cm_event.event = IB_CM_USER_ESTABLISHED; work->cm_event.event = IB_CM_USER_ESTABLISHED;
queue_delayed_work(cm.wq, &work->work, 0);
/* Check if the device started its remove_one */
spin_lock_irq(&cm.lock);
if (!cm_dev->going_down) {
queue_delayed_work(cm.wq, &work->work, 0);
} else {
kfree(work);
ret = -ENODEV;
}
spin_unlock_irq(&cm.lock);
out: out:
return ret; return ret;
} }
@ -3394,6 +3421,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
enum ib_cm_event_type event; enum ib_cm_event_type event;
u16 attr_id; u16 attr_id;
int paths = 0; int paths = 0;
int going_down = 0;
switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) { switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
case CM_REQ_ATTR_ID: case CM_REQ_ATTR_ID:
@ -3452,7 +3480,19 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
work->cm_event.event = event; work->cm_event.event = event;
work->mad_recv_wc = mad_recv_wc; work->mad_recv_wc = mad_recv_wc;
work->port = port; work->port = port;
queue_delayed_work(cm.wq, &work->work, 0);
/* Check if the device started its remove_one */
spin_lock_irq(&cm.lock);
if (!port->cm_dev->going_down)
queue_delayed_work(cm.wq, &work->work, 0);
else
going_down = 1;
spin_unlock_irq(&cm.lock);
if (going_down) {
kfree(work);
ib_free_recv_mad(mad_recv_wc);
}
} }
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv, static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
@ -3771,7 +3811,7 @@ static void cm_add_one(struct ib_device *ib_device)
cm_dev->ib_device = ib_device; cm_dev->ib_device = ib_device;
cm_get_ack_delay(cm_dev); cm_get_ack_delay(cm_dev);
cm_dev->going_down = 0;
cm_dev->device = device_create(&cm_class, &ib_device->dev, cm_dev->device = device_create(&cm_class, &ib_device->dev,
MKDEV(0, 0), NULL, MKDEV(0, 0), NULL,
"%s", ib_device->name); "%s", ib_device->name);
@ -3864,14 +3904,23 @@ static void cm_remove_one(struct ib_device *ib_device)
list_del(&cm_dev->list); list_del(&cm_dev->list);
write_unlock_irqrestore(&cm.device_lock, flags); write_unlock_irqrestore(&cm.device_lock, flags);
spin_lock_irq(&cm.lock);
cm_dev->going_down = 1;
spin_unlock_irq(&cm.lock);
for (i = 1; i <= ib_device->phys_port_cnt; i++) { for (i = 1; i <= ib_device->phys_port_cnt; i++) {
if (!rdma_cap_ib_cm(ib_device, i)) if (!rdma_cap_ib_cm(ib_device, i))
continue; continue;
port = cm_dev->port[i-1]; port = cm_dev->port[i-1];
ib_modify_port(ib_device, port->port_num, 0, &port_modify); ib_modify_port(ib_device, port->port_num, 0, &port_modify);
ib_unregister_mad_agent(port->mad_agent); /*
* We flush the queue here after going_down has been set; this
* ensures that no new work will be queued in the recv handler,
* and only after that do we call unregister_mad_agent.
*/
flush_workqueue(cm.wq); flush_workqueue(cm.wq);
ib_unregister_mad_agent(port->mad_agent);
cm_remove_port_fs(port); cm_remove_port_fs(port);
} }
device_unregister(cm_dev->device); device_unregister(cm_dev->device);

View File

@ -67,7 +67,8 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
err_str = "Invalid port mapper client"; err_str = "Invalid port mapper client";
goto pid_query_error; goto pid_query_error;
} }
if (iwpm_registered_client(nl_client)) if (iwpm_check_registration(nl_client, IWPM_REG_VALID) ||
iwpm_user_pid == IWPM_PID_UNAVAILABLE)
return 0; return 0;
skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REG_PID, &nlh, nl_client); skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REG_PID, &nlh, nl_client);
if (!skb) { if (!skb) {
@ -106,7 +107,6 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_IWPM, GFP_KERNEL); ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_IWPM, GFP_KERNEL);
if (ret) { if (ret) {
skb = NULL; /* skb is freed in the netlink send-op handling */ skb = NULL; /* skb is freed in the netlink send-op handling */
iwpm_set_registered(nl_client, 1);
iwpm_user_pid = IWPM_PID_UNAVAILABLE; iwpm_user_pid = IWPM_PID_UNAVAILABLE;
err_str = "Unable to send a nlmsg"; err_str = "Unable to send a nlmsg";
goto pid_query_error; goto pid_query_error;
@ -144,12 +144,12 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
err_str = "Invalid port mapper client"; err_str = "Invalid port mapper client";
goto add_mapping_error; goto add_mapping_error;
} }
if (!iwpm_registered_client(nl_client)) { if (!iwpm_valid_pid())
return 0;
if (!iwpm_check_registration(nl_client, IWPM_REG_VALID)) {
err_str = "Unregistered port mapper client"; err_str = "Unregistered port mapper client";
goto add_mapping_error; goto add_mapping_error;
} }
if (!iwpm_valid_pid())
return 0;
skb = iwpm_create_nlmsg(RDMA_NL_IWPM_ADD_MAPPING, &nlh, nl_client); skb = iwpm_create_nlmsg(RDMA_NL_IWPM_ADD_MAPPING, &nlh, nl_client);
if (!skb) { if (!skb) {
err_str = "Unable to create a nlmsg"; err_str = "Unable to create a nlmsg";
@ -214,12 +214,12 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
err_str = "Invalid port mapper client"; err_str = "Invalid port mapper client";
goto query_mapping_error; goto query_mapping_error;
} }
if (!iwpm_registered_client(nl_client)) { if (!iwpm_valid_pid())
return 0;
if (!iwpm_check_registration(nl_client, IWPM_REG_VALID)) {
err_str = "Unregistered port mapper client"; err_str = "Unregistered port mapper client";
goto query_mapping_error; goto query_mapping_error;
} }
if (!iwpm_valid_pid())
return 0;
ret = -ENOMEM; ret = -ENOMEM;
skb = iwpm_create_nlmsg(RDMA_NL_IWPM_QUERY_MAPPING, &nlh, nl_client); skb = iwpm_create_nlmsg(RDMA_NL_IWPM_QUERY_MAPPING, &nlh, nl_client);
if (!skb) { if (!skb) {
@ -288,12 +288,12 @@ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client)
err_str = "Invalid port mapper client"; err_str = "Invalid port mapper client";
goto remove_mapping_error; goto remove_mapping_error;
} }
if (!iwpm_registered_client(nl_client)) { if (!iwpm_valid_pid())
return 0;
if (iwpm_check_registration(nl_client, IWPM_REG_UNDEF)) {
err_str = "Unregistered port mapper client"; err_str = "Unregistered port mapper client";
goto remove_mapping_error; goto remove_mapping_error;
} }
if (!iwpm_valid_pid())
return 0;
skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REMOVE_MAPPING, &nlh, nl_client); skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REMOVE_MAPPING, &nlh, nl_client);
if (!skb) { if (!skb) {
ret = -ENOMEM; ret = -ENOMEM;
@ -388,7 +388,7 @@ int iwpm_register_pid_cb(struct sk_buff *skb, struct netlink_callback *cb)
pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n", pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n",
__func__, iwpm_user_pid); __func__, iwpm_user_pid);
if (iwpm_valid_client(nl_client)) if (iwpm_valid_client(nl_client))
iwpm_set_registered(nl_client, 1); iwpm_set_registration(nl_client, IWPM_REG_VALID);
register_pid_response_exit: register_pid_response_exit:
nlmsg_request->request_done = 1; nlmsg_request->request_done = 1;
/* always for found nlmsg_request */ /* always for found nlmsg_request */
@ -644,7 +644,6 @@ int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
{ {
struct nlattr *nltb[IWPM_NLA_MAPINFO_REQ_MAX]; struct nlattr *nltb[IWPM_NLA_MAPINFO_REQ_MAX];
const char *msg_type = "Mapping Info response"; const char *msg_type = "Mapping Info response";
int iwpm_pid;
u8 nl_client; u8 nl_client;
char *iwpm_name; char *iwpm_name;
u16 iwpm_version; u16 iwpm_version;
@ -669,14 +668,14 @@ int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
__func__, nl_client); __func__, nl_client);
return ret; return ret;
} }
iwpm_set_registered(nl_client, 0); iwpm_set_registration(nl_client, IWPM_REG_INCOMPL);
atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq); atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
iwpm_user_pid = cb->nlh->nlmsg_pid;
if (!iwpm_mapinfo_available()) if (!iwpm_mapinfo_available())
return 0; return 0;
iwpm_pid = cb->nlh->nlmsg_pid;
pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n", pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n",
__func__, iwpm_pid); __func__, iwpm_user_pid);
ret = iwpm_send_mapinfo(nl_client, iwpm_pid); ret = iwpm_send_mapinfo(nl_client, iwpm_user_pid);
return ret; return ret;
} }
EXPORT_SYMBOL(iwpm_mapping_info_cb); EXPORT_SYMBOL(iwpm_mapping_info_cb);

View File

@ -78,6 +78,7 @@ init_exit:
mutex_unlock(&iwpm_admin_lock); mutex_unlock(&iwpm_admin_lock);
if (!ret) { if (!ret) {
iwpm_set_valid(nl_client, 1); iwpm_set_valid(nl_client, 1);
iwpm_set_registration(nl_client, IWPM_REG_UNDEF);
pr_debug("%s: Mapinfo and reminfo tables are created\n", pr_debug("%s: Mapinfo and reminfo tables are created\n",
__func__); __func__);
} }
@ -106,6 +107,7 @@ int iwpm_exit(u8 nl_client)
} }
mutex_unlock(&iwpm_admin_lock); mutex_unlock(&iwpm_admin_lock);
iwpm_set_valid(nl_client, 0); iwpm_set_valid(nl_client, 0);
iwpm_set_registration(nl_client, IWPM_REG_UNDEF);
return 0; return 0;
} }
EXPORT_SYMBOL(iwpm_exit); EXPORT_SYMBOL(iwpm_exit);
@ -397,17 +399,23 @@ void iwpm_set_valid(u8 nl_client, int valid)
} }
/* valid client */ /* valid client */
int iwpm_registered_client(u8 nl_client) u32 iwpm_get_registration(u8 nl_client)
{ {
return iwpm_admin.reg_list[nl_client]; return iwpm_admin.reg_list[nl_client];
} }
/* valid client */ /* valid client */
void iwpm_set_registered(u8 nl_client, int reg) void iwpm_set_registration(u8 nl_client, u32 reg)
{ {
iwpm_admin.reg_list[nl_client] = reg; iwpm_admin.reg_list[nl_client] = reg;
} }
/* valid client */
u32 iwpm_check_registration(u8 nl_client, u32 reg)
{
return (iwpm_get_registration(nl_client) & reg);
}
int iwpm_compare_sockaddr(struct sockaddr_storage *a_sockaddr, int iwpm_compare_sockaddr(struct sockaddr_storage *a_sockaddr,
struct sockaddr_storage *b_sockaddr) struct sockaddr_storage *b_sockaddr)
{ {

View File

@ -58,6 +58,10 @@
#define IWPM_PID_UNDEFINED -1 #define IWPM_PID_UNDEFINED -1
#define IWPM_PID_UNAVAILABLE -2 #define IWPM_PID_UNAVAILABLE -2
#define IWPM_REG_UNDEF 0x01
#define IWPM_REG_VALID 0x02
#define IWPM_REG_INCOMPL 0x04
struct iwpm_nlmsg_request { struct iwpm_nlmsg_request {
struct list_head inprocess_list; struct list_head inprocess_list;
__u32 nlmsg_seq; __u32 nlmsg_seq;
@ -88,7 +92,7 @@ struct iwpm_admin_data {
atomic_t refcount; atomic_t refcount;
atomic_t nlmsg_seq; atomic_t nlmsg_seq;
int client_list[RDMA_NL_NUM_CLIENTS]; int client_list[RDMA_NL_NUM_CLIENTS];
int reg_list[RDMA_NL_NUM_CLIENTS]; u32 reg_list[RDMA_NL_NUM_CLIENTS];
}; };
/** /**
@ -159,19 +163,31 @@ int iwpm_valid_client(u8 nl_client);
void iwpm_set_valid(u8 nl_client, int valid); void iwpm_set_valid(u8 nl_client, int valid);
/** /**
* iwpm_registered_client - Check if the port mapper client is registered * iwpm_check_registration - Check if the client registration
* matches the given one
* @nl_client: The index of the netlink client * @nl_client: The index of the netlink client
* @reg: The given registration type to compare with
* *
* Call iwpm_register_pid() to register a client * Call iwpm_register_pid() to register a client
* Returns true if the client registration matches reg,
* otherwise returns false
*/ */
int iwpm_registered_client(u8 nl_client); u32 iwpm_check_registration(u8 nl_client, u32 reg);
/** /**
* iwpm_set_registered - Set the port mapper client to registered or not * iwpm_set_registration - Set the client registration
* @nl_client: The index of the netlink client * @nl_client: The index of the netlink client
* @reg: 1 if registered or 0 if not * @reg: Registration type to set
*/ */
void iwpm_set_registered(u8 nl_client, int reg); void iwpm_set_registration(u8 nl_client, u32 reg);
/**
* iwpm_get_registration
* @nl_client: The index of the netlink client
*
* Returns the client registration type
*/
u32 iwpm_get_registration(u8 nl_client);
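Taken together with the IWPM_REG_* flags above, a typical check in a caller might look like the following sketch (foo_check_mapper is illustrative and assumes the declarations from this header are in scope):
	static int foo_check_mapper(u8 nl_client)
	{
		if (!iwpm_valid_pid())
			return 0;	/* user-space port mapper not running yet */

		if (!iwpm_check_registration(nl_client, IWPM_REG_VALID)) {
			pr_debug("nl_client %u is not registered with the port mapper\n",
				 nl_client);
			return -EINVAL;
		}

		return 0;
	}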
/** /**
* iwpm_send_mapinfo - Send local and mapped IPv4/IPv6 address info of * iwpm_send_mapinfo - Send local and mapped IPv4/IPv6 address info of

View File

@ -769,7 +769,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
mad_agent_priv->qp_info->port_priv->port_num); mad_agent_priv->qp_info->port_priv->port_num);
if (device->node_type == RDMA_NODE_IB_SWITCH && if (rdma_cap_ib_switch(device) &&
smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
port_num = send_wr->wr.ud.port_num; port_num = send_wr->wr.ud.port_num;
else else
@ -787,14 +787,15 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
if ((opa_get_smp_direction(opa_smp) if ((opa_get_smp_direction(opa_smp)
? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) == ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
OPA_LID_PERMISSIVE && OPA_LID_PERMISSIVE &&
opa_smi_handle_dr_smp_send(opa_smp, device->node_type, opa_smi_handle_dr_smp_send(opa_smp,
rdma_cap_ib_switch(device),
port_num) == IB_SMI_DISCARD) { port_num) == IB_SMI_DISCARD) {
ret = -EINVAL; ret = -EINVAL;
dev_err(&device->dev, "OPA Invalid directed route\n"); dev_err(&device->dev, "OPA Invalid directed route\n");
goto out; goto out;
} }
opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid); opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
if (opa_drslid != OPA_LID_PERMISSIVE && if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
opa_drslid & 0xffff0000) { opa_drslid & 0xffff0000) {
ret = -EINVAL; ret = -EINVAL;
dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n", dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
@ -810,7 +811,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
} else { } else {
if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) == if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
IB_LID_PERMISSIVE && IB_LID_PERMISSIVE &&
smi_handle_dr_smp_send(smp, device->node_type, port_num) == smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
IB_SMI_DISCARD) { IB_SMI_DISCARD) {
ret = -EINVAL; ret = -EINVAL;
dev_err(&device->dev, "Invalid directed route\n"); dev_err(&device->dev, "Invalid directed route\n");
@ -2030,7 +2031,7 @@ static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv
struct ib_smp *smp = (struct ib_smp *)recv->mad; struct ib_smp *smp = (struct ib_smp *)recv->mad;
if (smi_handle_dr_smp_recv(smp, if (smi_handle_dr_smp_recv(smp,
port_priv->device->node_type, rdma_cap_ib_switch(port_priv->device),
port_num, port_num,
port_priv->device->phys_port_cnt) == port_priv->device->phys_port_cnt) ==
IB_SMI_DISCARD) IB_SMI_DISCARD)
@ -2042,13 +2043,13 @@ static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv
if (retsmi == IB_SMI_SEND) { /* don't forward */ if (retsmi == IB_SMI_SEND) { /* don't forward */
if (smi_handle_dr_smp_send(smp, if (smi_handle_dr_smp_send(smp,
port_priv->device->node_type, rdma_cap_ib_switch(port_priv->device),
port_num) == IB_SMI_DISCARD) port_num) == IB_SMI_DISCARD)
return IB_SMI_DISCARD; return IB_SMI_DISCARD;
if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD) if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
return IB_SMI_DISCARD; return IB_SMI_DISCARD;
} else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) { } else if (rdma_cap_ib_switch(port_priv->device)) {
/* forward case for switches */ /* forward case for switches */
memcpy(response, recv, mad_priv_size(response)); memcpy(response, recv, mad_priv_size(response));
response->header.recv_wc.wc = &response->header.wc; response->header.recv_wc.wc = &response->header.wc;
@ -2115,7 +2116,7 @@ handle_opa_smi(struct ib_mad_port_private *port_priv,
struct opa_smp *smp = (struct opa_smp *)recv->mad; struct opa_smp *smp = (struct opa_smp *)recv->mad;
if (opa_smi_handle_dr_smp_recv(smp, if (opa_smi_handle_dr_smp_recv(smp,
port_priv->device->node_type, rdma_cap_ib_switch(port_priv->device),
port_num, port_num,
port_priv->device->phys_port_cnt) == port_priv->device->phys_port_cnt) ==
IB_SMI_DISCARD) IB_SMI_DISCARD)
@ -2127,7 +2128,7 @@ handle_opa_smi(struct ib_mad_port_private *port_priv,
if (retsmi == IB_SMI_SEND) { /* don't forward */ if (retsmi == IB_SMI_SEND) { /* don't forward */
if (opa_smi_handle_dr_smp_send(smp, if (opa_smi_handle_dr_smp_send(smp,
port_priv->device->node_type, rdma_cap_ib_switch(port_priv->device),
port_num) == IB_SMI_DISCARD) port_num) == IB_SMI_DISCARD)
return IB_SMI_DISCARD; return IB_SMI_DISCARD;
@ -2135,7 +2136,7 @@ handle_opa_smi(struct ib_mad_port_private *port_priv,
IB_SMI_DISCARD) IB_SMI_DISCARD)
return IB_SMI_DISCARD; return IB_SMI_DISCARD;
} else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) { } else if (rdma_cap_ib_switch(port_priv->device)) {
/* forward case for switches */ /* forward case for switches */
memcpy(response, recv, mad_priv_size(response)); memcpy(response, recv, mad_priv_size(response));
response->header.recv_wc.wc = &response->header.wc; response->header.recv_wc.wc = &response->header.wc;
@ -2235,7 +2236,7 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
goto out; goto out;
} }
if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) if (rdma_cap_ib_switch(port_priv->device))
port_num = wc->port_num; port_num = wc->port_num;
else else
port_num = port_priv->port_num; port_num = port_priv->port_num;
@ -3297,17 +3298,11 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
static void ib_mad_init_device(struct ib_device *device) static void ib_mad_init_device(struct ib_device *device)
{ {
int start, end, i; int start, i;
if (device->node_type == RDMA_NODE_IB_SWITCH) { start = rdma_start_port(device);
start = 0;
end = 0;
} else {
start = 1;
end = device->phys_port_cnt;
}
for (i = start; i <= end; i++) { for (i = start; i <= rdma_end_port(device); i++) {
if (!rdma_cap_ib_mad(device, i)) if (!rdma_cap_ib_mad(device, i))
continue; continue;
@ -3342,17 +3337,9 @@ error:
static void ib_mad_remove_device(struct ib_device *device) static void ib_mad_remove_device(struct ib_device *device)
{ {
int start, end, i; int i;
if (device->node_type == RDMA_NODE_IB_SWITCH) { for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
start = 0;
end = 0;
} else {
start = 1;
end = device->phys_port_cnt;
}
for (i = start; i <= end; i++) {
if (!rdma_cap_ib_mad(device, i)) if (!rdma_cap_ib_mad(device, i))
continue; continue;
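The node-type special-casing removed above collapses into the generic port-range helpers; the canonical walk looks roughly like the sketch below (foo_setup_ports is a hypothetical name, assuming device is a valid struct ib_device pointer):
	#include <rdma/ib_verbs.h>

	static void foo_setup_ports(struct ib_device *device)
	{
		int p;

		/* Switches report a single port 0; HCAs report 1..phys_port_cnt. */
		for (p = rdma_start_port(device); p <= rdma_end_port(device); p++) {
			if (!rdma_cap_ib_mad(device, p))
				continue;
			/* per-port setup would go here */
		}
	}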

View File

@ -812,12 +812,8 @@ static void mcast_add_one(struct ib_device *device)
if (!dev) if (!dev)
return; return;
if (device->node_type == RDMA_NODE_IB_SWITCH) dev->start_port = rdma_start_port(device);
dev->start_port = dev->end_port = 0; dev->end_port = rdma_end_port(device);
else {
dev->start_port = 1;
dev->end_port = device->phys_port_cnt;
}
for (i = 0; i <= dev->end_port - dev->start_port; i++) { for (i = 0; i <= dev->end_port - dev->start_port; i++) {
if (!rdma_cap_ib_mcast(device, dev->start_port + i)) if (!rdma_cap_ib_mcast(device, dev->start_port + i))

View File

@ -39,12 +39,12 @@
#include "smi.h" #include "smi.h"
enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, u8 node_type, enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch,
int port_num, int phys_port_cnt); int port_num, int phys_port_cnt);
int opa_smi_get_fwd_port(struct opa_smp *smp); int opa_smi_get_fwd_port(struct opa_smp *smp);
extern enum smi_forward_action opa_smi_check_forward_dr_smp(struct opa_smp *smp); extern enum smi_forward_action opa_smi_check_forward_dr_smp(struct opa_smp *smp);
extern enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp, extern enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
u8 node_type, int port_num); bool is_switch, int port_num);
/* /*
* Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM

View File

@ -1156,12 +1156,8 @@ static void ib_sa_add_one(struct ib_device *device)
int s, e, i; int s, e, i;
int count = 0; int count = 0;
if (device->node_type == RDMA_NODE_IB_SWITCH) s = rdma_start_port(device);
s = e = 0; e = rdma_end_port(device);
else {
s = 1;
e = device->phys_port_cnt;
}
sa_dev = kzalloc(sizeof *sa_dev + sa_dev = kzalloc(sizeof *sa_dev +
(e - s + 1) * sizeof (struct ib_sa_port), (e - s + 1) * sizeof (struct ib_sa_port),

View File

@ -41,7 +41,7 @@
#include "smi.h" #include "smi.h"
#include "opa_smi.h" #include "opa_smi.h"
static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num, static enum smi_action __smi_handle_dr_smp_send(bool is_switch, int port_num,
u8 *hop_ptr, u8 hop_cnt, u8 *hop_ptr, u8 hop_cnt,
const u8 *initial_path, const u8 *initial_path,
const u8 *return_path, const u8 *return_path,
@ -64,7 +64,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
/* C14-9:2 */ /* C14-9:2 */
if (*hop_ptr && *hop_ptr < hop_cnt) { if (*hop_ptr && *hop_ptr < hop_cnt) {
if (node_type != RDMA_NODE_IB_SWITCH) if (!is_switch)
return IB_SMI_DISCARD; return IB_SMI_DISCARD;
/* return_path set when received */ /* return_path set when received */
@ -77,7 +77,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
if (*hop_ptr == hop_cnt) { if (*hop_ptr == hop_cnt) {
/* return_path set when received */ /* return_path set when received */
(*hop_ptr)++; (*hop_ptr)++;
return (node_type == RDMA_NODE_IB_SWITCH || return (is_switch ||
dr_dlid_is_permissive ? dr_dlid_is_permissive ?
IB_SMI_HANDLE : IB_SMI_DISCARD); IB_SMI_HANDLE : IB_SMI_DISCARD);
} }
@ -96,7 +96,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
/* C14-13:2 */ /* C14-13:2 */
if (2 <= *hop_ptr && *hop_ptr <= hop_cnt) { if (2 <= *hop_ptr && *hop_ptr <= hop_cnt) {
if (node_type != RDMA_NODE_IB_SWITCH) if (!is_switch)
return IB_SMI_DISCARD; return IB_SMI_DISCARD;
(*hop_ptr)--; (*hop_ptr)--;
@ -108,7 +108,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
if (*hop_ptr == 1) { if (*hop_ptr == 1) {
(*hop_ptr)--; (*hop_ptr)--;
/* C14-13:3 -- SMPs destined for SM shouldn't be here */ /* C14-13:3 -- SMPs destined for SM shouldn't be here */
return (node_type == RDMA_NODE_IB_SWITCH || return (is_switch ||
dr_slid_is_permissive ? dr_slid_is_permissive ?
IB_SMI_HANDLE : IB_SMI_DISCARD); IB_SMI_HANDLE : IB_SMI_DISCARD);
} }
@ -127,9 +127,9 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
* Return IB_SMI_DISCARD if the SMP should be discarded * Return IB_SMI_DISCARD if the SMP should be discarded
*/ */
enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp, enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
u8 node_type, int port_num) bool is_switch, int port_num)
{ {
return __smi_handle_dr_smp_send(node_type, port_num, return __smi_handle_dr_smp_send(is_switch, port_num,
&smp->hop_ptr, smp->hop_cnt, &smp->hop_ptr, smp->hop_cnt,
smp->initial_path, smp->initial_path,
smp->return_path, smp->return_path,
@ -139,9 +139,9 @@ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
} }
enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp, enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
u8 node_type, int port_num) bool is_switch, int port_num)
{ {
return __smi_handle_dr_smp_send(node_type, port_num, return __smi_handle_dr_smp_send(is_switch, port_num,
&smp->hop_ptr, smp->hop_cnt, &smp->hop_ptr, smp->hop_cnt,
smp->route.dr.initial_path, smp->route.dr.initial_path,
smp->route.dr.return_path, smp->route.dr.return_path,
@ -152,7 +152,7 @@ enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
OPA_LID_PERMISSIVE); OPA_LID_PERMISSIVE);
} }
static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num, static enum smi_action __smi_handle_dr_smp_recv(bool is_switch, int port_num,
int phys_port_cnt, int phys_port_cnt,
u8 *hop_ptr, u8 hop_cnt, u8 *hop_ptr, u8 hop_cnt,
const u8 *initial_path, const u8 *initial_path,
@ -173,7 +173,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
/* C14-9:2 -- intermediate hop */ /* C14-9:2 -- intermediate hop */
if (*hop_ptr && *hop_ptr < hop_cnt) { if (*hop_ptr && *hop_ptr < hop_cnt) {
if (node_type != RDMA_NODE_IB_SWITCH) if (!is_switch)
return IB_SMI_DISCARD; return IB_SMI_DISCARD;
return_path[*hop_ptr] = port_num; return_path[*hop_ptr] = port_num;
@ -188,7 +188,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
return_path[*hop_ptr] = port_num; return_path[*hop_ptr] = port_num;
/* hop_ptr updated when sending */ /* hop_ptr updated when sending */
return (node_type == RDMA_NODE_IB_SWITCH || return (is_switch ||
dr_dlid_is_permissive ? dr_dlid_is_permissive ?
IB_SMI_HANDLE : IB_SMI_DISCARD); IB_SMI_HANDLE : IB_SMI_DISCARD);
} }
@ -208,7 +208,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
/* C14-13:2 */ /* C14-13:2 */
if (2 <= *hop_ptr && *hop_ptr <= hop_cnt) { if (2 <= *hop_ptr && *hop_ptr <= hop_cnt) {
if (node_type != RDMA_NODE_IB_SWITCH) if (!is_switch)
return IB_SMI_DISCARD; return IB_SMI_DISCARD;
/* hop_ptr updated when sending */ /* hop_ptr updated when sending */
@ -224,8 +224,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
return IB_SMI_HANDLE; return IB_SMI_HANDLE;
} }
/* hop_ptr updated when sending */ /* hop_ptr updated when sending */
return (node_type == RDMA_NODE_IB_SWITCH ? return (is_switch ? IB_SMI_HANDLE : IB_SMI_DISCARD);
IB_SMI_HANDLE : IB_SMI_DISCARD);
} }
/* C14-13:4 -- hop_ptr = 0 -> give to SM */ /* C14-13:4 -- hop_ptr = 0 -> give to SM */
@ -238,10 +237,10 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
* Adjust information for a received SMP * Adjust information for a received SMP
* Return IB_SMI_DISCARD if the SMP should be dropped * Return IB_SMI_DISCARD if the SMP should be dropped
*/ */
enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type, enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch,
int port_num, int phys_port_cnt) int port_num, int phys_port_cnt)
{ {
return __smi_handle_dr_smp_recv(node_type, port_num, phys_port_cnt, return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt,
&smp->hop_ptr, smp->hop_cnt, &smp->hop_ptr, smp->hop_cnt,
smp->initial_path, smp->initial_path,
smp->return_path, smp->return_path,
@ -254,10 +253,10 @@ enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
* Adjust information for a received SMP * Adjust information for a received SMP
* Return IB_SMI_DISCARD if the SMP should be dropped * Return IB_SMI_DISCARD if the SMP should be dropped
*/ */
enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, u8 node_type, enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch,
int port_num, int phys_port_cnt) int port_num, int phys_port_cnt)
{ {
return __smi_handle_dr_smp_recv(node_type, port_num, phys_port_cnt, return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt,
&smp->hop_ptr, smp->hop_cnt, &smp->hop_ptr, smp->hop_cnt,
smp->route.dr.initial_path, smp->route.dr.initial_path,
smp->route.dr.return_path, smp->route.dr.return_path,

View File

@ -51,12 +51,12 @@ enum smi_forward_action {
IB_SMI_FORWARD /* SMP should be forwarded (for switches only) */ IB_SMI_FORWARD /* SMP should be forwarded (for switches only) */
}; };
enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type, enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch,
int port_num, int phys_port_cnt); int port_num, int phys_port_cnt);
int smi_get_fwd_port(struct ib_smp *smp); int smi_get_fwd_port(struct ib_smp *smp);
extern enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp); extern enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp);
extern enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp, extern enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
u8 node_type, int port_num); bool is_switch, int port_num);
/* /*
* Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
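The smi.c and smi.h hunks above replace the u8 node_type parameter with a bool is_switch throughout the directed-route SMP helpers, so a caller decides once whether the device is a switch instead of comparing node_type against RDMA_NODE_IB_SWITCH at every hop check. A minimal sketch of such a caller, assuming the rdma_cap_ib_switch() helper used elsewhere in this diff (the function name example_recv_dr_smp and its placement are illustrative, not part of the patch):

#include <rdma/ib_verbs.h>	/* rdma_cap_ib_switch() */
#include <rdma/ib_smi.h>	/* struct ib_smp */
#include "smi.h"		/* smi_handle_dr_smp_recv(), enum smi_action */

/* Hypothetical caller: compute the switch capability once and hand the
 * boolean down to the SMI helpers. */
static enum smi_action example_recv_dr_smp(struct ib_device *device,
					   int port_num, struct ib_smp *smp)
{
	bool is_switch = rdma_cap_ib_switch(device);

	return smi_handle_dr_smp_recv(smp, is_switch, port_num,
				      device->phys_port_cnt);
}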

View File

@ -870,7 +870,7 @@ int ib_device_register_sysfs(struct ib_device *device,
goto err_put; goto err_put;
} }
if (device->node_type == RDMA_NODE_IB_SWITCH) { if (rdma_cap_ib_switch(device)) {
ret = add_port(device, 0, port_callback); ret = add_port(device, 0, port_callback);
if (ret) if (ret)
goto err_put; goto err_put;

View File

@ -1193,6 +1193,7 @@ static int ib_ucm_close(struct inode *inode, struct file *filp)
return 0; return 0;
} }
static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES);
static void ib_ucm_release_dev(struct device *dev) static void ib_ucm_release_dev(struct device *dev)
{ {
struct ib_ucm_device *ucm_dev; struct ib_ucm_device *ucm_dev;
@ -1202,7 +1203,7 @@ static void ib_ucm_release_dev(struct device *dev)
if (ucm_dev->devnum < IB_UCM_MAX_DEVICES) if (ucm_dev->devnum < IB_UCM_MAX_DEVICES)
clear_bit(ucm_dev->devnum, dev_map); clear_bit(ucm_dev->devnum, dev_map);
else else
clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, dev_map); clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, overflow_map);
kfree(ucm_dev); kfree(ucm_dev);
} }
@ -1226,7 +1227,6 @@ static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL); static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
static dev_t overflow_maj; static dev_t overflow_maj;
static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES);
static int find_overflow_devnum(void) static int find_overflow_devnum(void)
{ {
int ret; int ret;

View File

@ -1354,10 +1354,10 @@ static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
/* Acquire mutex's based on pointer comparison to prevent deadlock. */ /* Acquire mutex's based on pointer comparison to prevent deadlock. */
if (file1 < file2) { if (file1 < file2) {
mutex_lock(&file1->mut); mutex_lock(&file1->mut);
mutex_lock(&file2->mut); mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
} else { } else {
mutex_lock(&file2->mut); mutex_lock(&file2->mut);
mutex_lock(&file1->mut); mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
} }
} }
@ -1616,6 +1616,7 @@ static void __exit ucma_cleanup(void)
device_remove_file(ucma_misc.this_device, &dev_attr_abi_version); device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
misc_deregister(&ucma_misc); misc_deregister(&ucma_misc);
idr_destroy(&ctx_idr); idr_destroy(&ctx_idr);
idr_destroy(&multicast_idr);
} }
module_init(ucma_init); module_init(ucma_init);
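The ucma_lock_files() hunk above keeps the existing deadlock-avoidance rule (always lock the lower-addressed file first) and additionally marks the second acquisition with mutex_lock_nested(..., SINGLE_DEPTH_NESTING) so lockdep does not report a false positive when two locks of the same class are held. A distilled sketch of that pattern, with a made-up struct obj standing in for struct ucma_file:

#include <linux/mutex.h>
#include <linux/lockdep.h>

struct obj {
	struct mutex mut;
	/* ... */
};

/* Lock two objects of the same lock class without deadlocking:
 * order by address, and tell lockdep the second lock is nested. */
static void lock_pair(struct obj *a, struct obj *b)
{
	if (a < b) {
		mutex_lock(&a->mut);
		mutex_lock_nested(&b->mut, SINGLE_DEPTH_NESTING);
	} else {
		mutex_lock(&b->mut);
		mutex_lock_nested(&a->mut, SINGLE_DEPTH_NESTING);
	}
}

static void unlock_pair(struct obj *a, struct obj *b)
{
	mutex_unlock(&a->mut);
	mutex_unlock(&b->mut);
}

Unlock order does not matter for deadlock avoidance; only the acquisition order does.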

View File

@ -226,8 +226,9 @@ int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_mad *in_mad = (const struct ib_mad *)in; const struct ib_mad *in_mad = (const struct ib_mad *)in;
struct ib_mad *out_mad = (struct ib_mad *)out; struct ib_mad *out_mad = (struct ib_mad *)out;
BUG_ON(in_mad_size != sizeof(*in_mad) || if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
*out_mad_size != sizeof(*out_mad)); *out_mad_size != sizeof(*out_mad)))
return IB_MAD_RESULT_FAILURE;
if (!port_num || port_num > ibdev->phys_port_cnt || !in_wc) if (!port_num || port_num > ibdev->phys_port_cnt || !in_wc)
return IB_MAD_RESULT_FAILURE; return IB_MAD_RESULT_FAILURE;
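This ehca hunk is the first instance of a pattern repeated below for ipath, mlx5, mthca, ocrdma and qib: a MAD size mismatch no longer panics the machine via BUG_ON() but warns once and fails the request. A minimal sketch of a process_mad() handler using that check (the driver name and the elided body are hypothetical; the signature follows the ib_device process_mad callback seen in these hunks):

#include <linux/bug.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_mad.h>

static int example_process_mad(struct ib_device *ibdev, int mad_flags,
			       u8 port_num, const struct ib_wc *in_wc,
			       const struct ib_grh *in_grh,
			       const struct ib_mad_hdr *in, size_t in_mad_size,
			       struct ib_mad_hdr *out, size_t *out_mad_size,
			       u16 *out_mad_pkey_index)
{
	/* Malformed sizes indicate a core/driver mismatch: warn once,
	 * fail the MAD, keep the machine running. */
	if (WARN_ON_ONCE(in_mad_size != sizeof(struct ib_mad) ||
			 *out_mad_size != sizeof(struct ib_mad)))
		return IB_MAD_RESULT_FAILURE;

	/* ...the driver-specific MAD handling is elided here... */
	return IB_MAD_RESULT_SUCCESS;
}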

View File

@ -1499,8 +1499,9 @@ int ipath_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_mad *in_mad = (const struct ib_mad *)in; const struct ib_mad *in_mad = (const struct ib_mad *)in;
struct ib_mad *out_mad = (struct ib_mad *)out; struct ib_mad *out_mad = (struct ib_mad *)out;
BUG_ON(in_mad_size != sizeof(*in_mad) || if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
*out_mad_size != sizeof(*out_mad)); *out_mad_size != sizeof(*out_mad)))
return IB_MAD_RESULT_FAILURE;
switch (in_mad->mad_hdr.mgmt_class) { switch (in_mad->mad_hdr.mgmt_class) {
case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:

View File

@ -2044,9 +2044,9 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
spin_lock_init(&idev->qp_table.lock); spin_lock_init(&idev->qp_table.lock);
spin_lock_init(&idev->lk_table.lock); spin_lock_init(&idev->lk_table.lock);
idev->sm_lid = __constant_be16_to_cpu(IB_LID_PERMISSIVE); idev->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
/* Set the prefix to the default value (see ch. 4.1.1) */ /* Set the prefix to the default value (see ch. 4.1.1) */
idev->gid_prefix = __constant_cpu_to_be64(0xfe80000000000000ULL); idev->gid_prefix = cpu_to_be64(0xfe80000000000000ULL);
ret = ipath_init_qp_table(idev, ib_ipath_qp_table_size); ret = ipath_init_qp_table(idev, ib_ipath_qp_table_size);
if (ret) if (ret)

View File

@ -860,21 +860,31 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct mlx4_ib_dev *dev = to_mdev(ibdev); struct mlx4_ib_dev *dev = to_mdev(ibdev);
const struct ib_mad *in_mad = (const struct ib_mad *)in; const struct ib_mad *in_mad = (const struct ib_mad *)in;
struct ib_mad *out_mad = (struct ib_mad *)out; struct ib_mad *out_mad = (struct ib_mad *)out;
enum rdma_link_layer link = rdma_port_get_link_layer(ibdev, port_num);
BUG_ON(in_mad_size != sizeof(*in_mad) || if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
*out_mad_size != sizeof(*out_mad)); *out_mad_size != sizeof(*out_mad)))
return IB_MAD_RESULT_FAILURE;
switch (rdma_port_get_link_layer(ibdev, port_num)) { /* iboe_process_mad() which uses the HCA flow-counters to implement IB PMA
case IB_LINK_LAYER_INFINIBAND: * queries, should be called only by VFs and for that specific purpose
if (!mlx4_is_slave(dev->dev)) */
return ib_process_mad(ibdev, mad_flags, port_num, in_wc, if (link == IB_LINK_LAYER_INFINIBAND) {
in_grh, in_mad, out_mad); if (mlx4_is_slave(dev->dev) &&
case IB_LINK_LAYER_ETHERNET: in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
return iboe_process_mad(ibdev, mad_flags, port_num, in_wc, in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS)
in_grh, in_mad, out_mad); return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
default: in_grh, in_mad, out_mad);
return -EINVAL;
return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
in_grh, in_mad, out_mad);
} }
if (link == IB_LINK_LAYER_ETHERNET)
return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
in_grh, in_mad, out_mad);
return -EINVAL;
} }
static void send_handler(struct ib_mad_agent *agent, static void send_handler(struct ib_mad_agent *agent,

View File

@ -253,14 +253,15 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL; props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
props->timestamp_mask = 0xFFFFFFFFFFFFULL; props->timestamp_mask = 0xFFFFFFFFFFFFULL;
err = mlx4_get_internal_clock_params(dev->dev, &clock_params); if (!mlx4_is_slave(dev->dev))
if (err) err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
goto out;
if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) { if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
resp.response_length += sizeof(resp.hca_core_clock_offset); resp.response_length += sizeof(resp.hca_core_clock_offset);
resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP; if (!err && !mlx4_is_slave(dev->dev)) {
resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP;
resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
}
} }
if (uhw->outlen) { if (uhw->outlen) {
@ -2669,31 +2670,33 @@ static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC); dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
if (!dm) { if (!dm) {
pr_err("failed to allocate memory for tunneling qp update\n"); pr_err("failed to allocate memory for tunneling qp update\n");
goto out; return;
} }
for (i = 0; i < ports; i++) { for (i = 0; i < ports; i++) {
dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC); dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
if (!dm[i]) { if (!dm[i]) {
pr_err("failed to allocate memory for tunneling qp update work struct\n"); pr_err("failed to allocate memory for tunneling qp update work struct\n");
for (i = 0; i < dev->caps.num_ports; i++) { while (--i >= 0)
if (dm[i]) kfree(dm[i]);
kfree(dm[i]);
}
goto out; goto out;
} }
}
/* initialize or tear down tunnel QPs for the slave */
for (i = 0; i < ports; i++) {
INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work); INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
dm[i]->port = first_port + i + 1; dm[i]->port = first_port + i + 1;
dm[i]->slave = slave; dm[i]->slave = slave;
dm[i]->do_init = do_init; dm[i]->do_init = do_init;
dm[i]->dev = ibdev; dm[i]->dev = ibdev;
spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags); }
if (!ibdev->sriov.is_going_down) /* initialize or tear down tunnel QPs for the slave */
spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
if (!ibdev->sriov.is_going_down) {
for (i = 0; i < ports; i++)
queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work); queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags); spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
} else {
spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
for (i = 0; i < ports; i++)
kfree(dm[i]);
} }
out: out:
kfree(dm); kfree(dm);
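The do_slave_init() hunk above tightens the error paths: a failed kcalloc() simply returns, a partially filled dm[] array is unwound by freeing only the entries that were actually allocated, and the per-port work items are either all queued (under the going_down lock) or all freed. The unwind idiom, reduced to its essentials (alloc_all() and the fixed allocation size are illustrative only):

#include <linux/slab.h>
#include <linux/errno.h>

/* Allocate n items; on failure free exactly the ones already allocated. */
static int alloc_all(void **items, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		items[i] = kmalloc(64, GFP_ATOMIC);
		if (!items[i]) {
			while (--i >= 0)
				kfree(items[i]);
			return -ENOMEM;
		}
	}
	return 0;
}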

View File

@ -68,8 +68,9 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_mad *in_mad = (const struct ib_mad *)in; const struct ib_mad *in_mad = (const struct ib_mad *)in;
struct ib_mad *out_mad = (struct ib_mad *)out; struct ib_mad *out_mad = (struct ib_mad *)out;
BUG_ON(in_mad_size != sizeof(*in_mad) || if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
*out_mad_size != sizeof(*out_mad)); *out_mad_size != sizeof(*out_mad)))
return IB_MAD_RESULT_FAILURE;
slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE); slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);

View File

@ -209,8 +209,9 @@ int mthca_process_mad(struct ib_device *ibdev,
const struct ib_mad *in_mad = (const struct ib_mad *)in; const struct ib_mad *in_mad = (const struct ib_mad *)in;
struct ib_mad *out_mad = (struct ib_mad *)out; struct ib_mad *out_mad = (struct ib_mad *)out;
BUG_ON(in_mad_size != sizeof(*in_mad) || if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
*out_mad_size != sizeof(*out_mad)); *out_mad_size != sizeof(*out_mad)))
return IB_MAD_RESULT_FAILURE;
/* Forward locally generated traps to the SM */ /* Forward locally generated traps to the SM */
if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&

View File

@ -1520,8 +1520,9 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
int rc = arpindex; int rc = arpindex;
struct net_device *netdev; struct net_device *netdev;
struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter; struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
__be32 dst_ipaddr = htonl(dst_ip);
rt = ip_route_output(&init_net, htonl(dst_ip), 0, 0, 0); rt = ip_route_output(&init_net, dst_ipaddr, nesvnic->local_ipaddr, 0, 0);
if (IS_ERR(rt)) { if (IS_ERR(rt)) {
printk(KERN_ERR "%s: ip_route_output_key failed for 0x%08X\n", printk(KERN_ERR "%s: ip_route_output_key failed for 0x%08X\n",
__func__, dst_ip); __func__, dst_ip);
@ -1533,7 +1534,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
else else
netdev = nesvnic->netdev; netdev = nesvnic->netdev;
neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, netdev); neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);
rcu_read_lock(); rcu_read_lock();
if (neigh) { if (neigh) {

View File

@ -3861,7 +3861,7 @@ void nes_manage_arp_cache(struct net_device *netdev, unsigned char *mac_addr,
(((u32)mac_addr[2]) << 24) | (((u32)mac_addr[3]) << 16) | (((u32)mac_addr[2]) << 24) | (((u32)mac_addr[3]) << 16) |
(((u32)mac_addr[4]) << 8) | (u32)mac_addr[5]); (((u32)mac_addr[4]) << 8) | (u32)mac_addr[5]);
cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = cpu_to_le32( cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = cpu_to_le32(
(((u32)mac_addr[0]) << 16) | (u32)mac_addr[1]); (((u32)mac_addr[0]) << 8) | (u32)mac_addr[1]);
} else { } else {
cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_ADDR_LOW_IDX] = 0; cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_ADDR_LOW_IDX] = 0;
cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = 0; cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = 0;
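The small change above is a real bug fix: the two high-order MAC bytes occupy the low 16 bits of the MAC_HIGH word, so mac_addr[0] belongs at bits 15..8; the old shift by 16 placed it at bits 23..16 instead. A standalone illustration of the corrected packing (plain userspace C, values chosen arbitrarily):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	/* Low word: bytes 2..5; high word: bytes 0..1 in its low 16 bits. */
	uint32_t low  = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
			((uint32_t)mac[4] << 8)  |  (uint32_t)mac[5];
	uint32_t high = ((uint32_t)mac[0] << 8)  |  (uint32_t)mac[1];

	printf("low=0x%08x high=0x%04x\n", low, high); /* 0x22334455 0x0011 */
	return 0;
}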

View File

@ -215,8 +215,9 @@ int ocrdma_process_mad(struct ib_device *ibdev,
const struct ib_mad *in_mad = (const struct ib_mad *)in; const struct ib_mad *in_mad = (const struct ib_mad *)in;
struct ib_mad *out_mad = (struct ib_mad *)out; struct ib_mad *out_mad = (struct ib_mad *)out;
BUG_ON(in_mad_size != sizeof(*in_mad) || if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
*out_mad_size != sizeof(*out_mad)); *out_mad_size != sizeof(*out_mad)))
return IB_MAD_RESULT_FAILURE;
switch (in_mad->mad_hdr.mgmt_class) { switch (in_mad->mad_hdr.mgmt_class) {
case IB_MGMT_CLASS_PERF_MGMT: case IB_MGMT_CLASS_PERF_MGMT:

View File

@ -696,6 +696,7 @@ static void __exit ocrdma_exit_module(void)
ocrdma_unregister_inet6addr_notifier(); ocrdma_unregister_inet6addr_notifier();
ocrdma_unregister_inetaddr_notifier(); ocrdma_unregister_inetaddr_notifier();
ocrdma_rem_debugfs(); ocrdma_rem_debugfs();
idr_destroy(&ocrdma_dev_id);
} }
module_init(ocrdma_init_module); module_init(ocrdma_init_module);

View File

@ -2412,8 +2412,9 @@ int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
const struct ib_mad *in_mad = (const struct ib_mad *)in; const struct ib_mad *in_mad = (const struct ib_mad *)in;
struct ib_mad *out_mad = (struct ib_mad *)out; struct ib_mad *out_mad = (struct ib_mad *)out;
BUG_ON(in_mad_size != sizeof(*in_mad) || if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
*out_mad_size != sizeof(*out_mad)); *out_mad_size != sizeof(*out_mad)))
return IB_MAD_RESULT_FAILURE;
switch (in_mad->mad_hdr.mgmt_class) { switch (in_mad->mad_hdr.mgmt_class) {
case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:

View File

@ -239,7 +239,7 @@ struct ipoib_cm_tx {
struct net_device *dev; struct net_device *dev;
struct ipoib_neigh *neigh; struct ipoib_neigh *neigh;
struct ipoib_path *path; struct ipoib_path *path;
struct ipoib_cm_tx_buf *tx_ring; struct ipoib_tx_buf *tx_ring;
unsigned tx_head; unsigned tx_head;
unsigned tx_tail; unsigned tx_tail;
unsigned long flags; unsigned long flags;
@ -504,6 +504,33 @@ int ipoib_mcast_stop_thread(struct net_device *dev);
void ipoib_mcast_dev_down(struct net_device *dev); void ipoib_mcast_dev_down(struct net_device *dev);
void ipoib_mcast_dev_flush(struct net_device *dev); void ipoib_mcast_dev_flush(struct net_device *dev);
int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req);
void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
struct ipoib_tx_buf *tx_req);
static inline void ipoib_build_sge(struct ipoib_dev_priv *priv,
struct ipoib_tx_buf *tx_req)
{
int i, off;
struct sk_buff *skb = tx_req->skb;
skb_frag_t *frags = skb_shinfo(skb)->frags;
int nr_frags = skb_shinfo(skb)->nr_frags;
u64 *mapping = tx_req->mapping;
if (skb_headlen(skb)) {
priv->tx_sge[0].addr = mapping[0];
priv->tx_sge[0].length = skb_headlen(skb);
off = 1;
} else
off = 0;
for (i = 0; i < nr_frags; ++i) {
priv->tx_sge[i + off].addr = mapping[i + off];
priv->tx_sge[i + off].length = skb_frag_size(&frags[i]);
}
priv->tx_wr.num_sge = nr_frags + off;
}
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev); struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev);
int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter); int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter);

View File

@ -694,14 +694,12 @@ repost:
static inline int post_send(struct ipoib_dev_priv *priv, static inline int post_send(struct ipoib_dev_priv *priv,
struct ipoib_cm_tx *tx, struct ipoib_cm_tx *tx,
unsigned int wr_id, unsigned int wr_id,
u64 addr, int len) struct ipoib_tx_buf *tx_req)
{ {
struct ib_send_wr *bad_wr; struct ib_send_wr *bad_wr;
priv->tx_sge[0].addr = addr; ipoib_build_sge(priv, tx_req);
priv->tx_sge[0].length = len;
priv->tx_wr.num_sge = 1;
priv->tx_wr.wr_id = wr_id | IPOIB_OP_CM; priv->tx_wr.wr_id = wr_id | IPOIB_OP_CM;
return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr); return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
@ -710,8 +708,7 @@ static inline int post_send(struct ipoib_dev_priv *priv,
void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx) void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
{ {
struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_cm_tx_buf *tx_req; struct ipoib_tx_buf *tx_req;
u64 addr;
int rc; int rc;
if (unlikely(skb->len > tx->mtu)) { if (unlikely(skb->len > tx->mtu)) {
@ -735,24 +732,21 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
*/ */
tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)]; tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
tx_req->skb = skb; tx_req->skb = skb;
addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
if (unlikely(ib_dma_mapping_error(priv->ca, addr))) { if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
++dev->stats.tx_errors; ++dev->stats.tx_errors;
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
return; return;
} }
tx_req->mapping = addr;
skb_orphan(skb); skb_orphan(skb);
skb_dst_drop(skb); skb_dst_drop(skb);
rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), tx_req);
addr, skb->len);
if (unlikely(rc)) { if (unlikely(rc)) {
ipoib_warn(priv, "post_send failed, error %d\n", rc); ipoib_warn(priv, "post_send failed, error %d\n", rc);
++dev->stats.tx_errors; ++dev->stats.tx_errors;
ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE); ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
} else { } else {
dev->trans_start = jiffies; dev->trans_start = jiffies;
@ -777,7 +771,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_cm_tx *tx = wc->qp->qp_context; struct ipoib_cm_tx *tx = wc->qp->qp_context;
unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM; unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
struct ipoib_cm_tx_buf *tx_req; struct ipoib_tx_buf *tx_req;
unsigned long flags; unsigned long flags;
ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n", ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
@ -791,7 +785,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
tx_req = &tx->tx_ring[wr_id]; tx_req = &tx->tx_ring[wr_id];
ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE); ipoib_dma_unmap_tx(priv, tx_req);
/* FIXME: is this right? Shouldn't we only increment on success? */ /* FIXME: is this right? Shouldn't we only increment on success? */
++dev->stats.tx_packets; ++dev->stats.tx_packets;
@ -1036,6 +1030,9 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
struct ib_qp *tx_qp; struct ib_qp *tx_qp;
if (dev->features & NETIF_F_SG)
attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
tx_qp = ib_create_qp(priv->pd, &attr); tx_qp = ib_create_qp(priv->pd, &attr);
if (PTR_ERR(tx_qp) == -EINVAL) { if (PTR_ERR(tx_qp) == -EINVAL) {
ipoib_warn(priv, "can't use GFP_NOIO for QPs on device %s, using GFP_KERNEL\n", ipoib_warn(priv, "can't use GFP_NOIO for QPs on device %s, using GFP_KERNEL\n",
@ -1170,7 +1167,7 @@ err_tx:
static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p) static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
{ {
struct ipoib_dev_priv *priv = netdev_priv(p->dev); struct ipoib_dev_priv *priv = netdev_priv(p->dev);
struct ipoib_cm_tx_buf *tx_req; struct ipoib_tx_buf *tx_req;
unsigned long begin; unsigned long begin;
ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n", ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
@ -1197,8 +1194,7 @@ timeout:
while ((int) p->tx_tail - (int) p->tx_head < 0) { while ((int) p->tx_tail - (int) p->tx_head < 0) {
tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)]; tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, ipoib_dma_unmap_tx(priv, tx_req);
DMA_TO_DEVICE);
dev_kfree_skb_any(tx_req->skb); dev_kfree_skb_any(tx_req->skb);
++p->tx_tail; ++p->tx_tail;
netif_tx_lock_bh(p->dev); netif_tx_lock_bh(p->dev);
@ -1455,7 +1451,6 @@ static void ipoib_cm_stale_task(struct work_struct *work)
spin_unlock_irq(&priv->lock); spin_unlock_irq(&priv->lock);
} }
static ssize_t show_mode(struct device *d, struct device_attribute *attr, static ssize_t show_mode(struct device *d, struct device_attribute *attr,
char *buf) char *buf)
{ {
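The ipoib_cm.c hunks above switch connected-mode TX to the shared struct ipoib_tx_buf, map the skb head and page fragments with ipoib_dma_map_tx(), build the gather list via the new ipoib_build_sge() helper, and size the TX QP for scatter/gather when the netdev advertises NETIF_F_SG. A small sketch of that last step, using a hypothetical helper name (the real code sets the attribute inline in ipoib_cm_create_tx_qp()):

#include <rdma/ib_verbs.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Size max_send_sge for one SGE per page fragment plus the linear head. */
static void example_set_tx_sge(struct net_device *dev,
			       struct ib_qp_init_attr *attr)
{
	attr->cap.max_send_sge = 1;
	if (dev->features & NETIF_F_SG)
		attr->cap.max_send_sge = MAX_SKB_FRAGS + 1;
}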

View File

@ -263,8 +263,7 @@ repost:
"for buf %d\n", wr_id); "for buf %d\n", wr_id);
} }
static int ipoib_dma_map_tx(struct ib_device *ca, int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
struct ipoib_tx_buf *tx_req)
{ {
struct sk_buff *skb = tx_req->skb; struct sk_buff *skb = tx_req->skb;
u64 *mapping = tx_req->mapping; u64 *mapping = tx_req->mapping;
@ -305,8 +304,8 @@ partial_error:
return -EIO; return -EIO;
} }
static void ipoib_dma_unmap_tx(struct ib_device *ca, void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
struct ipoib_tx_buf *tx_req) struct ipoib_tx_buf *tx_req)
{ {
struct sk_buff *skb = tx_req->skb; struct sk_buff *skb = tx_req->skb;
u64 *mapping = tx_req->mapping; u64 *mapping = tx_req->mapping;
@ -314,7 +313,8 @@ static void ipoib_dma_unmap_tx(struct ib_device *ca,
int off; int off;
if (skb_headlen(skb)) { if (skb_headlen(skb)) {
ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE); ib_dma_unmap_single(priv->ca, mapping[0], skb_headlen(skb),
DMA_TO_DEVICE);
off = 1; off = 1;
} else } else
off = 0; off = 0;
@ -322,8 +322,8 @@ static void ipoib_dma_unmap_tx(struct ib_device *ca,
for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
ib_dma_unmap_page(ca, mapping[i + off], skb_frag_size(frag), ib_dma_unmap_page(priv->ca, mapping[i + off],
DMA_TO_DEVICE); skb_frag_size(frag), DMA_TO_DEVICE);
} }
} }
@ -389,7 +389,7 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
tx_req = &priv->tx_ring[wr_id]; tx_req = &priv->tx_ring[wr_id];
ipoib_dma_unmap_tx(priv->ca, tx_req); ipoib_dma_unmap_tx(priv, tx_req);
++dev->stats.tx_packets; ++dev->stats.tx_packets;
dev->stats.tx_bytes += tx_req->skb->len; dev->stats.tx_bytes += tx_req->skb->len;
@ -514,24 +514,10 @@ static inline int post_send(struct ipoib_dev_priv *priv,
void *head, int hlen) void *head, int hlen)
{ {
struct ib_send_wr *bad_wr; struct ib_send_wr *bad_wr;
int i, off;
struct sk_buff *skb = tx_req->skb; struct sk_buff *skb = tx_req->skb;
skb_frag_t *frags = skb_shinfo(skb)->frags;
int nr_frags = skb_shinfo(skb)->nr_frags;
u64 *mapping = tx_req->mapping;
if (skb_headlen(skb)) { ipoib_build_sge(priv, tx_req);
priv->tx_sge[0].addr = mapping[0];
priv->tx_sge[0].length = skb_headlen(skb);
off = 1;
} else
off = 0;
for (i = 0; i < nr_frags; ++i) {
priv->tx_sge[i + off].addr = mapping[i + off];
priv->tx_sge[i + off].length = skb_frag_size(&frags[i]);
}
priv->tx_wr.num_sge = nr_frags + off;
priv->tx_wr.wr_id = wr_id; priv->tx_wr.wr_id = wr_id;
priv->tx_wr.wr.ud.remote_qpn = qpn; priv->tx_wr.wr.ud.remote_qpn = qpn;
priv->tx_wr.wr.ud.ah = address; priv->tx_wr.wr.ud.ah = address;
@ -617,7 +603,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
ipoib_warn(priv, "post_send failed, error %d\n", rc); ipoib_warn(priv, "post_send failed, error %d\n", rc);
++dev->stats.tx_errors; ++dev->stats.tx_errors;
--priv->tx_outstanding; --priv->tx_outstanding;
ipoib_dma_unmap_tx(priv->ca, tx_req); ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
if (netif_queue_stopped(dev)) if (netif_queue_stopped(dev))
netif_wake_queue(dev); netif_wake_queue(dev);
@ -868,7 +854,7 @@ int ipoib_ib_dev_stop(struct net_device *dev)
while ((int) priv->tx_tail - (int) priv->tx_head < 0) { while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
tx_req = &priv->tx_ring[priv->tx_tail & tx_req = &priv->tx_ring[priv->tx_tail &
(ipoib_sendq_size - 1)]; (ipoib_sendq_size - 1)];
ipoib_dma_unmap_tx(priv->ca, tx_req); ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(tx_req->skb); dev_kfree_skb_any(tx_req->skb);
++priv->tx_tail; ++priv->tx_tail;
--priv->tx_outstanding; --priv->tx_outstanding;
@ -985,20 +971,21 @@ static inline int update_child_pkey(struct ipoib_dev_priv *priv)
} }
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
enum ipoib_flush_level level) enum ipoib_flush_level level,
int nesting)
{ {
struct ipoib_dev_priv *cpriv; struct ipoib_dev_priv *cpriv;
struct net_device *dev = priv->dev; struct net_device *dev = priv->dev;
int result; int result;
down_read(&priv->vlan_rwsem); down_read_nested(&priv->vlan_rwsem, nesting);
/* /*
* Flush any child interfaces too -- they might be up even if * Flush any child interfaces too -- they might be up even if
* the parent is down. * the parent is down.
*/ */
list_for_each_entry(cpriv, &priv->child_intfs, list) list_for_each_entry(cpriv, &priv->child_intfs, list)
__ipoib_ib_dev_flush(cpriv, level); __ipoib_ib_dev_flush(cpriv, level, nesting + 1);
up_read(&priv->vlan_rwsem); up_read(&priv->vlan_rwsem);
@ -1076,7 +1063,7 @@ void ipoib_ib_dev_flush_light(struct work_struct *work)
struct ipoib_dev_priv *priv = struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, flush_light); container_of(work, struct ipoib_dev_priv, flush_light);
__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT); __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT, 0);
} }
void ipoib_ib_dev_flush_normal(struct work_struct *work) void ipoib_ib_dev_flush_normal(struct work_struct *work)
@ -1084,7 +1071,7 @@ void ipoib_ib_dev_flush_normal(struct work_struct *work)
struct ipoib_dev_priv *priv = struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, flush_normal); container_of(work, struct ipoib_dev_priv, flush_normal);
__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL); __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL, 0);
} }
void ipoib_ib_dev_flush_heavy(struct work_struct *work) void ipoib_ib_dev_flush_heavy(struct work_struct *work)
@ -1092,7 +1079,7 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work)
struct ipoib_dev_priv *priv = struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, flush_heavy); container_of(work, struct ipoib_dev_priv, flush_heavy);
__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY); __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0);
} }
void ipoib_ib_dev_cleanup(struct net_device *dev) void ipoib_ib_dev_cleanup(struct net_device *dev)
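The __ipoib_ib_dev_flush() hunks above thread a nesting depth through the recursion over child interfaces and use down_read_nested() so that taking the same vlan_rwsem lock class at each level does not trip lockdep. The shape of that pattern, with a generic parent/child structure standing in for ipoib_dev_priv:

#include <linux/rwsem.h>
#include <linux/list.h>

struct node {
	struct rw_semaphore children_sem;
	struct list_head children;	/* list of child nodes */
	struct list_head list;		/* link in the parent's list */
};

/* Recursively visit a hierarchy; the recursion depth doubles as the
 * lockdep subclass so nested read-locks of the same class are allowed. */
static void walk(struct node *n, int nesting)
{
	struct node *child;

	down_read_nested(&n->children_sem, nesting);
	list_for_each_entry(child, &n->children, list)
		walk(child, nesting + 1);
	up_read(&n->children_sem);
}

Lockdep tracks only eight subclasses, which is enough here because IPoIB child interfaces nest at most a couple of levels deep.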

View File

@ -190,7 +190,7 @@ static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_featu
struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_dev_priv *priv = netdev_priv(dev);
if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags)) if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
features &= ~(NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO); features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
return features; return features;
} }
@ -232,6 +232,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
ipoib_warn(priv, "enabling connected mode " ipoib_warn(priv, "enabling connected mode "
"will cause multicast packet drops\n"); "will cause multicast packet drops\n");
netdev_update_features(dev); netdev_update_features(dev);
dev_set_mtu(dev, ipoib_cm_max_mtu(dev));
rtnl_unlock(); rtnl_unlock();
priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM; priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
@ -1577,7 +1578,8 @@ static struct net_device *ipoib_add_port(const char *format,
SET_NETDEV_DEV(priv->dev, hca->dma_device); SET_NETDEV_DEV(priv->dev, hca->dma_device);
priv->dev->dev_id = port - 1; priv->dev->dev_id = port - 1;
if (!ib_query_port(hca, port, &attr)) result = ib_query_port(hca, port, &attr);
if (!result)
priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu); priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
else { else {
printk(KERN_WARNING "%s: ib_query_port %d failed\n", printk(KERN_WARNING "%s: ib_query_port %d failed\n",
@ -1598,7 +1600,8 @@ static struct net_device *ipoib_add_port(const char *format,
goto device_init_failed; goto device_init_failed;
} }
if (ipoib_set_dev_features(priv, hca)) result = ipoib_set_dev_features(priv, hca);
if (result)
goto device_init_failed; goto device_init_failed;
/* /*
@ -1684,7 +1687,7 @@ static void ipoib_add_one(struct ib_device *device)
struct list_head *dev_list; struct list_head *dev_list;
struct net_device *dev; struct net_device *dev;
struct ipoib_dev_priv *priv; struct ipoib_dev_priv *priv;
int s, e, p; int p;
int count = 0; int count = 0;
dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL); dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
@ -1693,15 +1696,7 @@ static void ipoib_add_one(struct ib_device *device)
INIT_LIST_HEAD(dev_list); INIT_LIST_HEAD(dev_list);
if (device->node_type == RDMA_NODE_IB_SWITCH) { for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
s = 0;
e = 0;
} else {
s = 1;
e = device->phys_port_cnt;
}
for (p = s; p <= e; ++p) {
if (!rdma_protocol_ib(device, p)) if (!rdma_protocol_ib(device, p))
continue; continue;
dev = ipoib_add_port("ib%d", device, p); dev = ipoib_add_port("ib%d", device, p);
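The ipoib_add_one() hunk above (and the matching srp_add_one() hunk below) drops the open-coded switch-vs-CA port range computation in favor of rdma_start_port()/rdma_end_port(), which already encode the "switches have a single port 0" rule. A sketch of the resulting iteration idiom (visit_ib_ports() and its callback are illustrative, not kernel API):

#include <rdma/ib_verbs.h>

/* Call visit() on every IB port of a device, whether it is an HCA
 * (ports 1..phys_port_cnt) or a switch (port 0 only). */
static void visit_ib_ports(struct ib_device *device,
			   void (*visit)(struct ib_device *dev, u8 port))
{
	u8 p;

	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
		if (!rdma_protocol_ib(device, p))
			continue;
		visit(device, p);
	}
}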

View File

@ -161,13 +161,10 @@ static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{ {
int tmo, res; int tmo, res;
if (strncmp(val, "off", 3) != 0) { res = srp_parse_tmo(&tmo, val);
res = kstrtoint(val, 0, &tmo); if (res)
if (res) goto out;
goto out;
} else {
tmo = -1;
}
if (kp->arg == &srp_reconnect_delay) if (kp->arg == &srp_reconnect_delay)
res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo, res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
srp_dev_loss_tmo); srp_dev_loss_tmo);
@ -3379,7 +3376,7 @@ static void srp_add_one(struct ib_device *device)
struct srp_device *srp_dev; struct srp_device *srp_dev;
struct ib_device_attr *dev_attr; struct ib_device_attr *dev_attr;
struct srp_host *host; struct srp_host *host;
int mr_page_shift, s, e, p; int mr_page_shift, p;
u64 max_pages_per_mr; u64 max_pages_per_mr;
dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL); dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
@ -3443,15 +3440,7 @@ static void srp_add_one(struct ib_device *device)
if (IS_ERR(srp_dev->mr)) if (IS_ERR(srp_dev->mr))
goto err_pd; goto err_pd;
if (device->node_type == RDMA_NODE_IB_SWITCH) { for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
s = 0;
e = 0;
} else {
s = 1;
e = device->phys_port_cnt;
}
for (p = s; p <= e; ++p) {
host = srp_add_port(srp_dev, p); host = srp_add_port(srp_dev, p);
if (host) if (host)
list_add_tail(&host->list, &srp_dev->dev_list); list_add_tail(&host->list, &srp_dev->dev_list);
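The srp_tmo_set() hunk above folds the "off"/integer parsing into a srp_parse_tmo() helper so the module-parameter setter only validates the result. The helper's body is not shown in this diff; a plausible sketch of what such a parser does (an assumption, not the patch's actual implementation):

#include <linux/kernel.h>
#include <linux/string.h>

/* Hypothetical reconstruction: "off" maps to -1 (disabled), anything
 * else must parse as an integer. */
static int example_parse_tmo(int *tmo, const char *val)
{
	if (!strncmp(val, "off", 3)) {
		*tmo = -1;
		return 0;
	}
	return kstrtoint(val, 0, tmo);
}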

Some files were not shown because too many files have changed in this diff.