Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
	drivers/net/ixgbe/ixgbe_main.c
	include/net/mac80211.h
	net/phonet/af_phonet.c
commit 6ab33d5171
@@ -294,7 +294,9 @@ and is between 256 and 4096 characters. It is defined in the file
Possible values are:
isolate - enable device isolation (each device, as far
as possible, will get its own protection
domain)
domain) [default]
share - put every device behind one IOMMU into the
same protection domain
fullflush - enable flushing of IO/TLB entries when
they are unmapped. Otherwise they are
flushed before they will be reused, which
@@ -1193,8 +1195,8 @@ and is between 256 and 4096 characters. It is defined in the file
it is equivalent to "nosmp", which also disables
the IO APIC.

max_addr=[KMG] [KNL,BOOT,ia64] All physical memory greater than or
equal to this physical address is ignored.
max_addr=nn[KMG] [KNL,BOOT,ia64] All physical memory greater than
or equal to this physical address is ignored.

max_luns= [SCSI] Maximum number of LUNs to probe.
Should be between 1 and 2^32-1.
@@ -1294,6 +1296,9 @@ and is between 256 and 4096 characters. It is defined in the file

mga= [HW,DRM]

min_addr=nn[KMG] [KNL,BOOT,ia64] All physical memory below this
physical address is ignored.

mminit_loglevel=
[KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this
parameter allows control of the logging verbosity for
@@ -96,7 +96,7 @@ Letting the PHY Abstraction Layer do Everything
static void adjust_link(struct net_device *dev);

Next, you need to know the device name of the PHY connected to this device.
The name will look something like, "phy0:0", where the first number is the
The name will look something like, "0:00", where the first number is the
bus id, and the second is the PHY's address on that bus. Typically,
the bus is responsible for making its ID unique.
@@ -1804,7 +1804,7 @@ S: Maintained

FTRACE
P: Steven Rostedt
M: srostedt@redhat.com
M: rostedt@goodmis.org
S: Maintained

FUJITSU FR-V (FRV) PORT
Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 28
EXTRAVERSION = -rc5
EXTRAVERSION = -rc6
NAME = Killer Bat of Doom

# *DOCUMENTATION*
@@ -226,7 +226,7 @@ extern long ia64_cmpxchg_called_with_bad_pointer (void);
/************************************************/
#define ia64_ssm IA64_INTRINSIC_MACRO(ssm)
#define ia64_rsm IA64_INTRINSIC_MACRO(rsm)
#define ia64_getreg IA64_INTRINSIC_API(getreg)
#define ia64_getreg IA64_INTRINSIC_MACRO(getreg)
#define ia64_setreg IA64_INTRINSIC_API(setreg)
#define ia64_set_rr IA64_INTRINSIC_API(set_rr)
#define ia64_get_rr IA64_INTRINSIC_API(get_rr)
@@ -78,6 +78,19 @@ extern unsigned long ia64_native_getreg_func(int regnum);
ia64_native_rsm(mask); \
} while (0)

/* returned ip value should be the one in the caller,
* not in __paravirt_getreg() */
#define paravirt_getreg(reg) \
({ \
unsigned long res; \
BUILD_BUG_ON(!__builtin_constant_p(reg)); \
if ((reg) == _IA64_REG_IP) \
res = ia64_native_getreg(_IA64_REG_IP); \
else \
res = pv_cpu_ops.getreg(reg); \
res; \
})

/******************************************************************************
* replacement of hand written assembly codes.
*/
@@ -499,6 +499,7 @@ GLOBAL_ENTRY(prefetch_stack)
END(prefetch_stack)

GLOBAL_ENTRY(kernel_execve)
rum psr.ac
mov r15=__NR_execve // put syscall number in place
break __BREAK_SYSCALL
br.ret.sptk.many rp
@@ -260,7 +260,7 @@ start_ap:
* Switch into virtual mode:
*/
movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN \
|IA64_PSR_DI)
|IA64_PSR_DI|IA64_PSR_AC)
;;
mov cr.ipsr=r16
movl r17=1f
@@ -1139,7 +1139,7 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
return previous_current;

no_mod:
printk(KERN_INFO "cpu %d, %s %s, original stack not modified\n",
mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n",
smp_processor_id(), type, msg);
return previous_current;
}
@@ -130,7 +130,7 @@ ia64_native_getreg_func(int regnum)
unsigned long res = -1;
switch (regnum) {
CASE_GET_REG(GP);
CASE_GET_REG(IP);
/*CASE_GET_REG(IP);*/ /* returned ip value shouldn't be constant */
CASE_GET_REG(PSR);
CASE_GET_REG(TP);
CASE_GET_REG(SP);
@@ -19,7 +19,6 @@
#include <linux/kernel.h>

#include <asm/page.h>
#include <asm/iommu.h>

dma_addr_t bad_dma_address __read_mostly;
EXPORT_SYMBOL(bad_dma_address);
@@ -58,7 +58,7 @@ __HCALL2(xen_set_rr, HYPERPRIVOP_SET_RR)
__HCALL2(xen_set_kr, HYPERPRIVOP_SET_KR)

#ifdef CONFIG_IA32_SUPPORT
__HCALL1(xen_get_eflag, HYPERPRIVOP_GET_EFLAG)
__HCALL0(xen_get_eflag, HYPERPRIVOP_GET_EFLAG)
__HCALL1(xen_set_eflag, HYPERPRIVOP_SET_EFLAG) // refer SDM vol1 3.1.8
#endif /* CONFIG_IA32_SUPPORT */
@@ -84,5 +84,7 @@ extern void set_434_reg(unsigned reg_offs, unsigned bit, unsigned len, unsigned
extern unsigned get_434_reg(unsigned reg_offs);
extern void set_latch_u5(unsigned char or_mask, unsigned char nand_mask);
extern unsigned char get_latch_u5(void);
extern void rb532_gpio_set_ilevel(int bit, unsigned gpio);
extern void rb532_gpio_set_istat(int bit, unsigned gpio);

#endif /* _RC32434_GPIO_H_ */
@@ -40,12 +40,14 @@
#define BTCS 0x010040
#define BTCOMPARE 0x010044
#define GPIOBASE 0x050000
#define GPIOCFG 0x050004
#define GPIOD 0x050008
#define GPIOILEVEL 0x05000C
#define GPIOISTAT 0x050010
#define GPIONMIEN 0x050014
#define IMASK6 0x038038
/* Offsets relative to GPIOBASE */
#define GPIOFUNC 0x00
#define GPIOCFG 0x04
#define GPIOD 0x08
#define GPIOILEVEL 0x0C
#define GPIOISTAT 0x10
#define GPIONMIEN 0x14
#define IMASK6 0x38
#define LO_WPX (1 << 0)
#define LO_ALE (1 << 1)
#define LO_CLE (1 << 2)
@@ -63,7 +63,7 @@ static inline int mips_clockevent_init(void)
/*
* Initialize the count register as a clocksource
*/
#ifdef CONFIG_CEVT_R4K
#ifdef CONFIG_CSRC_R4K
extern int init_mips_clocksource(void);
#else
static inline int init_mips_clocksource(void)
@@ -27,7 +27,7 @@ int __init init_mips_clocksource(void)
if (!cpu_has_counter || !mips_hpt_frequency)
return -ENXIO;

/* Calclate a somewhat reasonable rating value */
/* Calculate a somewhat reasonable rating value */
clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000;

clocksource_set_clock(&clocksource_mips, mips_hpt_frequency);
@@ -161,7 +161,7 @@ static inline int __init indy_sc_probe(void)

/* XXX Check with wje if the Indy caches can differenciate between
writeback + invalidate and just invalidate. */
struct bcache_ops indy_sc_ops = {
static struct bcache_ops indy_sc_ops = {
.bc_enable = indy_sc_enable,
.bc_disable = indy_sc_disable,
.bc_wback_inv = indy_sc_wback_invalidate,
@@ -22,9 +22,9 @@
#include <linux/init.h>
#include <linux/smp.h>

#include <asm-mips/addrspace.h>
#include <asm-mips/mips-boards/launch.h>
#include <asm-mips/mipsmtregs.h>
#include <asm/addrspace.h>
#include <asm/mips-boards/launch.h>
#include <asm/mipsmtregs.h>

int amon_cpu_avail(int cpu)
{
@@ -118,7 +118,7 @@ static struct platform_device cf_slot0 = {
/* Resources and device for NAND */
static int rb532_dev_ready(struct mtd_info *mtd)
{
return readl(IDT434_REG_BASE + GPIOD) & GPIO_RDY;
return gpio_get_value(GPIO_RDY);
}

static void rb532_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
@@ -39,10 +39,6 @@
struct rb532_gpio_chip {
struct gpio_chip chip;
void __iomem *regbase;
void (*set_int_level)(struct gpio_chip *chip, unsigned offset, int value);
int (*get_int_level)(struct gpio_chip *chip, unsigned offset);
void (*set_int_status)(struct gpio_chip *chip, unsigned offset, int value);
int (*get_int_status)(struct gpio_chip *chip, unsigned offset);
};

struct mpmc_device dev3;
@@ -111,15 +107,47 @@ unsigned char get_latch_u5(void)
}
EXPORT_SYMBOL(get_latch_u5);

/* rb532_set_bit - sanely set a bit
*
* bitval: new value for the bit
* offset: bit index in the 4 byte address range
* ioaddr: 4 byte aligned address being altered
*/
static inline void rb532_set_bit(unsigned bitval,
unsigned offset, void __iomem *ioaddr)
{
unsigned long flags;
u32 val;

bitval = !!bitval; /* map parameter to {0,1} */

local_irq_save(flags);

val = readl(ioaddr);
val &= ~( ~bitval << offset ); /* unset bit if bitval == 0 */
val |= ( bitval << offset ); /* set bit if bitval == 1 */
writel(val, ioaddr);

local_irq_restore(flags);
}

/* rb532_get_bit - read a bit
*
* returns the boolean state of the bit, which may be > 1
*/
static inline int rb532_get_bit(unsigned offset, void __iomem *ioaddr)
{
return (readl(ioaddr) & (1 << offset));
}

/*
* Return GPIO level */
static int rb532_gpio_get(struct gpio_chip *chip, unsigned offset)
{
u32 mask = 1 << offset;
struct rb532_gpio_chip *gpch;

gpch = container_of(chip, struct rb532_gpio_chip, chip);
return readl(gpch->regbase + GPIOD) & mask;
return rb532_get_bit(offset, gpch->regbase + GPIOD);
}

/*
@ -128,23 +156,10 @@ static int rb532_gpio_get(struct gpio_chip *chip, unsigned offset)
|
||||
static void rb532_gpio_set(struct gpio_chip *chip,
|
||||
unsigned offset, int value)
|
||||
{
|
||||
unsigned long flags;
|
||||
u32 mask = 1 << offset;
|
||||
u32 tmp;
|
||||
struct rb532_gpio_chip *gpch;
|
||||
void __iomem *gpvr;
|
||||
|
||||
gpch = container_of(chip, struct rb532_gpio_chip, chip);
|
||||
gpvr = gpch->regbase + GPIOD;
|
||||
|
||||
local_irq_save(flags);
|
||||
tmp = readl(gpvr);
|
||||
if (value)
|
||||
tmp |= mask;
|
||||
else
|
||||
tmp &= ~mask;
|
||||
writel(tmp, gpvr);
|
||||
local_irq_restore(flags);
|
||||
rb532_set_bit(value, offset, gpch->regbase + GPIOD);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -152,21 +167,14 @@ static void rb532_gpio_set(struct gpio_chip *chip,
|
||||
*/
|
||||
static int rb532_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
|
||||
{
|
||||
unsigned long flags;
|
||||
u32 mask = 1 << offset;
|
||||
u32 value;
|
||||
struct rb532_gpio_chip *gpch;
|
||||
void __iomem *gpdr;
|
||||
|
||||
gpch = container_of(chip, struct rb532_gpio_chip, chip);
|
||||
gpdr = gpch->regbase + GPIOCFG;
|
||||
|
||||
local_irq_save(flags);
|
||||
value = readl(gpdr);
|
||||
value &= ~mask;
|
||||
writel(value, gpdr);
|
||||
local_irq_restore(flags);
|
||||
if (rb532_get_bit(offset, gpch->regbase + GPIOFUNC))
|
||||
return 1; /* alternate function, GPIOCFG is ignored */
|
||||
|
||||
rb532_set_bit(0, offset, gpch->regbase + GPIOCFG);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -176,99 +184,20 @@ static int rb532_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
|
||||
static int rb532_gpio_direction_output(struct gpio_chip *chip,
|
||||
unsigned offset, int value)
|
||||
{
|
||||
unsigned long flags;
|
||||
u32 mask = 1 << offset;
|
||||
u32 tmp;
|
||||
struct rb532_gpio_chip *gpch;
|
||||
void __iomem *gpdr;
|
||||
|
||||
gpch = container_of(chip, struct rb532_gpio_chip, chip);
|
||||
writel(mask, gpch->regbase + GPIOD);
|
||||
gpdr = gpch->regbase + GPIOCFG;
|
||||
|
||||
local_irq_save(flags);
|
||||
tmp = readl(gpdr);
|
||||
tmp |= mask;
|
||||
writel(tmp, gpdr);
|
||||
local_irq_restore(flags);
|
||||
if (rb532_get_bit(offset, gpch->regbase + GPIOFUNC))
|
||||
return 1; /* alternate function, GPIOCFG is ignored */
|
||||
|
||||
/* set the initial output value */
|
||||
rb532_set_bit(value, offset, gpch->regbase + GPIOD);
|
||||
|
||||
rb532_set_bit(1, offset, gpch->regbase + GPIOCFG);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the GPIO interrupt level
|
||||
*/
|
||||
static void rb532_gpio_set_int_level(struct gpio_chip *chip,
|
||||
unsigned offset, int value)
|
||||
{
|
||||
unsigned long flags;
|
||||
u32 mask = 1 << offset;
|
||||
u32 tmp;
|
||||
struct rb532_gpio_chip *gpch;
|
||||
void __iomem *gpil;
|
||||
|
||||
gpch = container_of(chip, struct rb532_gpio_chip, chip);
|
||||
gpil = gpch->regbase + GPIOILEVEL;
|
||||
|
||||
local_irq_save(flags);
|
||||
tmp = readl(gpil);
|
||||
if (value)
|
||||
tmp |= mask;
|
||||
else
|
||||
tmp &= ~mask;
|
||||
writel(tmp, gpil);
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* Get the GPIO interrupt level
|
||||
*/
|
||||
static int rb532_gpio_get_int_level(struct gpio_chip *chip, unsigned offset)
|
||||
{
|
||||
u32 mask = 1 << offset;
|
||||
struct rb532_gpio_chip *gpch;
|
||||
|
||||
gpch = container_of(chip, struct rb532_gpio_chip, chip);
|
||||
return readl(gpch->regbase + GPIOILEVEL) & mask;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the GPIO interrupt status
|
||||
*/
|
||||
static void rb532_gpio_set_int_status(struct gpio_chip *chip,
|
||||
unsigned offset, int value)
|
||||
{
|
||||
unsigned long flags;
|
||||
u32 mask = 1 << offset;
|
||||
u32 tmp;
|
||||
struct rb532_gpio_chip *gpch;
|
||||
void __iomem *gpis;
|
||||
|
||||
gpch = container_of(chip, struct rb532_gpio_chip, chip);
|
||||
gpis = gpch->regbase + GPIOISTAT;
|
||||
|
||||
local_irq_save(flags);
|
||||
tmp = readl(gpis);
|
||||
if (value)
|
||||
tmp |= mask;
|
||||
else
|
||||
tmp &= ~mask;
|
||||
writel(tmp, gpis);
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* Get the GPIO interrupt status
|
||||
*/
|
||||
static int rb532_gpio_get_int_status(struct gpio_chip *chip, unsigned offset)
|
||||
{
|
||||
u32 mask = 1 << offset;
|
||||
struct rb532_gpio_chip *gpch;
|
||||
|
||||
gpch = container_of(chip, struct rb532_gpio_chip, chip);
|
||||
return readl(gpch->regbase + GPIOISTAT) & mask;
|
||||
}
|
||||
|
||||
static struct rb532_gpio_chip rb532_gpio_chip[] = {
|
||||
[0] = {
|
||||
.chip = {
|
||||
@ -280,13 +209,35 @@ static struct rb532_gpio_chip rb532_gpio_chip[] = {
|
||||
.base = 0,
|
||||
.ngpio = 32,
|
||||
},
|
||||
.get_int_level = rb532_gpio_get_int_level,
|
||||
.set_int_level = rb532_gpio_set_int_level,
|
||||
.get_int_status = rb532_gpio_get_int_status,
|
||||
.set_int_status = rb532_gpio_set_int_status,
|
||||
},
|
||||
};
|
||||
|
||||
/*
|
||||
* Set GPIO interrupt level
|
||||
*/
|
||||
void rb532_gpio_set_ilevel(int bit, unsigned gpio)
|
||||
{
|
||||
rb532_set_bit(bit, gpio, rb532_gpio_chip->regbase + GPIOILEVEL);
|
||||
}
|
||||
EXPORT_SYMBOL(rb532_gpio_set_ilevel);
|
||||
|
||||
/*
|
||||
* Set GPIO interrupt status
|
||||
*/
|
||||
void rb532_gpio_set_istat(int bit, unsigned gpio)
|
||||
{
|
||||
rb532_set_bit(bit, gpio, rb532_gpio_chip->regbase + GPIOISTAT);
|
||||
}
|
||||
EXPORT_SYMBOL(rb532_gpio_set_istat);
|
||||
|
||||
/*
|
||||
* Configure GPIO alternate function
|
||||
*/
|
||||
static void rb532_gpio_set_func(int bit, unsigned gpio)
|
||||
{
|
||||
rb532_set_bit(bit, gpio, rb532_gpio_chip->regbase + GPIOFUNC);
|
||||
}
|
||||
|
||||
int __init rb532_gpio_init(void)
|
||||
{
|
||||
struct resource *r;
|
||||
@ -310,9 +261,11 @@ int __init rb532_gpio_init(void)
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
/* Set the interrupt status and level for the CF pin */
|
||||
rb532_gpio_set_int_level(&rb532_gpio_chip->chip, CF_GPIO_NUM, 1);
|
||||
rb532_gpio_set_int_status(&rb532_gpio_chip->chip, CF_GPIO_NUM, 0);
|
||||
/* configure CF_GPIO_NUM as CFRDY IRQ source */
|
||||
rb532_gpio_set_func(0, CF_GPIO_NUM);
|
||||
rb532_gpio_direction_input(&rb532_gpio_chip->chip, CF_GPIO_NUM);
|
||||
rb532_gpio_set_ilevel(1, CF_GPIO_NUM);
|
||||
rb532_gpio_set_istat(0, CF_GPIO_NUM);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -183,10 +183,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
|
||||
* being 64 bit in both cases.
|
||||
*/
|
||||
|
||||
static long translate_usr_offset(long offset)
|
||||
static compat_ulong_t translate_usr_offset(compat_ulong_t offset)
|
||||
{
|
||||
if (offset < 0)
|
||||
return -1;
|
||||
return sizeof(struct pt_regs);
|
||||
else if (offset <= 32*4) /* gr[0..31] */
|
||||
return offset * 2 + 4;
|
||||
else if (offset <= 32*4+32*8) /* gr[0..31] + fr[0..31] */
|
||||
@ -194,7 +194,7 @@ static long translate_usr_offset(long offset)
|
||||
else if (offset < sizeof(struct pt_regs)/2 + 32*4)
|
||||
return offset * 2 + 4 - 32*8;
|
||||
else
|
||||
return -1;
|
||||
return sizeof(struct pt_regs);
|
||||
}
|
||||
|
||||
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
|
||||
@ -209,7 +209,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
|
||||
if (addr & (sizeof(compat_uint_t)-1))
|
||||
break;
|
||||
addr = translate_usr_offset(addr);
|
||||
if (addr < 0)
|
||||
if (addr >= sizeof(struct pt_regs))
|
||||
break;
|
||||
|
||||
tmp = *(compat_uint_t *) ((char *) task_regs(child) + addr);
|
||||
@ -236,7 +236,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
|
||||
if (addr & (sizeof(compat_uint_t)-1))
|
||||
break;
|
||||
addr = translate_usr_offset(addr);
|
||||
if (addr < 0)
|
||||
if (addr >= sizeof(struct pt_regs))
|
||||
break;
|
||||
if (addr >= PT_FR0 && addr <= PT_FR31 + 4) {
|
||||
/* Special case, fp regs are 64 bits anyway */
|
||||
|
@ -338,8 +338,9 @@
|
||||
#define __NR_dup3 320
|
||||
#define __NR_pipe2 321
|
||||
#define __NR_inotify_init1 322
|
||||
#define __NR_accept4 323
|
||||
|
||||
#define NR_SYSCALLS 323
|
||||
#define NR_SYSCALLS 324
|
||||
|
||||
/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
|
||||
* it never had the plain ones and there is no value to adding those
|
||||
|
@ -340,8 +340,9 @@
|
||||
#define __NR_dup3 320
|
||||
#define __NR_pipe2 321
|
||||
#define __NR_inotify_init1 322
|
||||
#define __NR_accept4 323
|
||||
|
||||
#define NR_SYSCALLS 323
|
||||
#define NR_SYSCALLS 324
|
||||
|
||||
#ifdef __KERNEL__
|
||||
#define __ARCH_WANT_IPC_PARSE_VERSION
|
||||
|
@ -81,4 +81,4 @@ sys_call_table:
|
||||
/*305*/ .long sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait
|
||||
/*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
|
||||
/*315*/ .long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
|
||||
/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1
|
||||
/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4
|
||||
|
@ -150,7 +150,7 @@ sys32_mmap2:
|
||||
sys32_socketcall: /* %o0=call, %o1=args */
|
||||
cmp %o0, 1
|
||||
bl,pn %xcc, do_einval
|
||||
cmp %o0, 17
|
||||
cmp %o0, 18
|
||||
bg,pn %xcc, do_einval
|
||||
sub %o0, 1, %o0
|
||||
sllx %o0, 5, %o0
|
||||
@ -319,6 +319,15 @@ do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int)
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
do_sys_accept4: /* sys_accept4(int, struct sockaddr *, int *, int) */
|
||||
63: ldswa [%o1 + 0x0] %asi, %o0
|
||||
sethi %hi(sys_accept4), %g1
|
||||
64: lduwa [%o1 + 0x8] %asi, %o2
|
||||
65: ldswa [%o1 + 0xc] %asi, %o3
|
||||
jmpl %g1 + %lo(sys_accept4), %g0
|
||||
66: lduwa [%o1 + 0x4] %asi, %o1
|
||||
nop
|
||||
nop
|
||||
|
||||
.section __ex_table,"a"
|
||||
.align 4
|
||||
@ -353,4 +362,6 @@ do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int)
|
||||
.word 57b, __retl_efault, 58b, __retl_efault
|
||||
.word 59b, __retl_efault, 60b, __retl_efault
|
||||
.word 61b, __retl_efault, 62b, __retl_efault
|
||||
.word 63b, __retl_efault, 64b, __retl_efault
|
||||
.word 65b, __retl_efault, 66b, __retl_efault
|
||||
.previous
|
||||
|
@ -82,7 +82,7 @@ sys_call_table32:
|
||||
.word compat_sys_set_mempolicy, compat_sys_kexec_load, compat_sys_move_pages, sys_getcpu, compat_sys_epoll_pwait
|
||||
/*310*/ .word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate
|
||||
.word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1
|
||||
/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1
|
||||
/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4
|
||||
|
||||
#endif /* CONFIG_COMPAT */
|
||||
|
||||
@ -156,4 +156,4 @@ sys_call_table:
|
||||
.word sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait
|
||||
/*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
|
||||
.word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
|
||||
/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1
|
||||
/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4
|
||||
|
@ -167,9 +167,12 @@ config GENERIC_PENDING_IRQ
|
||||
config X86_SMP
|
||||
bool
|
||||
depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64)
|
||||
select USE_GENERIC_SMP_HELPERS
|
||||
default y
|
||||
|
||||
config USE_GENERIC_SMP_HELPERS
|
||||
def_bool y
|
||||
depends on SMP
|
||||
|
||||
config X86_32_SMP
|
||||
def_bool y
|
||||
depends on X86_32 && SMP
|
||||
@ -957,7 +960,7 @@ config ARCH_PHYS_ADDR_T_64BIT
|
||||
config NUMA
|
||||
bool "Numa Memory Allocation and Scheduler Support (EXPERIMENTAL)"
|
||||
depends on SMP
|
||||
depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && BROKEN)
|
||||
depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && EXPERIMENTAL)
|
||||
default n if X86_PC
|
||||
default y if (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP)
|
||||
help
|
||||
|
@ -34,10 +34,14 @@ static inline void get_memcfg_numa(void)
|
||||
|
||||
extern int early_pfn_to_nid(unsigned long pfn);
|
||||
|
||||
extern void resume_map_numa_kva(pgd_t *pgd);
|
||||
|
||||
#else /* !CONFIG_NUMA */
|
||||
|
||||
#define get_memcfg_numa get_memcfg_numa_flat
|
||||
|
||||
static inline void resume_map_numa_kva(pgd_t *pgd) {}
|
||||
|
||||
#endif /* CONFIG_NUMA */
|
||||
|
||||
#ifdef CONFIG_DISCONTIGMEM
|
||||
|
@ -46,7 +46,7 @@ int __copy_from_user(void *dst, const void __user *src, unsigned size)
|
||||
return ret;
|
||||
case 10:
|
||||
__get_user_asm(*(u64 *)dst, (u64 __user *)src,
|
||||
ret, "q", "", "=r", 16);
|
||||
ret, "q", "", "=r", 10);
|
||||
if (unlikely(ret))
|
||||
return ret;
|
||||
__get_user_asm(*(u16 *)(8 + (char *)dst),
|
||||
|
@ -639,8 +639,8 @@ __SYSCALL(__NR_fallocate, sys_fallocate)
|
||||
__SYSCALL(__NR_timerfd_settime, sys_timerfd_settime)
|
||||
#define __NR_timerfd_gettime 287
|
||||
__SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime)
|
||||
#define __NR_paccept 288
|
||||
__SYSCALL(__NR_paccept, sys_paccept)
|
||||
#define __NR_accept4 288
|
||||
__SYSCALL(__NR_accept4, sys_accept4)
|
||||
#define __NR_signalfd4 289
|
||||
__SYSCALL(__NR_signalfd4, sys_signalfd4)
|
||||
#define __NR_eventfd2 290
|
||||
|
@ -537,7 +537,7 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
|
||||
address >>= PAGE_SHIFT;
|
||||
iommu_area_free(dom->bitmap, address, pages);
|
||||
|
||||
if (address + pages >= dom->next_bit)
|
||||
if (address >= dom->next_bit)
|
||||
dom->need_flush = true;
|
||||
}
|
||||
|
||||
|
@ -121,7 +121,7 @@ u16 amd_iommu_last_bdf; /* largest PCI device id we have
|
||||
LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings
|
||||
we find in ACPI */
|
||||
unsigned amd_iommu_aperture_order = 26; /* size of aperture in power of 2 */
|
||||
int amd_iommu_isolate; /* if 1, device isolation is enabled */
|
||||
int amd_iommu_isolate = 1; /* if 1, device isolation is enabled */
|
||||
bool amd_iommu_unmap_flush; /* if true, flush on every unmap */
|
||||
|
||||
LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
|
||||
@ -1213,7 +1213,9 @@ static int __init parse_amd_iommu_options(char *str)
|
||||
for (; *str; ++str) {
|
||||
if (strncmp(str, "isolate", 7) == 0)
|
||||
amd_iommu_isolate = 1;
|
||||
if (strncmp(str, "fullflush", 11) == 0)
|
||||
if (strncmp(str, "share", 5) == 0)
|
||||
amd_iommu_isolate = 0;
|
||||
if (strncmp(str, "fullflush", 9) == 0)
|
||||
amd_iommu_unmap_flush = true;
|
||||
}
|
||||
|
||||
|
@ -236,17 +236,33 @@ static inline struct ds_context *ds_alloc_context(struct task_struct *task)
|
||||
struct ds_context *context = *p_context;
|
||||
|
||||
if (!context) {
|
||||
spin_unlock(&ds_lock);
|
||||
|
||||
context = kzalloc(sizeof(*context), GFP_KERNEL);
|
||||
|
||||
if (!context)
|
||||
if (!context) {
|
||||
spin_lock(&ds_lock);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
context->ds = kzalloc(ds_cfg.sizeof_ds, GFP_KERNEL);
|
||||
if (!context->ds) {
|
||||
kfree(context);
|
||||
spin_lock(&ds_lock);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
spin_lock(&ds_lock);
|
||||
/*
|
||||
* Check for race - another CPU could have allocated
|
||||
* it meanwhile:
|
||||
*/
|
||||
if (*p_context) {
|
||||
kfree(context->ds);
|
||||
kfree(context);
|
||||
return *p_context;
|
||||
}
|
||||
|
||||
*p_context = context;
|
||||
|
||||
context->this = p_context;
|
||||
@ -384,14 +400,15 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
|
||||
|
||||
spin_lock(&ds_lock);
|
||||
|
||||
if (!check_tracer(task))
|
||||
return -EPERM;
|
||||
|
||||
error = -ENOMEM;
|
||||
context = ds_alloc_context(task);
|
||||
if (!context)
|
||||
goto out_unlock;
|
||||
|
||||
error = -EPERM;
|
||||
if (!check_tracer(task))
|
||||
goto out_unlock;
|
||||
|
||||
error = -EALREADY;
|
||||
if (context->owner[qual] == current)
|
||||
goto out_unlock;
|
||||
|
@ -250,31 +250,24 @@ int __init find_unisys_acpi_oem_table(unsigned long *oem_addr)
|
||||
{
|
||||
struct acpi_table_header *header = NULL;
|
||||
int i = 0;
|
||||
acpi_size tbl_size;
|
||||
|
||||
while (ACPI_SUCCESS(acpi_get_table_with_size("OEM1", i++, &header, &tbl_size))) {
|
||||
while (ACPI_SUCCESS(acpi_get_table("OEM1", i++, &header))) {
|
||||
if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) {
|
||||
struct oem_table *t = (struct oem_table *)header;
|
||||
|
||||
oem_addrX = t->OEMTableAddr;
|
||||
oem_size = t->OEMTableSize;
|
||||
early_acpi_os_unmap_memory(header, tbl_size);
|
||||
|
||||
*oem_addr = (unsigned long)__acpi_map_table(oem_addrX,
|
||||
oem_size);
|
||||
return 0;
|
||||
}
|
||||
early_acpi_os_unmap_memory(header, tbl_size);
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr)
|
||||
{
|
||||
if (!oem_addr)
|
||||
return;
|
||||
|
||||
__acpi_unmap_table((char *)oem_addr, oem_size);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -1140,6 +1140,20 @@ static void __clear_irq_vector(int irq)
|
||||
|
||||
cfg->vector = 0;
|
||||
cpus_clear(cfg->domain);
|
||||
|
||||
if (likely(!cfg->move_in_progress))
|
||||
return;
|
||||
cpus_and(mask, cfg->old_domain, cpu_online_map);
|
||||
for_each_cpu_mask_nr(cpu, mask) {
|
||||
for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
|
||||
vector++) {
|
||||
if (per_cpu(vector_irq, cpu)[vector] != irq)
|
||||
continue;
|
||||
per_cpu(vector_irq, cpu)[vector] = -1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
cfg->move_in_progress = 0;
|
||||
}
|
||||
|
||||
void __setup_vector_irq(int cpu)
|
||||
|
@ -169,6 +169,15 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
|
||||
DMI_MATCH(DMI_BOARD_NAME, "0KW626"),
|
||||
},
|
||||
},
|
||||
{ /* Handle problems with rebooting on Dell Optiplex 330 with 0KP561 */
|
||||
.callback = set_bios_reboot,
|
||||
.ident = "Dell OptiPlex 330",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 330"),
|
||||
DMI_MATCH(DMI_BOARD_NAME, "0KP561"),
|
||||
},
|
||||
},
|
||||
{ /* Handle problems with rebooting on Dell 2400's */
|
||||
.callback = set_bios_reboot,
|
||||
.ident = "Dell PowerEdge 2400",
|
||||
|
@ -764,7 +764,7 @@ static struct dmi_system_id __initdata bad_bios_dmi_table[] = {
|
||||
.callback = dmi_low_memory_corruption,
|
||||
.ident = "Phoenix BIOS",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
|
||||
DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies"),
|
||||
},
|
||||
},
|
||||
#endif
|
||||
|
@ -46,7 +46,9 @@ static __cpuinit void check_tsc_warp(void)
|
||||
cycles_t start, now, prev, end;
|
||||
int i;
|
||||
|
||||
rdtsc_barrier();
|
||||
start = get_cycles();
|
||||
rdtsc_barrier();
|
||||
/*
|
||||
* The measurement runs for 20 msecs:
|
||||
*/
|
||||
@ -61,7 +63,9 @@ static __cpuinit void check_tsc_warp(void)
|
||||
*/
|
||||
__raw_spin_lock(&sync_lock);
|
||||
prev = last_tsc;
|
||||
rdtsc_barrier();
|
||||
now = get_cycles();
|
||||
rdtsc_barrier();
|
||||
last_tsc = now;
|
||||
__raw_spin_unlock(&sync_lock);
|
||||
|
||||
|
@ -7,6 +7,7 @@
|
||||
* This file provides all the same external entries as smp.c but uses
|
||||
* the voyager hal to provide the functionality
|
||||
*/
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/kernel_stat.h>
|
||||
@ -1790,6 +1791,17 @@ void __init smp_setup_processor_id(void)
|
||||
x86_write_percpu(cpu_number, hard_smp_processor_id());
|
||||
}
|
||||
|
||||
static void voyager_send_call_func(cpumask_t callmask)
|
||||
{
|
||||
__u32 mask = cpus_addr(callmask)[0] & ~(1 << smp_processor_id());
|
||||
send_CPI(mask, VIC_CALL_FUNCTION_CPI);
|
||||
}
|
||||
|
||||
static void voyager_send_call_func_single(int cpu)
|
||||
{
|
||||
send_CPI(1 << cpu, VIC_CALL_FUNCTION_SINGLE_CPI);
|
||||
}
|
||||
|
||||
struct smp_ops smp_ops = {
|
||||
.smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu,
|
||||
.smp_prepare_cpus = voyager_smp_prepare_cpus,
|
||||
@ -1799,6 +1811,6 @@ struct smp_ops smp_ops = {
|
||||
.smp_send_stop = voyager_smp_send_stop,
|
||||
.smp_send_reschedule = voyager_smp_send_reschedule,
|
||||
|
||||
.send_call_func_ipi = native_send_call_func_ipi,
|
||||
.send_call_func_single_ipi = native_send_call_func_single_ipi,
|
||||
.send_call_func_ipi = voyager_send_call_func,
|
||||
.send_call_func_single_ipi = voyager_send_call_func_single,
|
||||
};
|
||||
|
@ -222,6 +222,41 @@ static void __init remap_numa_kva(void)
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_HIBERNATION
|
||||
/**
|
||||
* resume_map_numa_kva - add KVA mapping to the temporary page tables created
|
||||
* during resume from hibernation
|
||||
* @pgd_base - temporary resume page directory
|
||||
*/
|
||||
void resume_map_numa_kva(pgd_t *pgd_base)
|
||||
{
|
||||
int node;
|
||||
|
||||
for_each_online_node(node) {
|
||||
unsigned long start_va, start_pfn, size, pfn;
|
||||
|
||||
start_va = (unsigned long)node_remap_start_vaddr[node];
|
||||
start_pfn = node_remap_start_pfn[node];
|
||||
size = node_remap_size[node];
|
||||
|
||||
printk(KERN_DEBUG "%s: node %d\n", __FUNCTION__, node);
|
||||
|
||||
for (pfn = 0; pfn < size; pfn += PTRS_PER_PTE) {
|
||||
unsigned long vaddr = start_va + (pfn << PAGE_SHIFT);
|
||||
pgd_t *pgd = pgd_base + pgd_index(vaddr);
|
||||
pud_t *pud = pud_offset(pgd, vaddr);
|
||||
pmd_t *pmd = pmd_offset(pud, vaddr);
|
||||
|
||||
set_pmd(pmd, pfn_pmd(start_pfn + pfn,
|
||||
PAGE_KERNEL_LARGE_EXEC));
|
||||
|
||||
printk(KERN_DEBUG "%s: %08lx -> pfn %08lx\n",
|
||||
__FUNCTION__, vaddr, start_pfn + pfn);
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
static unsigned long calculate_numa_remap_pages(void)
|
||||
{
|
||||
int nid;
|
||||
|
@ -12,6 +12,7 @@
|
||||
#include <asm/system.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/mmzone.h>
|
||||
|
||||
/* Defined in hibernate_asm_32.S */
|
||||
extern int restore_image(void);
|
||||
@ -127,6 +128,9 @@ static int resume_physical_mapping_init(pgd_t *pgd_base)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resume_map_numa_kva(pgd_base);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -2847,7 +2847,7 @@ static void do_cciss_request(struct request_queue *q)
|
||||
h->maxSG = seg;
|
||||
|
||||
#ifdef CCISS_DEBUG
|
||||
printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n",
|
||||
printk(KERN_DEBUG "cciss: Submitting %lu sectors in %d segments\n",
|
||||
creq->nr_sectors, seg);
|
||||
#endif /* CCISS_DEBUG */
|
||||
|
||||
@ -3197,7 +3197,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
|
||||
|
||||
c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
|
||||
#ifdef CCISS_DEBUG
|
||||
printk("address 0 = %x\n", c->paddr);
|
||||
printk("address 0 = %lx\n", c->paddr);
|
||||
#endif /* CCISS_DEBUG */
|
||||
c->vaddr = remap_pci_mem(c->paddr, 0x250);
|
||||
|
||||
@ -3224,7 +3224,8 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
|
||||
#endif /* CCISS_DEBUG */
|
||||
cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
|
||||
#ifdef CCISS_DEBUG
|
||||
printk("cfg base address index = %x\n", cfg_base_addr_index);
|
||||
printk("cfg base address index = %llx\n",
|
||||
(unsigned long long)cfg_base_addr_index);
|
||||
#endif /* CCISS_DEBUG */
|
||||
if (cfg_base_addr_index == -1) {
|
||||
printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
|
||||
@ -3234,7 +3235,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
|
||||
|
||||
cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
|
||||
#ifdef CCISS_DEBUG
|
||||
printk("cfg offset = %x\n", cfg_offset);
|
||||
printk("cfg offset = %llx\n", (unsigned long long)cfg_offset);
|
||||
#endif /* CCISS_DEBUG */
|
||||
c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
|
||||
cfg_base_addr_index) +
|
||||
|
@ -1134,7 +1134,7 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_chip *chip)
|
||||
continue;
|
||||
|
||||
is_out = test_bit(FLAG_IS_OUT, &gdesc->flags);
|
||||
seq_printf(s, " gpio-%-3d (%-12s) %s %s",
|
||||
seq_printf(s, " gpio-%-3d (%-20.20s) %s %s",
|
||||
gpio, gdesc->label,
|
||||
is_out ? "out" : "in ",
|
||||
chip->get
|
||||
|
@ -128,6 +128,9 @@ static const char* temperature_sensors_sets[][36] = {
|
||||
/* Set 13: iMac 8,1 */
|
||||
{ "TA0P", "TC0D", "TC0H", "TC0P", "TG0D", "TG0H", "TG0P", "TH0P",
|
||||
"TL0P", "TO0P", "TW0P", "Tm0P", "Tp0P", NULL },
|
||||
/* Set 14: iMac 6,1 */
|
||||
{ "TA0P", "TC0D", "TC0H", "TC0P", "TG0D", "TG0H", "TG0P", "TH0P",
|
||||
"TO0P", "Tp0P", NULL },
|
||||
};
|
||||
|
||||
/* List of keys used to read/write fan speeds */
|
||||
@ -1296,6 +1299,8 @@ static __initdata struct dmi_match_data applesmc_dmi_data[] = {
|
||||
{ .accelerometer = 1, .light = 1, .temperature_set = 12 },
|
||||
/* iMac 8: light sensor only, temperature set 13 */
|
||||
{ .accelerometer = 0, .light = 0, .temperature_set = 13 },
|
||||
/* iMac 6: light sensor only, temperature set 14 */
|
||||
{ .accelerometer = 0, .light = 0, .temperature_set = 14 },
|
||||
};
|
||||
|
||||
/* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1".
|
||||
@ -1349,10 +1354,18 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = {
|
||||
DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME,"MacPro2") },
|
||||
&applesmc_dmi_data[4]},
|
||||
{ applesmc_dmi_match, "Apple MacPro", {
|
||||
DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "MacPro") },
|
||||
&applesmc_dmi_data[4]},
|
||||
{ applesmc_dmi_match, "Apple iMac 8", {
|
||||
DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "iMac8") },
|
||||
&applesmc_dmi_data[13]},
|
||||
{ applesmc_dmi_match, "Apple iMac 6", {
|
||||
DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "iMac6") },
|
||||
&applesmc_dmi_data[14]},
|
||||
{ applesmc_dmi_match, "Apple iMac 5", {
|
||||
DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "iMac5") },
|
||||
|
@ -1,3 +1,7 @@
|
||||
ifdef CONFIG_SGI_GRU_DEBUG
|
||||
EXTRA_CFLAGS += -DDEBUG
|
||||
endif
|
||||
|
||||
obj-$(CONFIG_SGI_GRU) := gru.o
|
||||
gru-y := grufile.o grumain.o grufault.o grutlbpurge.o gruprocfs.o grukservices.o
|
||||
|
||||
|
@ -1681,9 +1681,11 @@ static int atl2_resume(struct pci_dev *pdev)
|
||||
|
||||
ATL2_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
|
||||
|
||||
err = atl2_request_irq(adapter);
|
||||
if (netif_running(netdev) && err)
|
||||
return err;
|
||||
if (netif_running(netdev)) {
|
||||
err = atl2_request_irq(adapter);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
atl2_reset_hw(&adapter->hw);
|
||||
|
||||
|
@ -1112,7 +1112,7 @@ static void ipg_nic_rx_free_skb(struct net_device *dev)
|
||||
struct ipg_rx *rxfd = sp->rxd + entry;
|
||||
|
||||
pci_unmap_single(sp->pdev,
|
||||
le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN),
|
||||
le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
|
||||
sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
|
||||
dev_kfree_skb_irq(sp->rx_buff[entry]);
|
||||
sp->rx_buff[entry] = NULL;
|
||||
@ -1179,7 +1179,7 @@ static int ipg_nic_rx_check_error(struct net_device *dev)
|
||||
*/
|
||||
if (sp->rx_buff[entry]) {
|
||||
pci_unmap_single(sp->pdev,
|
||||
le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN),
|
||||
le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
|
||||
sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
|
||||
|
||||
dev_kfree_skb_irq(sp->rx_buff[entry]);
|
||||
@ -1245,7 +1245,7 @@ static void ipg_nic_rx_with_start(struct net_device *dev,
|
||||
if (jumbo->found_start)
|
||||
dev_kfree_skb_irq(jumbo->skb);
|
||||
|
||||
pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN),
|
||||
pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
|
||||
sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
|
||||
|
||||
skb_put(skb, sp->rxfrag_size);
|
||||
@ -1345,7 +1345,7 @@ static int ipg_nic_rx_jumbo(struct net_device *dev)
|
||||
unsigned int entry = curr % IPG_RFDLIST_LENGTH;
|
||||
struct ipg_rx *rxfd = sp->rxd + entry;
|
||||
|
||||
if (!(rxfd->rfs & le64_to_cpu(IPG_RFS_RFDDONE)))
|
||||
if (!(rxfd->rfs & cpu_to_le64(IPG_RFS_RFDDONE)))
|
||||
break;
|
||||
|
||||
switch (ipg_nic_rx_check_frame_type(dev)) {
|
||||
|
@ -1301,7 +1301,36 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
|
||||
return;
|
||||
}
|
||||
|
||||
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter);
|
||||
/**
|
||||
* ixgbe_irq_disable - Mask off interrupt generation on the NIC
|
||||
* @adapter: board private structure
|
||||
**/
|
||||
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
|
||||
{
|
||||
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
|
||||
IXGBE_WRITE_FLUSH(&adapter->hw);
|
||||
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
|
||||
int i;
|
||||
for (i = 0; i < adapter->num_msix_vectors; i++)
|
||||
synchronize_irq(adapter->msix_entries[i].vector);
|
||||
} else {
|
||||
synchronize_irq(adapter->pdev->irq);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* ixgbe_irq_enable - Enable default interrupt generation settings
|
||||
* @adapter: board private structure
|
||||
**/
|
||||
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
|
||||
{
|
||||
u32 mask;
|
||||
mask = IXGBE_EIMS_ENABLE_MASK;
|
||||
if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
|
||||
mask |= IXGBE_EIMS_GPI_SDP1;
|
||||
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
|
||||
IXGBE_WRITE_FLUSH(&adapter->hw);
|
||||
}
|
||||
|
||||
/**
|
||||
* ixgbe_intr - legacy mode Interrupt Handler
|
||||
@ -1409,37 +1438,6 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* ixgbe_irq_disable - Mask off interrupt generation on the NIC
|
||||
* @adapter: board private structure
|
||||
**/
|
||||
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
|
||||
{
|
||||
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
|
||||
IXGBE_WRITE_FLUSH(&adapter->hw);
|
||||
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
|
||||
int i;
|
||||
for (i = 0; i < adapter->num_msix_vectors; i++)
|
||||
synchronize_irq(adapter->msix_entries[i].vector);
|
||||
} else {
|
||||
synchronize_irq(adapter->pdev->irq);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* ixgbe_irq_enable - Enable default interrupt generation settings
|
||||
* @adapter: board private structure
|
||||
**/
|
||||
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
|
||||
{
|
||||
u32 mask;
|
||||
mask = IXGBE_EIMS_ENABLE_MASK;
|
||||
if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
|
||||
mask |= IXGBE_EIMS_GPI_SDP1;
|
||||
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
|
||||
IXGBE_WRITE_FLUSH(&adapter->hw);
|
||||
}
|
||||
|
||||
/**
|
||||
* ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
|
||||
*
|
||||
|
@ -912,23 +912,23 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
|
||||
skb_put(skb, framesize);
|
||||
skb->protocol = eth_type_trans(skb, jme->dev);
|
||||
|
||||
if (jme_rxsum_ok(jme, rxdesc->descwb.flags))
|
||||
if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags)))
|
||||
skb->ip_summed = CHECKSUM_UNNECESSARY;
|
||||
else
|
||||
skb->ip_summed = CHECKSUM_NONE;
|
||||
|
||||
if (rxdesc->descwb.flags & RXWBFLAG_TAGON) {
|
||||
if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) {
|
||||
if (jme->vlgrp) {
|
||||
jme->jme_vlan_rx(skb, jme->vlgrp,
|
||||
le32_to_cpu(rxdesc->descwb.vlan));
|
||||
le16_to_cpu(rxdesc->descwb.vlan));
|
||||
NET_STAT(jme).rx_bytes += 4;
|
||||
}
|
||||
} else {
|
||||
jme->jme_rx(skb);
|
||||
}
|
||||
|
||||
if ((le16_to_cpu(rxdesc->descwb.flags) & RXWBFLAG_DEST) ==
|
||||
RXWBFLAG_DEST_MUL)
|
||||
if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_DEST)) ==
|
||||
cpu_to_le16(RXWBFLAG_DEST_MUL))
|
||||
++(NET_STAT(jme).multicast);
|
||||
|
||||
NET_STAT(jme).rx_bytes += framesize;
|
||||
@ -960,7 +960,7 @@ jme_process_receive(struct jme_adapter *jme, int limit)
|
||||
rxdesc = rxring->desc;
|
||||
rxdesc += i;
|
||||
|
||||
if ((rxdesc->descwb.flags & RXWBFLAG_OWN) ||
|
||||
if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) ||
|
||||
!(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
|
||||
goto out;
|
||||
|
||||
@ -1762,10 +1762,9 @@ jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb)
|
||||
}
|
||||
|
||||
static int
|
||||
jme_tx_tso(struct sk_buff *skb,
|
||||
u16 *mss, u8 *flags)
|
||||
jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
|
||||
{
|
||||
*mss = skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT;
|
||||
*mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT);
|
||||
if (*mss) {
|
||||
*flags |= TXFLAG_LSEN;
|
||||
|
||||
@ -1825,11 +1824,11 @@ jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
|
||||
}
|
||||
|
||||
static inline void
|
||||
jme_tx_vlan(struct sk_buff *skb, u16 *vlan, u8 *flags)
|
||||
jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags)
|
||||
{
|
||||
if (vlan_tx_tag_present(skb)) {
|
||||
*flags |= TXFLAG_TAGON;
|
||||
*vlan = vlan_tx_tag_get(skb);
|
||||
*vlan = cpu_to_le16(vlan_tx_tag_get(skb));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -906,7 +906,8 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
|
||||
if (skb != NULL) {
|
||||
if (skb_queue_len(&mp->rx_recycle) <
|
||||
mp->default_rx_ring_size &&
|
||||
skb_recycle_check(skb, mp->skb_size))
|
||||
skb_recycle_check(skb, mp->skb_size +
|
||||
dma_get_cache_alignment() - 1))
|
||||
__skb_queue_head(&mp->rx_recycle, skb);
|
||||
else
|
||||
dev_kfree_skb(skb);
|
||||
@ -2478,8 +2479,8 @@ static int mv643xx_eth_shared_remove(struct platform_device *pdev)
|
||||
struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
|
||||
|
||||
if (pd == NULL || pd->shared_smi == NULL) {
|
||||
mdiobus_free(msp->smi_bus);
|
||||
mdiobus_unregister(msp->smi_bus);
|
||||
mdiobus_free(msp->smi_bus);
|
||||
}
|
||||
if (msp->err_interrupt != NO_IRQ)
|
||||
free_irq(msp->err_interrupt, msp);
|
||||
|
@ -564,20 +564,32 @@ EXPORT_SYMBOL(genphy_restart_aneg);
|
||||
*/
|
||||
int genphy_config_aneg(struct phy_device *phydev)
|
||||
{
|
||||
int result = 0;
|
||||
int result;
|
||||
|
||||
if (AUTONEG_ENABLE == phydev->autoneg) {
|
||||
int result = genphy_config_advert(phydev);
|
||||
if (AUTONEG_ENABLE != phydev->autoneg)
|
||||
return genphy_setup_forced(phydev);
|
||||
|
||||
if (result < 0) /* error */
|
||||
return result;
|
||||
result = genphy_config_advert(phydev);
|
||||
|
||||
/* Only restart aneg if we are advertising something different
|
||||
* than we were before. */
|
||||
if (result > 0)
|
||||
result = genphy_restart_aneg(phydev);
|
||||
} else
|
||||
result = genphy_setup_forced(phydev);
|
||||
if (result < 0) /* error */
|
||||
return result;
|
||||
|
||||
if (result == 0) {
|
||||
/* Advertisment hasn't changed, but maybe aneg was never on to
|
||||
* begin with? Or maybe phy was isolated? */
|
||||
int ctl = phy_read(phydev, MII_BMCR);
|
||||
|
||||
if (ctl < 0)
|
||||
return ctl;
|
||||
|
||||
if (!(ctl & BMCR_ANENABLE) || (ctl & BMCR_ISOLATE))
|
||||
result = 1; /* do restart aneg */
|
||||
}
|
||||
|
||||
/* Only restart aneg if we are advertising something different
|
||||
* than we were before. */
|
||||
if (result > 0)
|
||||
result = genphy_restart_aneg(phydev);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
@ -926,7 +926,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
|
||||
struct sh_eth_private *mdp = netdev_priv(ndev);
|
||||
struct sh_eth_txdesc *txdesc;
|
||||
u32 entry;
|
||||
int flags;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&mdp->lock, flags);
|
||||
if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
|
||||
@ -1140,7 +1140,7 @@ static int sh_mdio_init(struct net_device *ndev, int id)
|
||||
/* Hook up MII support for ethtool */
|
||||
mdp->mii_bus->name = "sh_mii";
|
||||
mdp->mii_bus->parent = &ndev->dev;
|
||||
mdp->mii_bus->id[0] = id;
|
||||
snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%x", id);
|
||||
|
||||
/* PHY IRQ */
|
||||
mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
|
||||
|
@ -1811,7 +1811,7 @@ static int __init smc911x_probe(struct net_device *dev)
|
||||
val = SMC_GET_BYTE_TEST(lp);
|
||||
DBG(SMC_DEBUG_MISC, "%s: endian probe returned 0x%04x\n", CARDNAME, val);
|
||||
if (val != 0x87654321) {
|
||||
printk(KERN_ERR "Invalid chip endian 0x08%x\n",val);
|
||||
printk(KERN_ERR "Invalid chip endian 0x%08x\n",val);
|
||||
retval = -ENODEV;
|
||||
goto err_out;
|
||||
}
|
||||
|
@ -1102,12 +1102,14 @@ static int ax88178_link_reset(struct usbnet *dev)
|
||||
mode = AX88178_MEDIUM_DEFAULT;
|
||||
|
||||
if (ecmd.speed == SPEED_1000)
|
||||
mode |= AX_MEDIUM_GM | AX_MEDIUM_ENCK;
|
||||
mode |= AX_MEDIUM_GM;
|
||||
else if (ecmd.speed == SPEED_100)
|
||||
mode |= AX_MEDIUM_PS;
|
||||
else
|
||||
mode &= ~(AX_MEDIUM_PS | AX_MEDIUM_GM);
|
||||
|
||||
mode |= AX_MEDIUM_ENCK;
|
||||
|
||||
if (ecmd.duplex == DUPLEX_FULL)
|
||||
mode |= AX_MEDIUM_FD;
|
||||
else
|
||||
|
@ -1379,7 +1379,7 @@ void iwl_rx_handle(struct iwl_priv *priv)
|
||||
|
||||
rxq->queue[i] = NULL;
|
||||
|
||||
pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
|
||||
pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->aligned_dma_addr,
|
||||
priv->hw_params.rx_buf_size,
|
||||
PCI_DMA_FROMDEVICE);
|
||||
pkt = (struct iwl_rx_packet *)rxb->skb->data;
|
||||
@ -1432,8 +1432,8 @@ void iwl_rx_handle(struct iwl_priv *priv)
|
||||
rxb->skb = NULL;
|
||||
}
|
||||
|
||||
pci_unmap_single(priv->pci_dev, rxb->dma_addr,
|
||||
priv->hw_params.rx_buf_size,
|
||||
pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
|
||||
priv->hw_params.rx_buf_size + 256,
|
||||
PCI_DMA_FROMDEVICE);
|
||||
spin_lock_irqsave(&rxq->lock, flags);
|
||||
list_add_tail(&rxb->list, &priv->rxq.rx_used);
|
||||
@ -2334,7 +2334,6 @@ static void iwl_bg_alive_start(struct work_struct *data)
|
||||
mutex_lock(&priv->mutex);
|
||||
iwl_alive_start(priv);
|
||||
mutex_unlock(&priv->mutex);
|
||||
ieee80211_notify_mac(priv->hw, IEEE80211_NOTIFY_RE_ASSOC);
|
||||
}
|
||||
|
||||
static void iwl_bg_rf_kill(struct work_struct *work)
|
||||
@ -2390,7 +2389,6 @@ static void iwl_bg_set_monitor(struct work_struct *work)
|
||||
}
|
||||
|
||||
mutex_unlock(&priv->mutex);
|
||||
ieee80211_notify_mac(priv->hw, IEEE80211_NOTIFY_RE_ASSOC);
|
||||
}
|
||||
|
||||
static void iwl_bg_run_time_calib_work(struct work_struct *work)
|
||||
|
@ -89,7 +89,8 @@ extern struct iwl_cfg iwl5100_abg_cfg;
|
||||
#define DEFAULT_LONG_RETRY_LIMIT 4U
|
||||
|
||||
struct iwl_rx_mem_buffer {
|
||||
dma_addr_t dma_addr;
|
||||
dma_addr_t real_dma_addr;
|
||||
dma_addr_t aligned_dma_addr;
|
||||
struct sk_buff *skb;
|
||||
struct list_head list;
|
||||
};
|
||||
|
@ -204,7 +204,7 @@ int iwl_rx_queue_restock(struct iwl_priv *priv)
|
||||
list_del(element);
|
||||
|
||||
/* Point to Rx buffer via next RBD in circular buffer */
|
||||
rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->dma_addr);
|
||||
rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->aligned_dma_addr);
|
||||
rxq->queue[rxq->write] = rxb;
|
||||
rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
|
||||
rxq->free_count--;
|
||||
@ -251,7 +251,7 @@ void iwl_rx_allocate(struct iwl_priv *priv)
|
||||
rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
|
||||
|
||||
/* Alloc a new receive buffer */
|
||||
rxb->skb = alloc_skb(priv->hw_params.rx_buf_size,
|
||||
rxb->skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
|
||||
__GFP_NOWARN | GFP_ATOMIC);
|
||||
if (!rxb->skb) {
|
||||
if (net_ratelimit())
|
||||
@ -266,9 +266,17 @@ void iwl_rx_allocate(struct iwl_priv *priv)
|
||||
list_del(element);
|
||||
|
||||
/* Get physical address of RB/SKB */
|
||||
rxb->dma_addr =
|
||||
pci_map_single(priv->pci_dev, rxb->skb->data,
|
||||
priv->hw_params.rx_buf_size, PCI_DMA_FROMDEVICE);
|
||||
rxb->real_dma_addr = pci_map_single(
|
||||
priv->pci_dev,
|
||||
rxb->skb->data,
|
||||
priv->hw_params.rx_buf_size + 256,
|
||||
PCI_DMA_FROMDEVICE);
|
||||
/* dma address must be no more than 36 bits */
|
||||
BUG_ON(rxb->real_dma_addr & ~DMA_BIT_MASK(36));
|
||||
/* and also 256 byte aligned! */
|
||||
rxb->aligned_dma_addr = ALIGN(rxb->real_dma_addr, 256);
|
||||
skb_reserve(rxb->skb, rxb->aligned_dma_addr - rxb->real_dma_addr);
|
||||
|
||||
list_add_tail(&rxb->list, &rxq->rx_free);
|
||||
rxq->free_count++;
|
||||
}
|
||||
@ -300,8 +308,8 @@ void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
|
||||
for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
|
||||
if (rxq->pool[i].skb != NULL) {
|
||||
pci_unmap_single(priv->pci_dev,
|
||||
rxq->pool[i].dma_addr,
|
||||
priv->hw_params.rx_buf_size,
|
||||
rxq->pool[i].real_dma_addr,
|
||||
priv->hw_params.rx_buf_size + 256,
|
||||
PCI_DMA_FROMDEVICE);
|
||||
dev_kfree_skb(rxq->pool[i].skb);
|
||||
}
|
||||
@ -354,8 +362,8 @@ void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
|
||||
* to an SKB, so we need to unmap and free potential storage */
|
||||
if (rxq->pool[i].skb != NULL) {
|
||||
pci_unmap_single(priv->pci_dev,
|
||||
rxq->pool[i].dma_addr,
|
||||
priv->hw_params.rx_buf_size,
|
||||
rxq->pool[i].real_dma_addr,
|
||||
priv->hw_params.rx_buf_size + 256,
|
||||
PCI_DMA_FROMDEVICE);
|
||||
priv->alloc_rxb_skb--;
|
||||
dev_kfree_skb(rxq->pool[i].skb);
|
||||
|
@ -5948,7 +5948,6 @@ static void iwl3945_bg_alive_start(struct work_struct *data)
|
||||
mutex_lock(&priv->mutex);
|
||||
iwl3945_alive_start(priv);
|
||||
mutex_unlock(&priv->mutex);
|
||||
ieee80211_notify_mac(priv->hw, IEEE80211_NOTIFY_RE_ASSOC);
|
||||
}
|
||||
|
||||
static void iwl3945_bg_rf_kill(struct work_struct *work)
|
||||
@ -5999,7 +5998,6 @@ static void iwl3945_bg_set_monitor(struct work_struct *work)
|
||||
IWL_ERROR("iwl3945_set_mode() failed\n");
|
||||
|
||||
mutex_unlock(&priv->mutex);
|
||||
ieee80211_notify_mac(priv->hw, IEEE80211_NOTIFY_RE_ASSOC);
|
||||
}
|
||||
|
||||
#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
|
||||
|
@ -36,7 +36,7 @@ if PARPORT
|
||||
config PARPORT_PC
|
||||
tristate "PC-style hardware"
|
||||
depends on (!SPARC64 || PCI) && !SPARC32 && !M32R && !FRV && \
|
||||
(!M68K || ISA) && !MN10300 && !AVR32
|
||||
(!M68K || ISA) && !MN10300 && !AVR32 && !BLACKFIN
|
||||
---help---
|
||||
You should say Y here if you have a PC-style parallel port. All
|
||||
IBM PC compatible computers and some Alphas have PC-style
|
||||
|
@ -1655,12 +1655,14 @@ int __init init_dmars(void)
|
||||
iommu->flush.flush_context = __iommu_flush_context;
|
||||
iommu->flush.flush_iotlb = __iommu_flush_iotlb;
|
||||
printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
|
||||
"invalidation\n", drhd->reg_base_addr);
|
||||
"invalidation\n",
|
||||
(unsigned long long)drhd->reg_base_addr);
|
||||
} else {
|
||||
iommu->flush.flush_context = qi_flush_context;
|
||||
iommu->flush.flush_iotlb = qi_flush_iotlb;
|
||||
printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
|
||||
"invalidation\n", drhd->reg_base_addr);
|
||||
"invalidation\n",
|
||||
(unsigned long long)drhd->reg_base_addr);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1832,7 +1832,7 @@ int pci_reset_function(struct pci_dev *dev)
|
||||
if (!(cap & PCI_EXP_DEVCAP_FLR))
|
||||
return -ENOTTY;
|
||||
|
||||
if (!dev->msi_enabled && !dev->msix_enabled)
|
||||
if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0)
|
||||
disable_irq(dev->irq);
|
||||
pci_save_state(dev);
|
||||
|
||||
@ -1841,7 +1841,7 @@ int pci_reset_function(struct pci_dev *dev)
|
||||
r = pci_execute_reset_function(dev);
|
||||
|
||||
pci_restore_state(dev);
|
||||
if (!dev->msi_enabled && !dev->msix_enabled)
|
||||
if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0)
|
||||
enable_irq(dev->irq);
|
||||
|
||||
return r;
|
||||
|
@ -352,21 +352,21 @@ static int map_dma_buffers(struct driver_data *drv_data)
|
||||
} else
|
||||
drv_data->tx_map_len = drv_data->len;
|
||||
|
||||
/* Stream map the rx buffer */
|
||||
drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
|
||||
drv_data->rx_map_len,
|
||||
DMA_FROM_DEVICE);
|
||||
if (dma_mapping_error(dev, drv_data->rx_dma))
|
||||
/* Stream map the tx buffer. Always do DMA_TO_DEVICE first
|
||||
* so we flush the cache *before* invalidating it, in case
|
||||
* the tx and rx buffers overlap.
|
||||
*/
|
||||
drv_data->tx_dma = dma_map_single(dev, drv_data->tx,
|
||||
drv_data->tx_map_len, DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(dev, drv_data->tx_dma))
|
||||
return 0;
|
||||
|
||||
/* Stream map the tx buffer */
|
||||
drv_data->tx_dma = dma_map_single(dev, drv_data->tx,
|
||||
drv_data->tx_map_len,
|
||||
DMA_TO_DEVICE);
|
||||
|
||||
if (dma_mapping_error(dev, drv_data->tx_dma)) {
|
||||
dma_unmap_single(dev, drv_data->rx_dma,
|
||||
/* Stream map the rx buffer */
|
||||
drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
|
||||
drv_data->rx_map_len, DMA_FROM_DEVICE);
|
||||
if (dma_mapping_error(dev, drv_data->rx_dma)) {
|
||||
dma_unmap_single(dev, drv_data->tx_dma,
|
||||
drv_data->tx_map_len, DMA_TO_DEVICE);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -506,20 +506,6 @@ static int map_dma_buffers(struct driver_data *drv_data)
|
||||
if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
|
||||
return -1;
|
||||
|
||||
/* NULL rx means write-only transfer and no map needed
|
||||
since rx DMA will not be used */
|
||||
if (drv_data->rx) {
|
||||
buf = drv_data->rx;
|
||||
drv_data->rx_dma = dma_map_single(
|
||||
dev,
|
||||
buf,
|
||||
drv_data->len,
|
||||
DMA_FROM_DEVICE);
|
||||
if (dma_mapping_error(dev, drv_data->rx_dma))
|
||||
return -1;
|
||||
drv_data->rx_dma_needs_unmap = 1;
|
||||
}
|
||||
|
||||
if (drv_data->tx == NULL) {
|
||||
/* Read only message --> use drv_data->dummy_dma_buf for dummy
|
||||
writes to achive reads */
|
||||
@ -533,18 +519,31 @@ static int map_dma_buffers(struct driver_data *drv_data)
|
||||
buf,
|
||||
drv_data->tx_map_len,
|
||||
DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(dev, drv_data->tx_dma)) {
|
||||
if (drv_data->rx_dma) {
|
||||
dma_unmap_single(dev,
|
||||
drv_data->rx_dma,
|
||||
drv_data->len,
|
||||
DMA_FROM_DEVICE);
|
||||
drv_data->rx_dma_needs_unmap = 0;
|
||||
}
|
||||
if (dma_mapping_error(dev, drv_data->tx_dma))
|
||||
return -1;
|
||||
}
|
||||
drv_data->tx_dma_needs_unmap = 1;
|
||||
|
||||
/* NULL rx means write-only transfer and no map needed
|
||||
* since rx DMA will not be used */
|
||||
if (drv_data->rx) {
|
||||
buf = drv_data->rx;
|
||||
drv_data->rx_dma = dma_map_single(dev,
|
||||
buf,
|
||||
drv_data->len,
|
||||
DMA_FROM_DEVICE);
|
||||
if (dma_mapping_error(dev, drv_data->rx_dma)) {
|
||||
if (drv_data->tx_dma) {
|
||||
dma_unmap_single(dev,
|
||||
drv_data->tx_dma,
|
||||
drv_data->tx_map_len,
|
||||
DMA_TO_DEVICE);
|
||||
drv_data->tx_dma_needs_unmap = 0;
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
drv_data->rx_dma_needs_unmap = 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -172,7 +172,6 @@ static struct usb_interface_descriptor rndis_data_intf __initdata = {
|
||||
.bDescriptorType = USB_DT_INTERFACE,
|
||||
|
||||
/* .bInterfaceNumber = DYNAMIC */
|
||||
.bAlternateSetting = 1,
|
||||
.bNumEndpoints = 2,
|
||||
.bInterfaceClass = USB_CLASS_CDC_DATA,
|
||||
.bInterfaceSubClass = 0,
|
||||
@ -303,7 +302,7 @@ static void rndis_response_available(void *_rndis)
|
||||
__le32 *data = req->buf;
|
||||
int status;
|
||||
|
||||
if (atomic_inc_return(&rndis->notify_count))
|
||||
if (atomic_inc_return(&rndis->notify_count) != 1)
|
||||
return;
|
||||
|
||||
/* Send RNDIS RESPONSE_AVAILABLE notification; a
|
||||
|
@ -66,6 +66,8 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
|
||||
{
|
||||
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
|
||||
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
|
||||
struct pci_dev *p_smbus;
|
||||
u8 rev;
|
||||
u32 temp;
|
||||
int retval;
|
||||
|
||||
@ -166,6 +168,25 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
|
||||
pci_write_config_byte(pdev, 0x4b, tmp | 0x20);
|
||||
}
|
||||
break;
|
||||
case PCI_VENDOR_ID_ATI:
|
||||
/* SB700 old version has a bug in EHCI controller,
|
||||
* which causes usb devices lose response in some cases.
|
||||
*/
|
||||
if (pdev->device == 0x4396) {
|
||||
p_smbus = pci_get_device(PCI_VENDOR_ID_ATI,
|
||||
PCI_DEVICE_ID_ATI_SBX00_SMBUS,
|
||||
NULL);
|
||||
if (!p_smbus)
|
||||
break;
|
||||
rev = p_smbus->revision;
|
||||
if ((rev == 0x3a) || (rev == 0x3b)) {
|
||||
u8 tmp;
|
||||
pci_read_config_byte(pdev, 0x53, &tmp);
|
||||
pci_write_config_byte(pdev, 0x53, tmp | (1<<3));
|
||||
}
|
||||
pci_dev_put(p_smbus);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
ehci_reset(ehci);
|
||||
|
@ -687,7 +687,10 @@ static ssize_t mon_bin_read(struct file *file, char __user *buf,
|
||||
}
|
||||
|
||||
if (rp->b_read >= sizeof(struct mon_bin_hdr)) {
|
||||
step_len = min(nbytes, (size_t)ep->len_cap);
|
||||
step_len = ep->len_cap;
|
||||
step_len -= rp->b_read - sizeof(struct mon_bin_hdr);
|
||||
if (step_len > nbytes)
|
||||
step_len = nbytes;
|
||||
offset = rp->b_out + PKT_SIZE;
|
||||
offset += rp->b_read - sizeof(struct mon_bin_hdr);
|
||||
if (offset >= rp->b_size)
|
||||
|
@ -1757,7 +1757,7 @@ static int musb_schedule(
|
||||
}
|
||||
}
|
||||
/* use bulk reserved ep1 if no other ep is free */
|
||||
if (best_end > 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
|
||||
if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
|
||||
hw_ep = musb->bulk_ep;
|
||||
if (is_in)
|
||||
head = &musb->in_bulk;
|
||||
|
@ -56,6 +56,7 @@ static void cp2101_shutdown(struct usb_serial *);
|
||||
static int debug;
|
||||
|
||||
static struct usb_device_id id_table [] = {
|
||||
{ USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
|
||||
{ USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
|
||||
{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
|
||||
{ USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */
|
||||
|
@ -167,6 +167,13 @@ UNUSUAL_DEV( 0x0421, 0x005d, 0x0001, 0x0600,
|
||||
US_SC_DEVICE, US_PR_DEVICE, NULL,
|
||||
US_FL_FIX_CAPACITY ),
|
||||
|
||||
/* Patch for Nokia 5310 capacity */
|
||||
UNUSUAL_DEV( 0x0421, 0x006a, 0x0000, 0x0591,
|
||||
"Nokia",
|
||||
"5310",
|
||||
US_SC_DEVICE, US_PR_DEVICE, NULL,
|
||||
US_FL_FIX_CAPACITY ),
|
||||
|
||||
/* Reported by Mario Rettig <mariorettig@web.de> */
|
||||
UNUSUAL_DEV( 0x0421, 0x042e, 0x0100, 0x0100,
|
||||
"Nokia",
|
||||
@ -233,14 +240,14 @@ UNUSUAL_DEV( 0x0421, 0x0495, 0x0370, 0x0370,
|
||||
US_FL_MAX_SECTORS_64 ),
|
||||
|
||||
/* Reported by Cedric Godin <cedric@belbone.be> */
|
||||
UNUSUAL_DEV( 0x0421, 0x04b9, 0x0551, 0x0551,
|
||||
UNUSUAL_DEV( 0x0421, 0x04b9, 0x0500, 0x0551,
|
||||
"Nokia",
|
||||
"5300",
|
||||
US_SC_DEVICE, US_PR_DEVICE, NULL,
|
||||
US_FL_FIX_CAPACITY ),
|
||||
|
||||
/* Reported by Richard Nauber <RichardNauber@web.de> */
|
||||
UNUSUAL_DEV( 0x0421, 0x04fa, 0x0601, 0x0601,
|
||||
UNUSUAL_DEV( 0x0421, 0x04fa, 0x0550, 0x0660,
|
||||
"Nokia",
|
||||
"6300",
|
||||
US_SC_DEVICE, US_PR_DEVICE, NULL,
|
||||
|
@ -132,7 +132,7 @@ static void init_backlight(struct atmel_lcdfb_info *sinfo)
|
||||
|
||||
bl = backlight_device_register("backlight", &sinfo->pdev->dev,
|
||||
sinfo, &atmel_lcdc_bl_ops);
|
||||
if (IS_ERR(sinfo->backlight)) {
|
||||
if (IS_ERR(bl)) {
|
||||
dev_err(&sinfo->pdev->dev, "error %ld on backlight register\n",
|
||||
PTR_ERR(bl));
|
||||
return;
|
||||
|
@ -119,6 +119,7 @@ static int da903x_backlight_probe(struct platform_device *pdev)
|
||||
default:
|
||||
dev_err(&pdev->dev, "invalid backlight device ID(%d)\n",
|
||||
pdev->id);
|
||||
kfree(data);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -130,6 +131,7 @@ static int da903x_backlight_probe(struct platform_device *pdev)
|
||||
data, &da903x_backlight_ops);
|
||||
if (IS_ERR(bl)) {
|
||||
dev_err(&pdev->dev, "failed to register backlight\n");
|
||||
kfree(data);
|
||||
return PTR_ERR(bl);
|
||||
}
|
||||
|
||||
|
@ -42,10 +42,13 @@ static int fb_notifier_callback(struct notifier_block *self,
|
||||
|
||||
mutex_lock(&ld->ops_lock);
|
||||
if (!ld->ops->check_fb || ld->ops->check_fb(ld, evdata->info)) {
|
||||
if (event == FB_EVENT_BLANK)
|
||||
ld->ops->set_power(ld, *(int *)evdata->data);
|
||||
else
|
||||
ld->ops->set_mode(ld, evdata->data);
|
||||
if (event == FB_EVENT_BLANK) {
|
||||
if (ld->ops->set_power)
|
||||
ld->ops->set_power(ld, *(int *)evdata->data);
|
||||
} else {
|
||||
if (ld->ops->set_mode)
|
||||
ld->ops->set_mode(ld, evdata->data);
|
||||
}
|
||||
}
|
||||
mutex_unlock(&ld->ops_lock);
|
||||
return 0;
|
||||
|
@ -2462,8 +2462,7 @@ static int __init cirrusfb_init(void)
|
||||
|
||||
#ifndef MODULE
|
||||
static int __init cirrusfb_setup(char *options) {
|
||||
char *this_opt, s[32];
|
||||
int i;
|
||||
char *this_opt;
|
||||
|
||||
DPRINTK("ENTER\n");
|
||||
|
||||
|
@ -230,7 +230,7 @@ static void fb_set_logo_directpalette(struct fb_info *info,
|
||||
greenshift = info->var.green.offset;
|
||||
blueshift = info->var.blue.offset;
|
||||
|
||||
for (i = 32; i < logo->clutsize; i++)
|
||||
for (i = 32; i < 32 + logo->clutsize; i++)
|
||||
palette[i] = i << redshift | i << greenshift | i << blueshift;
|
||||
}
|
||||
|
||||
|
@ -222,6 +222,9 @@ static irqreturn_t tmiofb_irq(int irq, void *__info)
|
||||
unsigned int bbisc = tmio_ioread16(par->lcr + LCR_BBISC);
|
||||
|
||||
|
||||
tmio_iowrite16(bbisc, par->lcr + LCR_BBISC);
|
||||
|
||||
#ifdef CONFIG_FB_TMIO_ACCELL
|
||||
/*
|
||||
* We were in polling mode and now we got correct irq.
|
||||
* Switch back to IRQ-based sync of command FIFO
|
||||
@ -231,9 +234,6 @@ static irqreturn_t tmiofb_irq(int irq, void *__info)
|
||||
par->use_polling = false;
|
||||
}
|
||||
|
||||
tmio_iowrite16(bbisc, par->lcr + LCR_BBISC);
|
||||
|
||||
#ifdef CONFIG_FB_TMIO_ACCELL
|
||||
if (bbisc & 1)
|
||||
wake_up(&par->wait_acc);
|
||||
#endif
|
||||
@ -938,7 +938,9 @@ static void tmiofb_dump_regs(struct platform_device *dev)
|
||||
static int tmiofb_suspend(struct platform_device *dev, pm_message_t state)
|
||||
{
|
||||
struct fb_info *info = platform_get_drvdata(dev);
|
||||
#ifdef CONFIG_FB_TMIO_ACCELL
|
||||
struct tmiofb_par *par = info->par;
|
||||
#endif
|
||||
struct mfd_cell *cell = dev->dev.platform_data;
|
||||
int retval = 0;
|
||||
|
||||
@ -950,12 +952,14 @@ static int tmiofb_suspend(struct platform_device *dev, pm_message_t state)
|
||||
info->fbops->fb_sync(info);
|
||||
|
||||
|
||||
#ifdef CONFIG_FB_TMIO_ACCELL
|
||||
/*
|
||||
* The fb should be usable even if interrupts are disabled (and they are
|
||||
* during suspend/resume). Switch temporary to forced polling.
|
||||
*/
|
||||
printk(KERN_INFO "tmiofb: switching to polling\n");
|
||||
par->use_polling = true;
|
||||
#endif
|
||||
tmiofb_hw_stop(dev);
|
||||
|
||||
if (cell->suspend)
|
||||
|
@ -2036,30 +2036,30 @@ static int viafb_vt1636_proc_write(struct file *file,
|
||||
return count;
|
||||
}
|
||||
|
||||
static void viafb_init_proc(struct proc_dir_entry *viafb_entry)
|
||||
static void viafb_init_proc(struct proc_dir_entry **viafb_entry)
|
||||
{
|
||||
struct proc_dir_entry *entry;
|
||||
viafb_entry = proc_mkdir("viafb", NULL);
|
||||
*viafb_entry = proc_mkdir("viafb", NULL);
|
||||
if (viafb_entry) {
|
||||
entry = create_proc_entry("dvp0", 0, viafb_entry);
|
||||
entry = create_proc_entry("dvp0", 0, *viafb_entry);
|
||||
if (entry) {
|
||||
entry->owner = THIS_MODULE;
|
||||
entry->read_proc = viafb_dvp0_proc_read;
|
||||
entry->write_proc = viafb_dvp0_proc_write;
|
||||
}
|
||||
entry = create_proc_entry("dvp1", 0, viafb_entry);
|
||||
entry = create_proc_entry("dvp1", 0, *viafb_entry);
|
||||
if (entry) {
|
||||
entry->owner = THIS_MODULE;
|
||||
entry->read_proc = viafb_dvp1_proc_read;
|
||||
entry->write_proc = viafb_dvp1_proc_write;
|
||||
}
|
||||
entry = create_proc_entry("dfph", 0, viafb_entry);
|
||||
entry = create_proc_entry("dfph", 0, *viafb_entry);
|
||||
if (entry) {
|
||||
entry->owner = THIS_MODULE;
|
||||
entry->read_proc = viafb_dfph_proc_read;
|
||||
entry->write_proc = viafb_dfph_proc_write;
|
||||
}
|
||||
entry = create_proc_entry("dfpl", 0, viafb_entry);
|
||||
entry = create_proc_entry("dfpl", 0, *viafb_entry);
|
||||
if (entry) {
|
||||
entry->owner = THIS_MODULE;
|
||||
entry->read_proc = viafb_dfpl_proc_read;
|
||||
@ -2068,7 +2068,7 @@ static void viafb_init_proc(struct proc_dir_entry *viafb_entry)
|
||||
if (VT1636_LVDS == viaparinfo->chip_info->lvds_chip_info.
|
||||
lvds_chip_name || VT1636_LVDS ==
|
||||
viaparinfo->chip_info->lvds_chip_info2.lvds_chip_name) {
|
||||
entry = create_proc_entry("vt1636", 0, viafb_entry);
|
||||
entry = create_proc_entry("vt1636", 0, *viafb_entry);
|
||||
if (entry) {
|
||||
entry->owner = THIS_MODULE;
|
||||
entry->read_proc = viafb_vt1636_proc_read;
|
||||
@ -2087,6 +2087,7 @@ static void viafb_remove_proc(struct proc_dir_entry *viafb_entry)
|
||||
remove_proc_entry("dfpl", viafb_entry);
|
||||
remove_proc_entry("vt1636", viafb_entry);
|
||||
remove_proc_entry("vt1625", viafb_entry);
|
||||
remove_proc_entry("viafb", NULL);
|
||||
}
|
||||
|
||||
static int __devinit via_pci_probe(void)
|
||||
@ -2348,7 +2349,7 @@ static int __devinit via_pci_probe(void)
|
||||
viafbinfo->node, viafbinfo->fix.id, default_var.xres,
|
||||
default_var.yres, default_var.bits_per_pixel);
|
||||
|
||||
viafb_init_proc(viaparinfo->proc_entry);
|
||||
viafb_init_proc(&viaparinfo->proc_entry);
|
||||
viafb_init_dac(IGA2);
|
||||
return 0;
|
||||
}
|
||||
|
@ -86,8 +86,8 @@ static struct platform_driver omap_hdq_driver = {
|
||||
static u8 omap_w1_read_byte(void *_hdq);
|
||||
static void omap_w1_write_byte(void *_hdq, u8 byte);
|
||||
static u8 omap_w1_reset_bus(void *_hdq);
|
||||
static void omap_w1_search_bus(void *_hdq, u8 search_type,
|
||||
w1_slave_found_callback slave_found);
|
||||
static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
|
||||
u8 search_type, w1_slave_found_callback slave_found);
|
||||
|
||||
|
||||
static struct w1_bus_master omap_w1_master = {
|
||||
@ -231,8 +231,8 @@ static u8 omap_w1_reset_bus(void *_hdq)
|
||||
}
|
||||
|
||||
/* W1 search callback function */
|
||||
static void omap_w1_search_bus(void *_hdq, u8 search_type,
|
||||
w1_slave_found_callback slave_found)
|
||||
static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
|
||||
u8 search_type, w1_slave_found_callback slave_found)
|
||||
{
|
||||
u64 module_id, rn_le, cs, id;
|
||||
|
||||
@ -249,7 +249,7 @@ static void omap_w1_search_bus(void *_hdq, u8 search_type,
|
||||
cs = w1_calc_crc8((u8 *)&rn_le, 7);
|
||||
id = (cs << 56) | module_id;
|
||||
|
||||
slave_found(_hdq, id);
|
||||
slave_found(master_dev, id);
|
||||
}
|
||||
|
||||
static int _omap_hdq_reset(struct hdq_data *hdq_data)
|
||||
|
@ -122,14 +122,7 @@ static struct timer_list balloon_timer;
|
||||
static void scrub_page(struct page *page)
|
||||
{
|
||||
#ifdef CONFIG_XEN_SCRUB_PAGES
|
||||
if (PageHighMem(page)) {
|
||||
void *v = kmap(page);
|
||||
clear_page(v);
|
||||
kunmap(v);
|
||||
} else {
|
||||
void *v = page_address(page);
|
||||
clear_page(v);
|
||||
}
|
||||
clear_highpage(page);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -8,7 +8,11 @@ handling fcntl(F_SETLEASE). Convert cifs to using blocking tcp
|
||||
sends, and also let tcp autotune the socket send and receive buffers.
|
||||
This reduces the number of EAGAIN errors returned by TCP/IP in
|
||||
high stress workloads (and the number of retries on socket writes
|
||||
when sending large SMBWriteX requests).
|
||||
when sending large SMBWriteX requests). Fix case in which a portion of
|
||||
data can in some cases not get written to the file on the server before the
|
||||
file is closed. Fix DFS parsing to properly handle path consumed field,
|
||||
and to handle certain codepage conversions better. Fix mount and
|
||||
umount race that can cause oops in mount or umount or reconnect.
|
||||
|
||||
Version 1.54
|
||||
------------
|
||||
|
@ -606,7 +606,15 @@ GLOBAL_EXTERN struct list_head cifs_tcp_ses_list;
|
||||
* changes to the tcon->tidStatus should be done while holding this lock.
|
||||
*/
|
||||
GLOBAL_EXTERN rwlock_t cifs_tcp_ses_lock;
|
||||
GLOBAL_EXTERN rwlock_t GlobalSMBSeslock; /* protects list inserts on 3 above */
|
||||
|
||||
/*
|
||||
* This lock protects the cifs_file->llist and cifs_file->flist
|
||||
* list operations, and updates to some flags (cifs_file->invalidHandle)
|
||||
* It will be moved to either use the tcon->stat_lock or equivalent later.
|
||||
* If cifs_tcp_ses_lock and the lock below are both needed to be held, then
|
||||
* the cifs_tcp_ses_lock must be grabbed first and released last.
|
||||
*/
|
||||
GLOBAL_EXTERN rwlock_t GlobalSMBSeslock;
|
||||
|
||||
GLOBAL_EXTERN struct list_head GlobalOplock_Q;
|
||||
|
||||
|
@ -295,7 +295,7 @@ smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
|
||||
check for tcp and smb session status done differently
|
||||
for those three - in the calling routine */
|
||||
if (tcon) {
|
||||
if (tcon->need_reconnect) {
|
||||
if (tcon->tidStatus == CifsExiting) {
|
||||
/* only tree disconnect, open, and write,
|
||||
(and ulogoff which does not have tcon)
|
||||
are allowed as we start force umount */
|
||||
|
@ -488,12 +488,13 @@ int cifs_close(struct inode *inode, struct file *file)
|
||||
pTcon = cifs_sb->tcon;
|
||||
if (pSMBFile) {
|
||||
struct cifsLockInfo *li, *tmp;
|
||||
|
||||
write_lock(&GlobalSMBSeslock);
|
||||
pSMBFile->closePend = true;
|
||||
if (pTcon) {
|
||||
/* no sense reconnecting to close a file that is
|
||||
already closed */
|
||||
if (!pTcon->need_reconnect) {
|
||||
write_unlock(&GlobalSMBSeslock);
|
||||
timeout = 2;
|
||||
while ((atomic_read(&pSMBFile->wrtPending) != 0)
|
||||
&& (timeout <= 2048)) {
|
||||
@ -510,12 +511,15 @@ int cifs_close(struct inode *inode, struct file *file)
|
||||
timeout *= 4;
|
||||
}
|
||||
if (atomic_read(&pSMBFile->wrtPending))
|
||||
cERROR(1,
|
||||
("close with pending writes"));
|
||||
rc = CIFSSMBClose(xid, pTcon,
|
||||
cERROR(1, ("close with pending write"));
|
||||
if (!pTcon->need_reconnect &&
|
||||
!pSMBFile->invalidHandle)
|
||||
rc = CIFSSMBClose(xid, pTcon,
|
||||
pSMBFile->netfid);
|
||||
}
|
||||
}
|
||||
} else
|
||||
write_unlock(&GlobalSMBSeslock);
|
||||
} else
|
||||
write_unlock(&GlobalSMBSeslock);
|
||||
|
||||
/* Delete any outstanding lock records.
|
||||
We'll lose them when the file is closed anyway. */
|
||||
@ -587,15 +591,18 @@ int cifs_closedir(struct inode *inode, struct file *file)
|
||||
pTcon = cifs_sb->tcon;
|
||||
|
||||
cFYI(1, ("Freeing private data in close dir"));
|
||||
write_lock(&GlobalSMBSeslock);
|
||||
if (!pCFileStruct->srch_inf.endOfSearch &&
|
||||
!pCFileStruct->invalidHandle) {
|
||||
pCFileStruct->invalidHandle = true;
|
||||
write_unlock(&GlobalSMBSeslock);
|
||||
rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
|
||||
cFYI(1, ("Closing uncompleted readdir with rc %d",
|
||||
rc));
|
||||
/* not much we can do if it fails anyway, ignore rc */
|
||||
rc = 0;
|
||||
}
|
||||
} else
|
||||
write_unlock(&GlobalSMBSeslock);
|
||||
ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
|
||||
if (ptmp) {
|
||||
cFYI(1, ("closedir free smb buf in srch struct"));
|
||||
|
@ -555,12 +555,14 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
|
||||
continue;
|
||||
|
||||
cifs_stats_inc(&tcon->num_oplock_brks);
|
||||
write_lock(&GlobalSMBSeslock);
|
||||
list_for_each(tmp2, &tcon->openFileList) {
|
||||
netfile = list_entry(tmp2, struct cifsFileInfo,
|
||||
tlist);
|
||||
if (pSMB->Fid != netfile->netfid)
|
||||
continue;
|
||||
|
||||
write_unlock(&GlobalSMBSeslock);
|
||||
read_unlock(&cifs_tcp_ses_lock);
|
||||
cFYI(1, ("file id match, oplock break"));
|
||||
pCifsInode = CIFS_I(netfile->pInode);
|
||||
@ -576,6 +578,7 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
|
||||
|
||||
return true;
|
||||
}
|
||||
write_unlock(&GlobalSMBSeslock);
|
||||
read_unlock(&cifs_tcp_ses_lock);
|
||||
cFYI(1, ("No matching file for oplock break"));
|
||||
return true;
|
||||
|
@ -741,11 +741,14 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
|
||||
(index_to_find < first_entry_in_buffer)) {
|
||||
/* close and restart search */
|
||||
cFYI(1, ("search backing up - close and restart search"));
|
||||
write_lock(&GlobalSMBSeslock);
|
||||
if (!cifsFile->srch_inf.endOfSearch &&
|
||||
!cifsFile->invalidHandle) {
|
||||
cifsFile->invalidHandle = true;
|
||||
write_unlock(&GlobalSMBSeslock);
|
||||
CIFSFindClose(xid, pTcon, cifsFile->netfid);
|
||||
}
|
||||
} else
|
||||
write_unlock(&GlobalSMBSeslock);
|
||||
if (cifsFile->srch_inf.ntwrk_buf_start) {
|
||||
cFYI(1, ("freeing SMB ff cache buf on search rewind"));
|
||||
if (cifsFile->srch_inf.smallBuf)
|
||||
|
@ -1037,17 +1037,14 @@ static int
|
||||
decrypt_passphrase_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
|
||||
struct ecryptfs_crypt_stat *crypt_stat)
|
||||
{
|
||||
struct scatterlist dst_sg;
|
||||
struct scatterlist src_sg;
|
||||
struct scatterlist dst_sg[2];
|
||||
struct scatterlist src_sg[2];
|
||||
struct mutex *tfm_mutex;
|
||||
struct blkcipher_desc desc = {
|
||||
.flags = CRYPTO_TFM_REQ_MAY_SLEEP
|
||||
};
|
||||
int rc = 0;
|
||||
|
||||
sg_init_table(&dst_sg, 1);
|
||||
sg_init_table(&src_sg, 1);
|
||||
|
||||
if (unlikely(ecryptfs_verbosity > 0)) {
|
||||
ecryptfs_printk(
|
||||
KERN_DEBUG, "Session key encryption key (size [%d]):\n",
|
||||
@ -1066,8 +1063,8 @@ decrypt_passphrase_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
|
||||
}
|
||||
rc = virt_to_scatterlist(auth_tok->session_key.encrypted_key,
|
||||
auth_tok->session_key.encrypted_key_size,
|
||||
&src_sg, 1);
|
||||
if (rc != 1) {
|
||||
src_sg, 2);
|
||||
if (rc < 1 || rc > 2) {
|
||||
printk(KERN_ERR "Internal error whilst attempting to convert "
|
||||
"auth_tok->session_key.encrypted_key to scatterlist; "
|
||||
"expected rc = 1; got rc = [%d]. "
|
||||
@ -1079,8 +1076,8 @@ decrypt_passphrase_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
|
||||
auth_tok->session_key.encrypted_key_size;
|
||||
rc = virt_to_scatterlist(auth_tok->session_key.decrypted_key,
|
||||
auth_tok->session_key.decrypted_key_size,
|
||||
&dst_sg, 1);
|
||||
if (rc != 1) {
|
||||
dst_sg, 2);
|
||||
if (rc < 1 || rc > 2) {
|
||||
printk(KERN_ERR "Internal error whilst attempting to convert "
|
||||
"auth_tok->session_key.decrypted_key to scatterlist; "
|
||||
"expected rc = 1; got rc = [%d]\n", rc);
|
||||
@ -1096,7 +1093,7 @@ decrypt_passphrase_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
|
||||
rc = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
rc = crypto_blkcipher_decrypt(&desc, &dst_sg, &src_sg,
|
||||
rc = crypto_blkcipher_decrypt(&desc, dst_sg, src_sg,
|
||||
auth_tok->session_key.encrypted_key_size);
|
||||
mutex_unlock(tfm_mutex);
|
||||
if (unlikely(rc)) {
|
||||
@ -1539,8 +1536,8 @@ write_tag_3_packet(char *dest, size_t *remaining_bytes,
|
||||
size_t i;
|
||||
size_t encrypted_session_key_valid = 0;
|
||||
char session_key_encryption_key[ECRYPTFS_MAX_KEY_BYTES];
|
||||
struct scatterlist dst_sg;
|
||||
struct scatterlist src_sg;
|
||||
struct scatterlist dst_sg[2];
|
||||
struct scatterlist src_sg[2];
|
||||
struct mutex *tfm_mutex = NULL;
|
||||
u8 cipher_code;
|
||||
size_t packet_size_length;
|
||||
@ -1619,8 +1616,8 @@ write_tag_3_packet(char *dest, size_t *remaining_bytes,
|
||||
ecryptfs_dump_hex(session_key_encryption_key, 16);
|
||||
}
|
||||
rc = virt_to_scatterlist(crypt_stat->key, key_rec->enc_key_size,
|
||||
&src_sg, 1);
|
||||
if (rc != 1) {
|
||||
src_sg, 2);
|
||||
if (rc < 1 || rc > 2) {
|
||||
ecryptfs_printk(KERN_ERR, "Error generating scatterlist "
|
||||
"for crypt_stat session key; expected rc = 1; "
|
||||
"got rc = [%d]. key_rec->enc_key_size = [%d]\n",
|
||||
@ -1629,8 +1626,8 @@ write_tag_3_packet(char *dest, size_t *remaining_bytes,
|
||||
goto out;
|
||||
}
|
||||
rc = virt_to_scatterlist(key_rec->enc_key, key_rec->enc_key_size,
|
||||
&dst_sg, 1);
|
||||
if (rc != 1) {
|
||||
dst_sg, 2);
|
||||
if (rc < 1 || rc > 2) {
|
||||
ecryptfs_printk(KERN_ERR, "Error generating scatterlist "
|
||||
"for crypt_stat encrypted session key; "
|
||||
"expected rc = 1; got rc = [%d]. "
|
||||
@ -1651,7 +1648,7 @@ write_tag_3_packet(char *dest, size_t *remaining_bytes,
|
||||
rc = 0;
|
||||
ecryptfs_printk(KERN_DEBUG, "Encrypting [%d] bytes of the key\n",
|
||||
crypt_stat->key_size);
|
||||
rc = crypto_blkcipher_encrypt(&desc, &dst_sg, &src_sg,
|
||||
rc = crypto_blkcipher_encrypt(&desc, dst_sg, src_sg,
|
||||
(*key_rec).enc_key_size);
|
||||
mutex_unlock(tfm_mutex);
|
||||
if (rc) {
|
||||
|
@ -81,7 +81,7 @@ extern int do_rmdir(const char *file);
|
||||
extern int do_mknod(const char *file, int mode, unsigned int major,
|
||||
unsigned int minor);
|
||||
extern int link_file(const char *from, const char *to);
|
||||
extern int do_readlink(char *file, char *buf, int size);
|
||||
extern int hostfs_do_readlink(char *file, char *buf, int size);
|
||||
extern int rename_file(char *from, char *to);
|
||||
extern int do_statfs(char *root, long *bsize_out, long long *blocks_out,
|
||||
long long *bfree_out, long long *bavail_out,
|
||||
|
@ -168,7 +168,7 @@ static char *follow_link(char *link)
|
||||
if (name == NULL)
|
||||
goto out;
|
||||
|
||||
n = do_readlink(link, name, len);
|
||||
n = hostfs_do_readlink(link, name, len);
|
||||
if (n < len)
|
||||
break;
|
||||
len *= 2;
|
||||
@ -943,7 +943,7 @@ int hostfs_link_readpage(struct file *file, struct page *page)
|
||||
name = inode_name(page->mapping->host, 0);
|
||||
if (name == NULL)
|
||||
return -ENOMEM;
|
||||
err = do_readlink(name, buffer, PAGE_CACHE_SIZE);
|
||||
err = hostfs_do_readlink(name, buffer, PAGE_CACHE_SIZE);
|
||||
kfree(name);
|
||||
if (err == PAGE_CACHE_SIZE)
|
||||
err = -E2BIG;
|
||||
|
@ -377,7 +377,7 @@ int link_file(const char *to, const char *from)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int do_readlink(char *file, char *buf, int size)
|
||||
int hostfs_do_readlink(char *file, char *buf, int size)
|
||||
{
|
||||
int n;
|
||||
|
||||
|
@ -1378,7 +1378,7 @@ static int may_delete(struct inode *dir,struct dentry *victim,int isdir)
|
||||
if (IS_APPEND(dir))
|
||||
return -EPERM;
|
||||
if (check_sticky(dir, victim->d_inode)||IS_APPEND(victim->d_inode)||
|
||||
IS_IMMUTABLE(victim->d_inode))
|
||||
IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode))
|
||||
return -EPERM;
|
||||
if (isdir) {
|
||||
if (!S_ISDIR(victim->d_inode->i_mode))
|
||||
|
@ -74,8 +74,6 @@ static inline int cpuset_do_slab_mem_spread(void)
|
||||
return current->flags & PF_SPREAD_SLAB;
|
||||
}
|
||||
|
||||
extern void cpuset_track_online_nodes(void);
|
||||
|
||||
extern int current_cpuset_is_being_rebound(void);
|
||||
|
||||
extern void rebuild_sched_domains(void);
|
||||
@ -151,8 +149,6 @@ static inline int cpuset_do_slab_mem_spread(void)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void cpuset_track_online_nodes(void) {}
|
||||
|
||||
static inline int current_cpuset_is_being_rebound(void)
|
||||
{
|
||||
return 0;
|
||||
|
@ -40,7 +40,7 @@
|
||||
#define SYS_GETSOCKOPT 15 /* sys_getsockopt(2) */
|
||||
#define SYS_SENDMSG 16 /* sys_sendmsg(2) */
|
||||
#define SYS_RECVMSG 17 /* sys_recvmsg(2) */
|
||||
#define SYS_PACCEPT 18 /* sys_paccept(2) */
|
||||
#define SYS_ACCEPT4 18 /* sys_accept4(2) */
|
||||
|
||||
typedef enum {
|
||||
SS_FREE = 0, /* not allocated */
|
||||
@ -100,7 +100,7 @@ enum sock_type {
|
||||
* remaining bits are used as flags. */
|
||||
#define SOCK_TYPE_MASK 0xf
|
||||
|
||||
/* Flags for socket, socketpair, paccept */
|
||||
/* Flags for socket, socketpair, accept4 */
|
||||
#define SOCK_CLOEXEC O_CLOEXEC
|
||||
#ifndef SOCK_NONBLOCK
|
||||
#define SOCK_NONBLOCK O_NONBLOCK
|
||||
@ -223,8 +223,6 @@ extern int sock_map_fd(struct socket *sock, int flags);
|
||||
extern struct socket *sockfd_lookup(int fd, int *err);
|
||||
#define sockfd_put(sock) fput(sock->file)
|
||||
extern int net_ratelimit(void);
|
||||
extern long do_accept(int fd, struct sockaddr __user *upeer_sockaddr,
|
||||
int __user *upeer_addrlen, int flags);
|
||||
|
||||
#define net_random() random32()
|
||||
#define net_srandom(seed) srandom32((__force u32)seed)
|
||||
|
@ -410,8 +410,7 @@ asmlinkage long sys_getsockopt(int fd, int level, int optname,
|
||||
asmlinkage long sys_bind(int, struct sockaddr __user *, int);
|
||||
asmlinkage long sys_connect(int, struct sockaddr __user *, int);
|
||||
asmlinkage long sys_accept(int, struct sockaddr __user *, int __user *);
|
||||
asmlinkage long sys_paccept(int, struct sockaddr __user *, int __user *,
|
||||
const __user sigset_t *, size_t, int);
|
||||
asmlinkage long sys_accept4(int, struct sockaddr __user *, int __user *, int);
|
||||
asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
|
||||
asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
|
||||
asmlinkage long sys_send(int, void __user *, size_t, unsigned);
|
||||
|
@ -74,11 +74,19 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* enum ieee80211_notification_type - Low level driver notification
|
||||
* @IEEE80211_NOTIFY_RE_ASSOC: start the re-association sequence
|
||||
* struct ieee80211_ht_bss_info - describing BSS's HT characteristics
|
||||
*
|
||||
* This structure describes most essential parameters needed
|
||||
* to describe 802.11n HT characteristics in a BSS.
|
||||
*
|
||||
* @primary_channel: channel number of primery channel
|
||||
* @bss_cap: 802.11n's general BSS capabilities (e.g. channel width)
|
||||
* @bss_op_mode: 802.11n's BSS operation modes (e.g. HT protection)
|
||||
*/
|
||||
enum ieee80211_notification_types {
|
||||
IEEE80211_NOTIFY_RE_ASSOC,
|
||||
struct ieee80211_ht_bss_info {
|
||||
u8 primary_channel;
|
||||
u8 bss_cap; /* use IEEE80211_HT_IE_CHA_ */
|
||||
u8 bss_op_mode; /* use IEEE80211_HT_IE_ */
|
||||
};
|
||||
|
||||
/**
|
||||
@ -1854,18 +1862,6 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid);
|
||||
void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw, const u8 *ra,
|
||||
u16 tid);
|
||||
|
||||
/**
|
||||
* ieee80211_notify_mac - low level driver notification
|
||||
* @hw: pointer as obtained from ieee80211_alloc_hw().
|
||||
* @notif_type: enum ieee80211_notification_types
|
||||
*
|
||||
* This function must be called by low level driver to inform mac80211 of
|
||||
* low level driver status change or force mac80211 to re-assoc for low
|
||||
* level driver internal error that require re-assoc.
|
||||
*/
|
||||
void ieee80211_notify_mac(struct ieee80211_hw *hw,
|
||||
enum ieee80211_notification_types notif_type);
|
||||
|
||||
/**
|
||||
* ieee80211_find_sta - find a station
|
||||
*
|
||||
|
14
ipc/util.c
14
ipc/util.c
@ -266,9 +266,17 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
|
||||
if (ids->in_use >= size)
|
||||
return -ENOSPC;
|
||||
|
||||
spin_lock_init(&new->lock);
|
||||
new->deleted = 0;
|
||||
rcu_read_lock();
|
||||
spin_lock(&new->lock);
|
||||
|
||||
err = idr_get_new(&ids->ipcs_idr, new, &id);
|
||||
if (err)
|
||||
if (err) {
|
||||
spin_unlock(&new->lock);
|
||||
rcu_read_unlock();
|
||||
return err;
|
||||
}
|
||||
|
||||
ids->in_use++;
|
||||
|
||||
@ -280,10 +288,6 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
|
||||
ids->seq = 0;
|
||||
|
||||
new->id = ipc_buildid(id, new->seq);
|
||||
spin_lock_init(&new->lock);
|
||||
new->deleted = 0;
|
||||
rcu_read_lock();
|
||||
spin_lock(&new->lock);
|
||||
return id;
|
||||
}
|
||||
|
||||
|
@ -2039,10 +2039,13 @@ int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
|
||||
struct cgroup *cgrp;
|
||||
struct cgroup_iter it;
|
||||
struct task_struct *tsk;
|
||||
|
||||
/*
|
||||
* Validate dentry by checking the superblock operations
|
||||
* Validate dentry by checking the superblock operations,
|
||||
* and make sure it's a directory.
|
||||
*/
|
||||
if (dentry->d_sb->s_op != &cgroup_ops)
|
||||
if (dentry->d_sb->s_op != &cgroup_ops ||
|
||||
!S_ISDIR(dentry->d_inode->i_mode))
|
||||
goto err;
|
||||
|
||||
ret = 0;
|
||||
@ -2472,10 +2475,7 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
|
||||
mutex_unlock(&cgroup_mutex);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
parent = cgrp->parent;
|
||||
root = cgrp->root;
|
||||
sb = root->sb;
|
||||
mutex_unlock(&cgroup_mutex);
|
||||
|
||||
/*
|
||||
* Call pre_destroy handlers of subsys. Notify subsystems
|
||||
@ -2483,7 +2483,14 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
|
||||
*/
|
||||
cgroup_call_pre_destroy(cgrp);
|
||||
|
||||
if (cgroup_has_css_refs(cgrp)) {
|
||||
mutex_lock(&cgroup_mutex);
|
||||
parent = cgrp->parent;
|
||||
root = cgrp->root;
|
||||
sb = root->sb;
|
||||
|
||||
if (atomic_read(&cgrp->count)
|
||||
|| !list_empty(&cgrp->children)
|
||||
|| cgroup_has_css_refs(cgrp)) {
|
||||
mutex_unlock(&cgroup_mutex);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
@ -36,6 +36,7 @@
|
||||
#include <linux/list.h>
|
||||
#include <linux/mempolicy.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/memory.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mount.h>
|
||||
#include <linux/namei.h>
|
||||
@ -2015,12 +2016,23 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
|
||||
* Call this routine anytime after node_states[N_HIGH_MEMORY] changes.
|
||||
* See also the previous routine cpuset_track_online_cpus().
|
||||
*/
|
||||
void cpuset_track_online_nodes(void)
|
||||
static int cpuset_track_online_nodes(struct notifier_block *self,
|
||||
unsigned long action, void *arg)
|
||||
{
|
||||
cgroup_lock();
|
||||
top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
|
||||
scan_for_empty_cpusets(&top_cpuset);
|
||||
switch (action) {
|
||||
case MEM_ONLINE:
|
||||
top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
|
||||
break;
|
||||
case MEM_OFFLINE:
|
||||
top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
|
||||
scan_for_empty_cpusets(&top_cpuset);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
cgroup_unlock();
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
#endif
|
||||
|
||||
@ -2036,6 +2048,7 @@ void __init cpuset_init_smp(void)
|
||||
top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
|
||||
|
||||
hotcpu_notifier(cpuset_track_online_cpus, 0);
|
||||
hotplug_memory_notifier(cpuset_track_online_nodes, 10);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -304,17 +304,24 @@ int sprint_symbol(char *buffer, unsigned long address)
|
||||
char *modname;
|
||||
const char *name;
|
||||
unsigned long offset, size;
|
||||
char namebuf[KSYM_NAME_LEN];
|
||||
int len;
|
||||
|
||||
name = kallsyms_lookup(address, &size, &offset, &modname, namebuf);
|
||||
name = kallsyms_lookup(address, &size, &offset, &modname, buffer);
|
||||
if (!name)
|
||||
return sprintf(buffer, "0x%lx", address);
|
||||
|
||||
if (name != buffer)
|
||||
strcpy(buffer, name);
|
||||
len = strlen(buffer);
|
||||
buffer += len;
|
||||
|
||||
if (modname)
|
||||
return sprintf(buffer, "%s+%#lx/%#lx [%s]", name, offset,
|
||||
size, modname);
|
||||
len += sprintf(buffer, "+%#lx/%#lx [%s]",
|
||||
offset, size, modname);
|
||||
else
|
||||
return sprintf(buffer, "%s+%#lx/%#lx", name, offset, size);
|
||||
len += sprintf(buffer, "+%#lx/%#lx", offset, size);
|
||||
|
||||
return len;
|
||||
}
|
||||
|
||||
/* Look up a kernel symbol and print it to the kernel messages. */
|
||||
|
@ -31,7 +31,7 @@ cond_syscall(sys_socketpair);
|
||||
cond_syscall(sys_bind);
|
||||
cond_syscall(sys_listen);
|
||||
cond_syscall(sys_accept);
|
||||
cond_syscall(sys_paccept);
|
||||
cond_syscall(sys_accept4);
|
||||
cond_syscall(sys_connect);
|
||||
cond_syscall(sys_getsockname);
|
||||
cond_syscall(sys_getpeername);
|
||||
|
@ -326,96 +326,89 @@ ftrace_record_ip(unsigned long ip)
|
||||
|
||||
static int
|
||||
__ftrace_replace_code(struct dyn_ftrace *rec,
|
||||
unsigned char *old, unsigned char *new, int enable)
|
||||
unsigned char *nop, int enable)
|
||||
{
|
||||
unsigned long ip, fl;
|
||||
unsigned char *call, *old, *new;
|
||||
|
||||
ip = rec->ip;
|
||||
|
||||
if (ftrace_filtered && enable) {
|
||||
/*
|
||||
* If filtering is on:
|
||||
*
|
||||
* If this record is set to be filtered and
|
||||
* is enabled then do nothing.
|
||||
*
|
||||
* If this record is set to be filtered and
|
||||
* it is not enabled, enable it.
|
||||
*
|
||||
* If this record is not set to be filtered
|
||||
* and it is not enabled do nothing.
|
||||
*
|
||||
* If this record is set not to trace then
|
||||
* do nothing.
|
||||
*
|
||||
* If this record is set not to trace and
|
||||
* it is enabled then disable it.
|
||||
*
|
||||
* If this record is not set to be filtered and
|
||||
* it is enabled, disable it.
|
||||
*/
|
||||
|
||||
fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
|
||||
FTRACE_FL_ENABLED);
|
||||
|
||||
if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
|
||||
(fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
|
||||
!fl || (fl == FTRACE_FL_NOTRACE))
|
||||
/*
|
||||
* If this record is not to be traced and
|
||||
* it is not enabled then do nothing.
|
||||
*
|
||||
* If this record is not to be traced and
|
||||
* it is enabled then disabled it.
|
||||
*
|
||||
*/
|
||||
if (rec->flags & FTRACE_FL_NOTRACE) {
|
||||
if (rec->flags & FTRACE_FL_ENABLED)
|
||||
rec->flags &= ~FTRACE_FL_ENABLED;
|
||||
else
|
||||
return 0;
|
||||
|
||||
} else if (ftrace_filtered && enable) {
|
||||
/*
|
||||
* If it is enabled disable it,
|
||||
* otherwise enable it!
|
||||
* Filtering is on:
|
||||
*/
|
||||
if (fl & FTRACE_FL_ENABLED) {
|
||||
/* swap new and old */
|
||||
new = old;
|
||||
old = ftrace_call_replace(ip, FTRACE_ADDR);
|
||||
|
||||
fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
|
||||
|
||||
/* Record is filtered and enabled, do nothing */
|
||||
if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
|
||||
return 0;
|
||||
|
||||
/* Record is not filtered and is not enabled do nothing */
|
||||
if (!fl)
|
||||
return 0;
|
||||
|
||||
/* Record is not filtered but enabled, disable it */
|
||||
if (fl == FTRACE_FL_ENABLED)
|
||||
rec->flags &= ~FTRACE_FL_ENABLED;
|
||||
} else {
|
||||
new = ftrace_call_replace(ip, FTRACE_ADDR);
|
||||
else
|
||||
/* Otherwise record is filtered but not enabled, enable it */
|
||||
rec->flags |= FTRACE_FL_ENABLED;
|
||||
}
|
||||
} else {
|
||||
/* Disable or not filtered */
|
||||
|
||||
if (enable) {
|
||||
/*
|
||||
* If this record is set not to trace and is
|
||||
* not enabled, do nothing.
|
||||
*/
|
||||
fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
|
||||
if (fl == FTRACE_FL_NOTRACE)
|
||||
return 0;
|
||||
|
||||
new = ftrace_call_replace(ip, FTRACE_ADDR);
|
||||
} else
|
||||
old = ftrace_call_replace(ip, FTRACE_ADDR);
|
||||
|
||||
if (enable) {
|
||||
/* if record is enabled, do nothing */
|
||||
if (rec->flags & FTRACE_FL_ENABLED)
|
||||
return 0;
|
||||
|
||||
rec->flags |= FTRACE_FL_ENABLED;
|
||||
|
||||
} else {
|
||||
|
||||
/* if record is not enabled do nothing */
|
||||
if (!(rec->flags & FTRACE_FL_ENABLED))
|
||||
return 0;
|
||||
|
||||
rec->flags &= ~FTRACE_FL_ENABLED;
|
||||
}
|
||||
}
|
||||
|
||||
call = ftrace_call_replace(ip, FTRACE_ADDR);
|
||||
|
||||
if (rec->flags & FTRACE_FL_ENABLED) {
|
||||
old = nop;
|
||||
new = call;
|
||||
} else {
|
||||
old = call;
|
||||
new = nop;
|
||||
}
|
||||
|
||||
return ftrace_modify_code(ip, old, new);
|
||||
}
|
||||
|
||||
static void ftrace_replace_code(int enable)
|
||||
{
|
||||
int i, failed;
|
||||
unsigned char *new = NULL, *old = NULL;
|
||||
unsigned char *nop = NULL;
|
||||
struct dyn_ftrace *rec;
|
||||
struct ftrace_page *pg;
|
||||
|
||||
if (enable)
|
||||
old = ftrace_nop_replace();
|
||||
else
|
||||
new = ftrace_nop_replace();
|
||||
nop = ftrace_nop_replace();
|
||||
|
||||
for (pg = ftrace_pages_start; pg; pg = pg->next) {
|
||||
for (i = 0; i < pg->index; i++) {
|
||||
@ -433,7 +426,7 @@ static void ftrace_replace_code(int enable)
|
||||
unfreeze_record(rec);
|
||||
}
|
||||
|
||||
failed = __ftrace_replace_code(rec, old, new, enable);
|
||||
failed = __ftrace_replace_code(rec, nop, enable);
|
||||
if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
|
||||
rec->flags |= FTRACE_FL_FAILED;
|
||||
if ((system_state == SYSTEM_BOOTING) ||
|
||||
@ -534,8 +527,7 @@ static void ftrace_startup(void)
|
||||
|
||||
mutex_lock(&ftrace_start_lock);
|
||||
ftrace_start++;
|
||||
if (ftrace_start == 1)
|
||||
command |= FTRACE_ENABLE_CALLS;
|
||||
command |= FTRACE_ENABLE_CALLS;
|
||||
|
||||
if (saved_ftrace_func != ftrace_trace_function) {
|
||||
saved_ftrace_func = ftrace_trace_function;
|
||||
@ -734,6 +726,9 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
|
||||
((iter->flags & FTRACE_ITER_FAILURES) &&
|
||||
!(rec->flags & FTRACE_FL_FAILED)) ||
|
||||
|
||||
((iter->flags & FTRACE_ITER_FILTER) &&
|
||||
!(rec->flags & FTRACE_FL_FILTER)) ||
|
||||
|
||||
((iter->flags & FTRACE_ITER_NOTRACE) &&
|
||||
!(rec->flags & FTRACE_FL_NOTRACE))) {
|
||||
rec = NULL;
|
||||
@ -1186,7 +1181,7 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
|
||||
|
||||
mutex_lock(&ftrace_sysctl_lock);
|
||||
mutex_lock(&ftrace_start_lock);
|
||||
if (iter->filtered && ftrace_start && ftrace_enabled)
|
||||
if (ftrace_start && ftrace_enabled)
|
||||
ftrace_run_update_code(FTRACE_ENABLE_CALLS);
|
||||
mutex_unlock(&ftrace_start_lock);
|
||||
mutex_unlock(&ftrace_sysctl_lock);
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user