commit d978a6361a

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

    Conflicts:
        drivers/nfc/microread/mei.c
        net/netfilter/nfnetlink_queue_core.c

    Pull in 'net' to get Eric Biederman's AF_UNIX fix, upon which some
    cleanups are going to go on-top.

    Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -890,9 +890,8 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
     enable_msi      - Enable Message Signaled Interrupt (MSI) (default = off)
     power_save      - Automatic power-saving timeout (in second, 0 =
                       disable)
-    power_save_controller - Support runtime D3 of HD-audio controller
-                      (-1 = on for supported chip (default), false = off,
-                      true = force to on even for unsupported hardware)
+    power_save_controller - Reset HD-audio controller in power-saving mode
+                      (default = on)
     align_buffer_size - Force rounding of buffer/period sizes to multiples
                       of 128 bytes. This is more efficient in terms of memory
                       access but isn't required by the HDA spec and prevents
@@ -6953,7 +6953,6 @@ F:	drivers/scsi/st*
 
 SCTP PROTOCOL
 M:	Vlad Yasevich <vyasevich@gmail.com>
-M:	Sridhar Samudrala <sri@us.ibm.com>
 M:	Neil Horman <nhorman@tuxdriver.com>
 L:	linux-sctp@vger.kernel.org
 W:	http://lksctp.sourceforge.net
@@ -1183,9 +1183,9 @@ config ARM_NR_BANKS
 	default 8
 
 config IWMMXT
-	bool "Enable iWMMXt support"
+	bool "Enable iWMMXt support" if !CPU_PJ4
 	depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4
-	default y if PXA27x || PXA3xx || ARCH_MMP
+	default y if PXA27x || PXA3xx || ARCH_MMP || CPU_PJ4
 	help
 	  Enable support for iWMMXt context switching at run time if
 	  running on a CPU that supports it.
@@ -1439,6 +1439,16 @@ config ARM_ERRATA_775420
 	  to deadlock. This workaround puts DSB before executing ISB if
 	  an abort may occur on cache maintenance.
 
+config ARM_ERRATA_798181
+	bool "ARM errata: TLBI/DSB failure on Cortex-A15"
+	depends on CPU_V7 && SMP
+	help
+	  On Cortex-A15 (r0p0..r3p2) the TLBI*IS/DSB operations are not
+	  adequately shooting down all use of the old entries. This
+	  option enables the Linux kernel workaround for this erratum
+	  which sends an IPI to the CPUs that are running the same ASID
+	  as the one being invalidated.
+
 endmenu
 
 source "arch/arm/common/Kconfig"
@@ -24,7 +24,7 @@ extern struct arm_delay_ops {
 	void (*delay)(unsigned long);
 	void (*const_udelay)(unsigned long);
 	void (*udelay)(unsigned long);
-	bool const_clock;
+	unsigned long ticks_per_jiffy;
 } arm_delay_ops;
 
 #define __delay(n)		arm_delay_ops.delay(n)
@@ -41,6 +41,13 @@ extern void kunmap_high(struct page *page);
 #endif
 #endif
 
+/*
+ * Needed to be able to broadcast the TLB invalidation for kmap.
+ */
+#ifdef CONFIG_ARM_ERRATA_798181
+#undef ARCH_NEEDS_KMAP_HIGH_GET
+#endif
+
 #ifdef ARCH_NEEDS_KMAP_HIGH_GET
 extern void *kmap_high_get(struct page *page);
 #else
@@ -27,6 +27,8 @@ void __check_vmalloc_seq(struct mm_struct *mm);
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
 #define init_new_context(tsk,mm)	({ atomic64_set(&mm->context.id, 0); 0; })
 
+DECLARE_PER_CPU(atomic64_t, active_asids);
+
 #else	/* !CONFIG_CPU_HAS_ASID */
 
 #ifdef CONFIG_MMU
@@ -450,6 +450,21 @@ static inline void local_flush_bp_all(void)
 	isb();
 }
 
+#ifdef CONFIG_ARM_ERRATA_798181
+static inline void dummy_flush_tlb_a15_erratum(void)
+{
+	/*
+	 * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0.
+	 */
+	asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
+	dsb();
+}
+#else
+static inline void dummy_flush_tlb_a15_erratum(void)
+{
+}
+#endif
+
 /*
  *	flush_pmd_entry
  *
@@ -276,7 +276,13 @@ ENDPROC(ftrace_graph_caller_old)
 */
 
 .macro mcount_enter
+/*
+ * This pad compensates for the push {lr} at the call site. Note that we are
+ * unable to unwind through a function which does not otherwise save its lr.
+ */
+UNWIND(.pad #4)
 	stmdb	sp!, {r0-r3, lr}
+UNWIND(.save {r0-r3, lr})
 .endm
 
 .macro mcount_get_lr reg
@@ -289,6 +295,7 @@ ENDPROC(ftrace_graph_caller_old)
 .endm
 
 ENTRY(__gnu_mcount_nc)
+UNWIND(.fnstart)
 #ifdef CONFIG_DYNAMIC_FTRACE
 	mov	ip, lr
 	ldmia	sp!, {lr}
@@ -296,17 +303,22 @@ ENTRY(__gnu_mcount_nc)
 #else
 	__mcount
 #endif
+UNWIND(.fnend)
 ENDPROC(__gnu_mcount_nc)
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 ENTRY(ftrace_caller)
+UNWIND(.fnstart)
 	__ftrace_caller
+UNWIND(.fnend)
 ENDPROC(ftrace_caller)
 #endif
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 ENTRY(ftrace_graph_caller)
+UNWIND(.fnstart)
 	__ftrace_graph_caller
+UNWIND(.fnend)
 ENDPROC(ftrace_graph_caller)
 #endif
 
@@ -267,7 +267,7 @@ __create_page_tables:
 	addne	r6, r6, #1 << SECTION_SHIFT
 	strne	r6, [r3]
 
-#if defined(CONFIG_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
+#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
 	sub	r4, r4, #4		@ Fixup page table pointer
 					@ for 64-bit descriptors
 #endif
@@ -966,7 +966,7 @@ static void reset_ctrl_regs(void *unused)
 	}
 
 	if (err) {
-		pr_warning("CPU %d debug is powered down!\n", cpu);
+		pr_warn_once("CPU %d debug is powered down!\n", cpu);
 		cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
 		return;
 	}
@@ -987,7 +987,7 @@ clear_vcr:
 	isb();
 
 	if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
-		pr_warning("CPU %d failed to disable vector catch\n", cpu);
+		pr_warn_once("CPU %d failed to disable vector catch\n", cpu);
 		return;
 	}
 
@@ -1007,7 +1007,7 @@ clear_vcr:
 	}
 
 	if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
-		pr_warning("CPU %d failed to clear debug register pairs\n", cpu);
+		pr_warn_once("CPU %d failed to clear debug register pairs\n", cpu);
 		return;
 	}
 
@@ -353,6 +353,23 @@ void __init early_print(const char *str, ...)
 	printk("%s", buf);
 }
 
+static void __init cpuid_init_hwcaps(void)
+{
+	unsigned int divide_instrs;
+
+	if (cpu_architecture() < CPU_ARCH_ARMv7)
+		return;
+
+	divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;
+
+	switch (divide_instrs) {
+	case 2:
+		elf_hwcap |= HWCAP_IDIVA;
+	case 1:
+		elf_hwcap |= HWCAP_IDIVT;
+	}
+}
+
 static void __init feat_v6_fixup(void)
 {
 	int id = read_cpuid_id();
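The switch in cpuid_init_hwcaps() above relies on deliberate fall-through: ID_ISAR0 bits [27:24] equal to 2 mean the divide instructions exist in both the ARM and Thumb encodings, so case 2 sets HWCAP_IDIVA and then falls into case 1, which adds HWCAP_IDIVT. A minimal standalone sketch of the same decode; the HWCAP bit values (1 << 17, 1 << 18) are an assumption carried over from arch/arm's hwcap header, not from this hunk:

    #include <stdio.h>

    #define HWCAP_IDIVA (1 << 17)	/* assumed: divide in ARM state */
    #define HWCAP_IDIVT (1 << 18)	/* assumed: divide in Thumb state */

    /* Decode the "Divide_instrs" field, bits [27:24] of ID_ISAR0. */
    static unsigned long decode_divide_hwcaps(unsigned int isar0)
    {
        unsigned long hwcap = 0;

        switch ((isar0 & 0x0f000000) >> 24) {
        case 2:				/* SDIV/UDIV in ARM and Thumb */
            hwcap |= HWCAP_IDIVA;
            /* fall through: ARM-state divide implies Thumb-state divide */
        case 1:				/* SDIV/UDIV in Thumb only */
            hwcap |= HWCAP_IDIVT;
        }
        return hwcap;
    }

    int main(void)
    {
        /* field = 2 -> both capability bits set */
        printf("%#lx\n", decode_divide_hwcaps(0x02101110));
        return 0;
    }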
@@ -483,8 +500,11 @@ static void __init setup_processor(void)
 	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
 		 list->elf_name, ENDIANNESS);
 	elf_hwcap = list->elf_hwcap;
+
+	cpuid_init_hwcaps();
+
 #ifndef CONFIG_ARM_THUMB
-	elf_hwcap &= ~HWCAP_THUMB;
+	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
 #endif
 
 	feat_v6_fixup();
@@ -524,7 +544,7 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
 	size -= start & ~PAGE_MASK;
 	bank->start = PAGE_ALIGN(start);
 
-#ifndef CONFIG_LPAE
+#ifndef CONFIG_ARM_LPAE
 	if (bank->start + size < bank->start) {
 		printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
 			"32-bit physical address space\n", (long long)start);
@@ -673,9 +673,6 @@ static int cpufreq_callback(struct notifier_block *nb,
 	if (freq->flags & CPUFREQ_CONST_LOOPS)
 		return NOTIFY_OK;
 
-	if (arm_delay_ops.const_clock)
-		return NOTIFY_OK;
-
 	if (!per_cpu(l_p_j_ref, cpu)) {
 		per_cpu(l_p_j_ref, cpu) =
 			per_cpu(cpu_data, cpu).loops_per_jiffy;
@@ -12,6 +12,7 @@
 
 #include <asm/smp_plat.h>
 #include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
 
 /**********************************************************************/
 
@@ -69,12 +70,72 @@ static inline void ipi_flush_bp_all(void *ignored)
 	local_flush_bp_all();
 }
 
+#ifdef CONFIG_ARM_ERRATA_798181
+static int erratum_a15_798181(void)
+{
+	unsigned int midr = read_cpuid_id();
+
+	/* Cortex-A15 r0p0..r3p2 affected */
+	if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
+		return 0;
+	return 1;
+}
+#else
+static int erratum_a15_798181(void)
+{
+	return 0;
+}
+#endif
+
+static void ipi_flush_tlb_a15_erratum(void *arg)
+{
+	dmb();
+}
+
+static void broadcast_tlb_a15_erratum(void)
+{
+	if (!erratum_a15_798181())
+		return;
+
+	dummy_flush_tlb_a15_erratum();
+	smp_call_function_many(cpu_online_mask, ipi_flush_tlb_a15_erratum,
+			       NULL, 1);
+}
+
+static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
+{
+	int cpu;
+	cpumask_t mask = { CPU_BITS_NONE };
+
+	if (!erratum_a15_798181())
+		return;
+
+	dummy_flush_tlb_a15_erratum();
+	for_each_online_cpu(cpu) {
+		if (cpu == smp_processor_id())
+			continue;
+		/*
+		 * We only need to send an IPI if the other CPUs are running
+		 * the same ASID as the one being invalidated. There is no
+		 * need for locking around the active_asids check since the
+		 * switch_mm() function has at least one dmb() (as required by
+		 * this workaround) in case a context switch happens on
+		 * another CPU after the condition below.
+		 */
+		if (atomic64_read(&mm->context.id) ==
+		    atomic64_read(&per_cpu(active_asids, cpu)))
+			cpumask_set_cpu(cpu, &mask);
+	}
+	smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
+}
+
 void flush_tlb_all(void)
 {
 	if (tlb_ops_need_broadcast())
 		on_each_cpu(ipi_flush_tlb_all, NULL, 1);
 	else
 		local_flush_tlb_all();
+	broadcast_tlb_a15_erratum();
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
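The guard erratum_a15_798181() above keys off the MIDR register: masking with 0xff0ffff0 keeps the implementer (0x41, ARM), architecture, and part number (0xc0f, Cortex-A15) fields while discarding the variant (bits [23:20]) and revision (bits [3:0]) nibbles, and the upper bound 0x413fc0f2 encodes r3p2. A small sketch of the same predicate, runnable in user space with hand-fed MIDR values:

    #include <stdio.h>

    /* Returns 1 for Cortex-A15 r0p0..r3p2, mirroring the kernel check above. */
    static int a15_798181_affected(unsigned int midr)
    {
        /* Mask keeps implementer/architecture/part, drops variant+revision. */
        if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
            return 0;
        return 1;
    }

    int main(void)
    {
        printf("r2p1: %d\n", a15_798181_affected(0x412fc0f1)); /* 1: affected */
        printf("r3p3: %d\n", a15_798181_affected(0x413fc0f3)); /* 0: fixed   */
        return 0;
    }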
@@ -83,6 +144,7 @@ void flush_tlb_mm(struct mm_struct *mm)
 		on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
 	else
 		local_flush_tlb_mm(mm);
+	broadcast_tlb_mm_a15_erratum(mm);
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
@@ -95,6 +157,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 					&ta, 1);
 	} else
 		local_flush_tlb_page(vma, uaddr);
+	broadcast_tlb_mm_a15_erratum(vma->vm_mm);
 }
 
 void flush_tlb_kernel_page(unsigned long kaddr)
@@ -105,6 +168,7 @@ void flush_tlb_kernel_page(unsigned long kaddr)
 		on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
 	} else
 		local_flush_tlb_kernel_page(kaddr);
+	broadcast_tlb_a15_erratum();
 }
 
 void flush_tlb_range(struct vm_area_struct *vma,
@@ -119,6 +183,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
 					&ta, 1);
 	} else
 		local_flush_tlb_range(vma, start, end);
+	broadcast_tlb_mm_a15_erratum(vma->vm_mm);
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
@@ -130,6 +195,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 		on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
 	} else
 		local_flush_tlb_kernel_range(start, end);
+	broadcast_tlb_a15_erratum();
 }
 
 void flush_bp_all(void)
@@ -883,8 +883,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
 			    lr, irq, vgic_cpu->vgic_lr[lr]);
 		BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
 		vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT;
-
-		goto out;
+		return true;
 	}
 
 	/* Try to use another LR for this interrupt */
@@ -898,7 +897,6 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
 	vgic_cpu->vgic_irq_lr_map[irq] = lr;
 	set_bit(lr, vgic_cpu->lr_used);
 
-out:
 	if (!vgic_irq_is_edge(vcpu, irq))
 		vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI;
 
@@ -1018,21 +1016,6 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 
 	kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr);
 
-	/*
-	 * We do not need to take the distributor lock here, since the only
-	 * action we perform is clearing the irq_active_bit for an EOIed
-	 * level interrupt. There is a potential race with
-	 * the queuing of an interrupt in __kvm_vgic_flush_hwstate(), where we
-	 * check if the interrupt is already active. Two possibilities:
-	 *
-	 * - The queuing is occurring on the same vcpu: cannot happen,
-	 *   as we're already in the context of this vcpu, and
-	 *   executing the handler
-	 * - The interrupt has been migrated to another vcpu, and we
-	 *   ignore this interrupt for this run. Big deal. It is still
-	 *   pending though, and will get considered when this vcpu
-	 *   exits.
-	 */
 	if (vgic_cpu->vgic_misr & GICH_MISR_EOI) {
 		/*
 		 * Some level interrupts have been EOIed. Clear their
@@ -1054,6 +1037,13 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 			} else {
 				vgic_cpu_irq_clear(vcpu, irq);
 			}
+
+			/*
+			 * Despite being EOIed, the LR may not have
+			 * been marked as empty.
+			 */
+			set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr);
+			vgic_cpu->vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT;
 		}
 	}
 
@@ -1064,9 +1054,8 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 }
 
 /*
- * Sync back the VGIC state after a guest run. We do not really touch
- * the distributor here (the irq_pending_on_cpu bit is safe to set),
- * so there is no need for taking its lock.
+ * Sync back the VGIC state after a guest run. The distributor lock is
+ * needed so we don't get preempted in the middle of the state processing.
 */
 static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
@@ -1112,10 +1101,14 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 
 void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
 	if (!irqchip_in_kernel(vcpu->kvm))
 		return;
 
+	spin_lock(&dist->lock);
 	__kvm_vgic_sync_hwstate(vcpu);
+	spin_unlock(&dist->lock);
 }
 
 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
@@ -58,7 +58,7 @@ static void __timer_delay(unsigned long cycles)
 static void __timer_const_udelay(unsigned long xloops)
 {
 	unsigned long long loops = xloops;
-	loops *= loops_per_jiffy;
+	loops *= arm_delay_ops.ticks_per_jiffy;
 	__timer_delay(loops >> UDELAY_SHIFT);
 }
 
@@ -73,11 +73,13 @@ void __init register_current_timer_delay(const struct delay_timer *timer)
 		pr_info("Switching to timer-based delay loop\n");
 		delay_timer			= timer;
 		lpj_fine			= timer->freq / HZ;
-		loops_per_jiffy			= lpj_fine;
+
+		/* cpufreq may scale loops_per_jiffy, so keep a private copy */
+		arm_delay_ops.ticks_per_jiffy	= lpj_fine;
 		arm_delay_ops.delay		= __timer_delay;
 		arm_delay_ops.const_udelay	= __timer_const_udelay;
 		arm_delay_ops.udelay		= __timer_udelay;
-		arm_delay_ops.const_clock	= true;
 
 		delay_calibrated		= true;
 	} else {
 		pr_info("Ignoring duplicate/late registration of read_current_timer delay\n");
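The delay.c change works because __timer_const_udelay() computes delays in fixed point: the requested microseconds arrive pre-scaled as xloops, get multiplied by a jiffy's worth of timer ticks, and are shifted back down. Caching that tick count in arm_delay_ops.ticks_per_jiffy, instead of reading the global loops_per_jiffy, means cpufreq rescaling the global no longer corrupts timer-based delays. A toy model of that property; the UDELAY_SHIFT value and all numbers are assumptions for illustration, not taken from the hunk:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical values: cpufreq rescales loops_per_jiffy at runtime,
     * but the timer tick count is frequency-invariant. */
    static unsigned long loops_per_jiffy = 100000;
    static unsigned long ticks_per_jiffy = 10000;

    #define UDELAY_SHIFT 30	/* assumed fixed-point shift */

    static uint64_t timer_cycles_for(uint64_t xloops)
    {
        return (xloops * ticks_per_jiffy) >> UDELAY_SHIFT;
    }

    int main(void)
    {
        uint64_t before = timer_cycles_for(1ULL << 32);
        loops_per_jiffy *= 2;	/* cpufreq doubles the CPU clock... */
        uint64_t after = timer_cycles_for(1ULL << 32);
        /* ...and the timer-based delay is unchanged */
        printf("%llu %llu\n", (unsigned long long)before,
               (unsigned long long)after);
        return 0;
    }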
@@ -299,7 +299,7 @@ static void l2x0_unlock(u32 cache_id)
 	int lockregs;
 	int i;
 
-	switch (cache_id) {
+	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
 	case L2X0_CACHE_ID_PART_L310:
 		lockregs = 8;
 		break;
@@ -333,15 +333,14 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 	if (cache_id_part_number_from_dt)
 		cache_id = cache_id_part_number_from_dt;
 	else
-		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID)
-			& L2X0_CACHE_ID_PART_MASK;
+		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
 	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
 
 	aux &= aux_mask;
 	aux |= aux_val;
 
 	/* Determine the number of ways */
-	switch (cache_id) {
+	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
 	case L2X0_CACHE_ID_PART_L310:
 		if (aux & (1 << 16))
 			ways = 16;
@@ -725,7 +724,6 @@ static const struct l2x0_of_data pl310_data = {
 		.flush_all   = l2x0_flush_all,
 		.inv_all     = l2x0_inv_all,
 		.disable     = l2x0_disable,
-		.set_debug   = pl310_set_debug,
 	},
 };
 
@@ -814,9 +812,8 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
 		data->save();
 
 	of_init = true;
-	l2x0_init(l2x0_base, aux_val, aux_mask);
-
 	memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));
+	l2x0_init(l2x0_base, aux_val, aux_mask);
 
 	return 0;
 }
@@ -48,7 +48,7 @@ static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
 
-static DEFINE_PER_CPU(atomic64_t, active_asids);
+DEFINE_PER_CPU(atomic64_t, active_asids);
 static DEFINE_PER_CPU(u64, reserved_asids);
 static cpumask_t tlb_flush_pending;
 
@@ -215,6 +215,7 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
 		local_flush_bp_all();
 		local_flush_tlb_all();
+		dummy_flush_tlb_a15_erratum();
 	}
 
 	atomic64_set(&per_cpu(active_asids, cpu), asid);
@@ -598,39 +598,60 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
-static void __init alloc_init_section(pud_t *pud, unsigned long addr,
+static void __init map_init_section(pmd_t *pmd, unsigned long addr,
 			unsigned long end, phys_addr_t phys,
 			const struct mem_type *type)
 {
+#ifndef CONFIG_ARM_LPAE
+	/*
+	 * In classic MMU format, puds and pmds are folded in to
+	 * the pgds. pmd_offset gives the PGD entry. PGDs refer to a
+	 * group of L1 entries making up one logical pointer to
+	 * an L2 table (2MB), where as PMDs refer to the individual
+	 * L1 entries (1MB). Hence increment to get the correct
+	 * offset for odd 1MB sections.
+	 * (See arch/arm/include/asm/pgtable-2level.h)
+	 */
+	if (addr & SECTION_SIZE)
+		pmd++;
+#endif
+	do {
+		*pmd = __pmd(phys | type->prot_sect);
+		phys += SECTION_SIZE;
+	} while (pmd++, addr += SECTION_SIZE, addr != end);
+
+	flush_pmd_entry(pmd);
+}
+
+static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
+				      unsigned long end, phys_addr_t phys,
+				      const struct mem_type *type)
+{
 	pmd_t *pmd = pmd_offset(pud, addr);
+	unsigned long next;
 
-	/*
-	 * Try a section mapping - end, addr and phys must all be aligned
-	 * to a section boundary. Note that PMDs refer to the individual
-	 * L1 entries, whereas PGDs refer to a group of L1 entries making
-	 * up one logical pointer to an L2 table.
-	 */
-	if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
-		pmd_t *p = pmd;
-
-#ifndef CONFIG_ARM_LPAE
-		if (addr & SECTION_SIZE)
-			pmd++;
-#endif
-
-		do {
-			*pmd = __pmd(phys | type->prot_sect);
-			phys += SECTION_SIZE;
-		} while (pmd++, addr += SECTION_SIZE, addr != end);
-
-		flush_pmd_entry(p);
-	} else {
+	do {
 		/*
-		 * No need to loop; pte's aren't interested in the
-		 * individual L1 entries.
+		 * With LPAE, we must loop over to map
+		 * all the pmds for the given range.
 		 */
-		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
-	}
+		next = pmd_addr_end(addr, end);
+
+		/*
+		 * Try a section mapping - addr, next and phys must all be
+		 * aligned to a section boundary.
+		 */
+		if (type->prot_sect &&
+				((addr | next | phys) & ~SECTION_MASK) == 0) {
+			map_init_section(pmd, addr, next, phys, type);
+		} else {
+			alloc_init_pte(pmd, addr, next,
+						__phys_to_pfn(phys), type);
+		}
+
+		phys += next - addr;
+
+	} while (pmd++, addr = next, addr != end);
 }
 
 static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
@@ -641,7 +662,7 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
 
 	do {
 		next = pud_addr_end(addr, end);
-		alloc_init_section(pud, addr, next, phys, type);
+		alloc_init_pmd(pud, addr, next, phys, type);
 		phys += next - addr;
 	} while (pud++, addr = next, addr != end);
 }
@@ -420,7 +420,7 @@ __v7_pj4b_proc_info:
 __v7_ca7mp_proc_info:
 	.long	0x410fc070
 	.long	0xff0ffff0
-	__v7_proc __v7_ca7mp_setup, hwcaps = HWCAP_IDIV
+	__v7_proc __v7_ca7mp_setup
 	.size	__v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info
 
 	/*
@@ -430,9 +430,24 @@ __v7_ca7mp_proc_info:
 __v7_ca15mp_proc_info:
 	.long	0x410fc0f0
 	.long	0xff0ffff0
-	__v7_proc __v7_ca15mp_setup, hwcaps = HWCAP_IDIV
+	__v7_proc __v7_ca15mp_setup
 	.size	__v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info
 
 	/*
+	 * Qualcomm Inc. Krait processors.
+	 */
+	.type	__krait_proc_info, #object
+__krait_proc_info:
+	.long	0x510f0400		@ Required ID value
+	.long	0xff0ffc00		@ Mask for ID
+	/*
+	 * Some Krait processors don't indicate support for SDIV and UDIV
+	 * instructions in the ARM instruction set, even though they actually
+	 * do support them.
+	 */
+	__v7_proc __v7_setup, hwcaps = HWCAP_IDIV
+	.size	__krait_proc_info, . - __krait_proc_info
+
+	/*
 	 * Match any ARMv7 processor core.
 	 */
@@ -657,7 +657,7 @@ config SNI_RM
 	bool "SNI RM200/300/400"
-	select FW_ARC if CPU_LITTLE_ENDIAN
-	select SNIPROM if CPU_BIG_ENDIAN
+	select FW_ARC32 if CPU_LITTLE_ENDIAN
+	select FW_SNIPROM if CPU_BIG_ENDIAN
 	select ARCH_MAY_HAVE_PC_FDC
 	select BOOT_ELF32
 	select CEVT_R4K
@@ -1144,7 +1144,7 @@ config DEFAULT_SGI_PARTITION
 config FW_ARC32
 	bool
 
-config SNIPROM
+config FW_SNIPROM
 	bool
 
 config BOOT_ELF32
@@ -174,7 +174,10 @@ static int octeon_kexec_prepare(struct kimage *image)
 
 static void octeon_generic_shutdown(void)
 {
-	int cpu, i;
+	int i;
+#ifdef CONFIG_SMP
+	int cpu;
+#endif
 	struct cvmx_bootmem_desc *bootmem_desc;
 	void *named_block_array_ptr;
 
@@ -72,6 +72,12 @@ typedef unsigned long old_sigset_t;	/* at least 32 bits */
 *
 * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
 * Unix names RESETHAND and NODEFER respectively.
+ *
+ * SA_RESTORER used to be defined as 0x04000000 but only the O32 ABI ever
+ * supported its use and no libc was using it, so the entire sa-restorer
+ * functionality was removed with lmo commit 39bffc12c3580ab for 2.5.48
+ * retaining only the SA_RESTORER definition as a reminder to avoid
+ * accidental reuse of the mask bit.
 */
 #define SA_ONSTACK	0x08000000
 #define SA_RESETHAND	0x80000000
@@ -84,8 +90,6 @@ typedef unsigned long old_sigset_t;	/* at least 32 bits */
 #define SA_NOMASK	SA_NODEFER
 #define SA_ONESHOT	SA_RESETHAND
 
-#define SA_RESTORER	0x04000000	/* Only for o32 */
-
 #define MINSIGSTKSZ	2048
 #define SIGSTKSZ	8192
 
@@ -1227,10 +1227,8 @@ __cpuinit void cpu_probe(void)
 	if (c->options & MIPS_CPU_FPU) {
 		c->fpu_id = cpu_get_fpu_id();
 
-		if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
-		    c->isa_level == MIPS_CPU_ISA_M32R2 ||
-		    c->isa_level == MIPS_CPU_ISA_M64R1 ||
-		    c->isa_level == MIPS_CPU_ISA_M64R2) {
+		if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
+				    MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
 			if (c->fpu_id & MIPS_FPIR_3D)
 				c->ases |= MIPS_ASE_MIPS3D;
 		}
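The isa_level test above changes from chained equality to a bitwise AND, which only works if the MIPS_CPU_ISA_* constants are one-hot flags rather than an enumeration; the same conversion recurs below in per_cpu_trap_init(), setup_scache(), and mips_sc_probe(). A tiny illustration; the flag values here are hypothetical, not the kernel's:

    #include <stdio.h>

    /* Hypothetical one-hot ISA flags for illustration only. */
    #define MIPS_CPU_ISA_M32R1 0x00000020
    #define MIPS_CPU_ISA_M32R2 0x00000040
    #define MIPS_CPU_ISA_M64R1 0x00000080
    #define MIPS_CPU_ISA_M64R2 0x00000100

    int main(void)
    {
        unsigned int isa_level = MIPS_CPU_ISA_M64R2;

        /* One mask test replaces four == comparisons. */
        if (isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
                         MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2))
            printf("MIPS32/MIPS64-class CPU\n");
        return 0;
    }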
|
@ -46,10 +46,9 @@
|
||||
PTR_L a5, PT_R9(sp)
|
||||
PTR_L a6, PT_R10(sp)
|
||||
PTR_L a7, PT_R11(sp)
|
||||
#else
|
||||
PTR_ADDIU sp, PT_SIZE
|
||||
#endif
|
||||
.endm
|
||||
PTR_ADDIU sp, PT_SIZE
|
||||
.endm
|
||||
|
||||
.macro RETURN_BACK
|
||||
jr ra
|
||||
@ -68,7 +67,11 @@ NESTED(ftrace_caller, PT_SIZE, ra)
|
||||
.globl _mcount
|
||||
_mcount:
|
||||
b ftrace_stub
|
||||
addiu sp,sp,8
|
||||
#ifdef CONFIG_32BIT
|
||||
addiu sp,sp,8
|
||||
#else
|
||||
nop
|
||||
#endif
|
||||
|
||||
/* When tracing is activated, it calls ftrace_caller+8 (aka here) */
|
||||
lw t1, function_trace_stop
|
||||
|
@@ -1571,7 +1571,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
 #ifdef CONFIG_64BIT
 	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
 #endif
-	if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV)
+	if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
 		status_set |= ST0_XX;
 	if (cpu_has_dsp)
 		status_set |= ST0_MX;
|
@ -1247,10 +1247,8 @@ static void __cpuinit setup_scache(void)
|
||||
return;
|
||||
|
||||
default:
|
||||
if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
|
||||
c->isa_level == MIPS_CPU_ISA_M32R2 ||
|
||||
c->isa_level == MIPS_CPU_ISA_M64R1 ||
|
||||
c->isa_level == MIPS_CPU_ISA_M64R2) {
|
||||
if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
|
||||
MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
|
||||
#ifdef CONFIG_MIPS_CPU_SCACHE
|
||||
if (mips_sc_init ()) {
|
||||
scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
|
||||
|
@@ -98,10 +98,8 @@ static inline int __init mips_sc_probe(void)
 	c->scache.flags |= MIPS_CACHE_NOT_PRESENT;
 
 	/* Ignore anything but MIPSxx processors */
-	if (c->isa_level != MIPS_CPU_ISA_M32R1 &&
-	    c->isa_level != MIPS_CPU_ISA_M32R2 &&
-	    c->isa_level != MIPS_CPU_ISA_M64R1 &&
-	    c->isa_level != MIPS_CPU_ISA_M64R2)
+	if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
+			      MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)))
 		return 0;
 
 	/* Does this MIPS32/MIPS64 CPU have a config2 register? */
@@ -19,7 +19,7 @@
 #include <asm/mach-au1x00/au1000.h>
 #include <asm/tlbmisc.h>
 
-#ifdef CONFIG_DEBUG_PCI
+#ifdef CONFIG_PCI_DEBUG
 #define DBG(x...) printk(KERN_DEBUG x)
 #else
 #define DBG(x...) do {} while (0)
@@ -162,7 +162,7 @@ static int config_access(unsigned char access_type, struct pci_bus *bus,
 	if (status & (1 << 29)) {
 		*data = 0xffffffff;
 		error = -1;
-		DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d",
+		DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d\n",
 		    access_type, bus->number, device);
 	} else if ((status >> 28) & 0xf) {
 		DBG("alchemy-pci: PCI ERR detected: dev %d, status %lx\n",
@@ -344,6 +344,7 @@ extern unsigned long MODULES_END;
 #define _REGION3_ENTRY_CO	0x100	/* change-recording override	    */
 
 /* Bits in the segment table entry */
+#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */
 #define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* segment table origin		    */
 #define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
 #define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */
@@ -1531,7 +1532,8 @@ extern int s390_enable_sie(void);
 /*
 * No page table caches to initialise
 */
-#define pgtable_cache_init()	do { } while (0)
+static inline void pgtable_cache_init(void) { }
+static inline void check_pgt_cache(void) { }
 
 #include <asm-generic/pgtable.h>
 
@@ -77,42 +77,69 @@ static size_t copy_in_kernel(size_t count, void __user *to,
 * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occured and the address
 * contains the (negative) exception code.
 */
-static __always_inline unsigned long follow_table(struct mm_struct *mm,
-						  unsigned long addr, int write)
+#ifdef CONFIG_64BIT
+static unsigned long follow_table(struct mm_struct *mm,
+				  unsigned long address, int write)
 {
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *ptep;
+	unsigned long *table = (unsigned long *)__pa(mm->pgd);
 
-	pgd = pgd_offset(mm, addr);
-	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-		return -0x3aUL;
-
-	pud = pud_offset(pgd, addr);
-	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
-		return -0x3bUL;
-
-	pmd = pmd_offset(pud, addr);
-	if (pmd_none(*pmd))
-		return -0x10UL;
-	if (pmd_large(*pmd)) {
-		if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO))
-			return -0x04UL;
-		return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK);
+	switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
+	case _ASCE_TYPE_REGION1:
+		table = table + ((address >> 53) & 0x7ff);
+		if (unlikely(*table & _REGION_ENTRY_INV))
+			return -0x39UL;
+		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+	case _ASCE_TYPE_REGION2:
+		table = table + ((address >> 42) & 0x7ff);
+		if (unlikely(*table & _REGION_ENTRY_INV))
+			return -0x3aUL;
+		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+	case _ASCE_TYPE_REGION3:
+		table = table + ((address >> 31) & 0x7ff);
+		if (unlikely(*table & _REGION_ENTRY_INV))
+			return -0x3bUL;
+		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+	case _ASCE_TYPE_SEGMENT:
+		table = table + ((address >> 20) & 0x7ff);
+		if (unlikely(*table & _SEGMENT_ENTRY_INV))
+			return -0x10UL;
+		if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) {
+			if (write && (*table & _SEGMENT_ENTRY_RO))
+				return -0x04UL;
+			return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) +
+				(address & ~_SEGMENT_ENTRY_ORIGIN_LARGE);
+		}
+		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
 	}
-	if (unlikely(pmd_bad(*pmd)))
-		return -0x10UL;
-
-	ptep = pte_offset_map(pmd, addr);
-	if (!pte_present(*ptep))
+	table = table + ((address >> 12) & 0xff);
+	if (unlikely(*table & _PAGE_INVALID))
 		return -0x11UL;
-	if (write && (!pte_write(*ptep) || !pte_dirty(*ptep)))
+	if (write && (*table & _PAGE_RO))
 		return -0x04UL;
-
-	return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK);
+	return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
 }
 
+#else /* CONFIG_64BIT */
+
+static unsigned long follow_table(struct mm_struct *mm,
+				  unsigned long address, int write)
+{
+	unsigned long *table = (unsigned long *)__pa(mm->pgd);
+
+	table = table + ((address >> 20) & 0x7ff);
+	if (unlikely(*table & _SEGMENT_ENTRY_INV))
+		return -0x10UL;
+	table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
+	table = table + ((address >> 12) & 0xff);
+	if (unlikely(*table & _PAGE_INVALID))
+		return -0x11UL;
+	if (write && (*table & _PAGE_RO))
+		return -0x04UL;
+	return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
+}
+
+#endif /* CONFIG_64BIT */
+
 static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
					     size_t n, int write_user)
 {
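The rewritten follow_table() starts its walk at whichever level the ASCE type names and then deliberately falls through the switch, so each region level chains into the next, with a large segment entry short-circuiting to a physical address; failures return small negative codes that the caller detects via IS_ERR_VALUE(). A compact user-space sketch of the same fall-through shape on a made-up two-level table (all shifts, masks, and the INVALID bit are invented for illustration):

    #include <stdio.h>

    #define INVALID 1UL	/* invented invalid-entry bit */

    static unsigned long lookup(unsigned long *table, int top_level,
                                unsigned long addr)
    {
        switch (top_level) {
        case 1:			/* upper table: index, check, descend */
            table = table + ((addr >> 8) & 0xf);
            if (*table & INVALID)
                return -0x10UL;	/* negative code, as in the kernel walk */
            table = (unsigned long *)(*table & ~INVALID);
            /* fall through */
        case 0:			/* leaf table */
            table = table + ((addr >> 4) & 0xf);
            if (*table & INVALID)
                return -0x11UL;
            return *table;
        }
        return -1UL;
    }

    int main(void)
    {
        unsigned long leaf[16] = {0}, top[16] = {0};

        leaf[3] = 0xabcd0;		/* valid leaf entry */
        top[2] = (unsigned long)leaf;	/* valid pointer to the leaf table */
        printf("%#lx\n", lookup(top, 1, 0x230)); /* top idx 2, leaf idx 3 */
        return 0;
    }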
@@ -197,7 +224,7 @@ size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
 
 static size_t clear_user_pt(size_t n, void __user *to)
 {
-	void *zpage = &empty_zero_page;
+	void *zpage = (void *) empty_zero_page;
 	long done, size, ret;
 
 	done = 0;
@@ -396,7 +396,7 @@ config ACPI_CUSTOM_METHOD
 
 config ACPI_BGRT
 	bool "Boottime Graphics Resource Table support"
-	depends on EFI
+	depends on EFI && X86
	help
 	  This driver adds support for exposing the ACPI Boottime Graphics
 	  Resource Table, which allows the operating system to obtain
@@ -90,7 +90,7 @@ void acpi_i2c_register_devices(struct i2c_adapter *adapter)
 	acpi_handle handle;
 	acpi_status status;
 
-	handle = ACPI_HANDLE(&adapter->dev);
+	handle = ACPI_HANDLE(adapter->dev.parent);
 	if (!handle)
 		return;
 
@@ -66,7 +66,8 @@ module_param(latency_factor, uint, 0644);
 
 static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);
 
-static struct acpi_processor_cx *acpi_cstate[CPUIDLE_STATE_MAX];
+static DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX],
+								acpi_cstate);
 
 static int disabled_by_idle_boot_param(void)
 {
@@ -722,7 +723,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 		struct cpuidle_driver *drv, int index)
 {
 	struct acpi_processor *pr;
-	struct acpi_processor_cx *cx = acpi_cstate[index];
+	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
 
 	pr = __this_cpu_read(processors);
 
@@ -745,7 +746,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 */
 static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
 {
-	struct acpi_processor_cx *cx = acpi_cstate[index];
+	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
 
 	ACPI_FLUSH_CPU_CACHE();
 
@@ -775,7 +776,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 		struct cpuidle_driver *drv, int index)
 {
 	struct acpi_processor *pr;
-	struct acpi_processor_cx *cx = acpi_cstate[index];
+	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
 
 	pr = __this_cpu_read(processors);
 
@@ -833,7 +834,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 		struct cpuidle_driver *drv, int index)
 {
 	struct acpi_processor *pr;
-	struct acpi_processor_cx *cx = acpi_cstate[index];
+	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
 
 	pr = __this_cpu_read(processors);
 
@@ -960,7 +961,7 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
 			!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
 				continue;
 #endif
-		acpi_cstate[count] = cx;
+		per_cpu(acpi_cstate[count], dev->cpu) = cx;
 
 		count++;
 		if (count == CPUIDLE_STATE_MAX)
@@ -46,6 +46,7 @@
 #include "power.h"
 
 static DEFINE_MUTEX(dev_pm_qos_mtx);
+static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);
 
 static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
 
@@ -216,12 +217,17 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 	struct pm_qos_constraints *c;
 	struct pm_qos_flags *f;
 
-	mutex_lock(&dev_pm_qos_mtx);
+	mutex_lock(&dev_pm_qos_sysfs_mtx);
 
 	/*
 	 * If the device's PM QoS resume latency limit or PM QoS flags have been
 	 * exposed to user space, they have to be hidden at this point.
 	 */
+	pm_qos_sysfs_remove_latency(dev);
+	pm_qos_sysfs_remove_flags(dev);
+
+	mutex_lock(&dev_pm_qos_mtx);
+
 	__dev_pm_qos_hide_latency_limit(dev);
 	__dev_pm_qos_hide_flags(dev);
 
@@ -254,6 +260,8 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 
 out:
 	mutex_unlock(&dev_pm_qos_mtx);
+
+	mutex_unlock(&dev_pm_qos_sysfs_mtx);
 }
 
 /**
@@ -558,6 +566,14 @@ static void __dev_pm_qos_drop_user_request(struct device *dev,
 	kfree(req);
 }
 
+static void dev_pm_qos_drop_user_request(struct device *dev,
+					 enum dev_pm_qos_req_type type)
+{
+	mutex_lock(&dev_pm_qos_mtx);
+	__dev_pm_qos_drop_user_request(dev, type);
+	mutex_unlock(&dev_pm_qos_mtx);
+}
+
 /**
 * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
 * @dev: Device whose PM QoS latency limit is to be exposed to user space.
@@ -581,6 +597,8 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
 		return ret;
 	}
 
+	mutex_lock(&dev_pm_qos_sysfs_mtx);
+
 	mutex_lock(&dev_pm_qos_mtx);
 
 	if (IS_ERR_OR_NULL(dev->power.qos))
@@ -591,26 +609,27 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
 	if (ret < 0) {
 		__dev_pm_qos_remove_request(req);
 		kfree(req);
+		mutex_unlock(&dev_pm_qos_mtx);
 		goto out;
 	}
 
 	dev->power.qos->latency_req = req;
 
+	mutex_unlock(&dev_pm_qos_mtx);
+
 	ret = pm_qos_sysfs_add_latency(dev);
 	if (ret)
-		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
 
 out:
-	mutex_unlock(&dev_pm_qos_mtx);
+	mutex_unlock(&dev_pm_qos_sysfs_mtx);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
 
 static void __dev_pm_qos_hide_latency_limit(struct device *dev)
 {
-	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) {
-		pm_qos_sysfs_remove_latency(dev);
+	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req)
 		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
-	}
 }
 
 /**
@@ -619,9 +638,15 @@ static void __dev_pm_qos_hide_latency_limit(struct device *dev)
 */
 void dev_pm_qos_hide_latency_limit(struct device *dev)
 {
+	mutex_lock(&dev_pm_qos_sysfs_mtx);
+
+	pm_qos_sysfs_remove_latency(dev);
+
 	mutex_lock(&dev_pm_qos_mtx);
 	__dev_pm_qos_hide_latency_limit(dev);
 	mutex_unlock(&dev_pm_qos_mtx);
+
+	mutex_unlock(&dev_pm_qos_sysfs_mtx);
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
 
@@ -649,6 +674,8 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val)
 	}
 
 	pm_runtime_get_sync(dev);
+	mutex_lock(&dev_pm_qos_sysfs_mtx);
+
 	mutex_lock(&dev_pm_qos_mtx);
 
 	if (IS_ERR_OR_NULL(dev->power.qos))
@@ -659,16 +686,19 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val)
 	if (ret < 0) {
 		__dev_pm_qos_remove_request(req);
 		kfree(req);
+		mutex_unlock(&dev_pm_qos_mtx);
 		goto out;
 	}
 
 	dev->power.qos->flags_req = req;
 
+	mutex_unlock(&dev_pm_qos_mtx);
+
 	ret = pm_qos_sysfs_add_flags(dev);
 	if (ret)
-		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
+		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
 
 out:
-	mutex_unlock(&dev_pm_qos_mtx);
+	mutex_unlock(&dev_pm_qos_sysfs_mtx);
 	pm_runtime_put(dev);
 	return ret;
 }
@@ -676,10 +706,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
 
 static void __dev_pm_qos_hide_flags(struct device *dev)
 {
-	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) {
-		pm_qos_sysfs_remove_flags(dev);
+	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
 		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
-	}
 }
 
 /**
@@ -689,9 +717,15 @@ static void __dev_pm_qos_hide_flags(struct device *dev)
 void dev_pm_qos_hide_flags(struct device *dev)
 {
 	pm_runtime_get_sync(dev);
+	mutex_lock(&dev_pm_qos_sysfs_mtx);
+
+	pm_qos_sysfs_remove_flags(dev);
+
 	mutex_lock(&dev_pm_qos_mtx);
 	__dev_pm_qos_hide_flags(dev);
 	mutex_unlock(&dev_pm_qos_mtx);
+
+	mutex_unlock(&dev_pm_qos_sysfs_mtx);
 	pm_runtime_put(dev);
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
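The qos.c hunks above all apply one pattern: the new dev_pm_qos_sysfs_mtx is an outer lock held around every sysfs add/remove, while dev_pm_qos_mtx is released before those potentially blocking sysfs calls and re-taken by the new dev_pm_qos_drop_user_request() wrapper when cleanup is needed. A condensed user-space sketch of that ordering; everything here (names, stubs) is hypothetical shorthand for the kernel code:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t sysfs_mtx = PTHREAD_MUTEX_INITIALIZER; /* outer */
    static pthread_mutex_t data_mtx  = PTHREAD_MUTEX_INITIALIZER; /* inner */

    /* Stubs standing in for sysfs and request bookkeeping. */
    static int sysfs_add(void)     { puts("sysfs add (may block)"); return 0; }
    static void sysfs_remove(void) { puts("sysfs remove (may block)"); }
    static void drop_request(void) { puts("drop request"); }

    static int expose(void)
    {
        int ret;

        pthread_mutex_lock(&sysfs_mtx);
        pthread_mutex_lock(&data_mtx);
        /* ... install the request while holding data_mtx ... */
        pthread_mutex_unlock(&data_mtx);   /* drop before the blocking call */

        ret = sysfs_add();
        if (ret) {                         /* re-take data_mtx to undo */
            pthread_mutex_lock(&data_mtx);
            drop_request();
            pthread_mutex_unlock(&data_mtx);
        }
        pthread_mutex_unlock(&sysfs_mtx);
        return ret;
    }

    static void hide(void)
    {
        pthread_mutex_lock(&sysfs_mtx);
        sysfs_remove();                    /* safe: data_mtx is not held */

        pthread_mutex_lock(&data_mtx);
        drop_request();
        pthread_mutex_unlock(&data_mtx);

        pthread_mutex_unlock(&sysfs_mtx);
    }

    int main(void)
    {
        expose();
        hide();
        return 0;
    }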
@@ -178,10 +178,16 @@ static struct cpufreq_driver cpu0_cpufreq_driver = {
 
 static int cpu0_cpufreq_probe(struct platform_device *pdev)
 {
-	struct device_node *np;
+	struct device_node *np, *parent;
 	int ret;
 
-	for_each_child_of_node(of_find_node_by_path("/cpus"), np) {
+	parent = of_find_node_by_path("/cpus");
+	if (!parent) {
+		pr_err("failed to find OF /cpus\n");
+		return -ENOENT;
+	}
+
+	for_each_child_of_node(parent, np) {
 		if (of_get_property(np, "operating-points", NULL))
 			break;
 	}
@@ -14,8 +14,8 @@
 * published by the Free Software Foundation.
 */
 
-#ifndef _CPUFREQ_GOVERNER_H
-#define _CPUFREQ_GOVERNER_H
+#ifndef _CPUFREQ_GOVERNOR_H
+#define _CPUFREQ_GOVERNOR_H
 
 #include <linux/cpufreq.h>
 #include <linux/kobject.h>
@@ -175,4 +175,4 @@ bool need_load_eval(struct cpu_dbs_common_info *cdbs,
 		    unsigned int sampling_rate);
 int cpufreq_governor_dbs(struct dbs_data *dbs_data,
 		struct cpufreq_policy *policy, unsigned int event);
-#endif /* _CPUFREQ_GOVERNER_H */
+#endif /* _CPUFREQ_GOVERNOR_H */
@@ -83,6 +83,7 @@ config INTEL_IOP_ADMA
 
 config DW_DMAC
 	tristate "Synopsys DesignWare AHB DMA support"
+	depends on GENERIC_HARDIRQS
 	select DMA_ENGINE
 	default y if CPU_AT32AP7000
 	help
@@ -214,7 +214,7 @@ static int ichx_gpio_request(struct gpio_chip *chip, unsigned nr)
 	 * If it can't be trusted, assume that the pin can be used as a GPIO.
 	 */
 	if (ichx_priv.desc->use_sel_ignore[nr / 32] & (1 << (nr & 0x1f)))
-		return 1;
+		return 0;
 
 	return ichx_read_bit(GPIO_USE_SEL, nr) ? 0 : -ENODEV;
 }
@@ -307,11 +307,15 @@ static const struct irq_domain_ops stmpe_gpio_irq_simple_ops = {
 	.xlate = irq_domain_xlate_twocell,
 };
 
-static int stmpe_gpio_irq_init(struct stmpe_gpio *stmpe_gpio)
+static int stmpe_gpio_irq_init(struct stmpe_gpio *stmpe_gpio,
+			       struct device_node *np)
 {
-	int base = stmpe_gpio->irq_base;
+	int base = 0;
 
-	stmpe_gpio->domain = irq_domain_add_simple(NULL,
+	if (!np)
+		base = stmpe_gpio->irq_base;
+
+	stmpe_gpio->domain = irq_domain_add_simple(np,
 				stmpe_gpio->chip.ngpio, base,
 				&stmpe_gpio_irq_simple_ops, stmpe_gpio);
 	if (!stmpe_gpio->domain) {
@@ -346,6 +350,9 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
 	stmpe_gpio->chip = template_chip;
 	stmpe_gpio->chip.ngpio = stmpe->num_gpios;
 	stmpe_gpio->chip.dev = &pdev->dev;
+#ifdef CONFIG_OF
+	stmpe_gpio->chip.of_node = np;
+#endif
 	stmpe_gpio->chip.base = pdata ? pdata->gpio_base : -1;
 
 	if (pdata)
@@ -366,7 +373,7 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
 		goto out_free;
 
 	if (irq >= 0) {
-		ret = stmpe_gpio_irq_init(stmpe_gpio);
+		ret = stmpe_gpio_irq_init(stmpe_gpio, np);
 		if (ret)
 			goto out_disable;
 
@@ -2077,7 +2077,6 @@ static const struct hid_device_id hid_ignore_list[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_MASTERKIT, USB_DEVICE_ID_MASTERKIT_MA901RADIO) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
@@ -2244,6 +2243,18 @@ bool hid_ignore(struct hid_device *hdev)
 			hdev->product <= USB_DEVICE_ID_VELLEMAN_K8061_LAST))
 			return true;
 		break;
+	case USB_VENDOR_ID_ATMEL_V_USB:
+		/* Masterkit MA901 usb radio based on Atmel tiny85 chip and
+		 * it has the same USB ID as many Atmel V-USB devices. This
+		 * usb radio is handled by radio-ma901.c driver so we want
+		 * ignore the hid. Check the name, bus, product and ignore
+		 * if we have MA901 usb radio.
+		 */
+		if (hdev->product == USB_DEVICE_ID_ATMEL_V_USB &&
+		    hdev->bus == BUS_USB &&
+		    strncmp(hdev->name, "www.masterkit.ru MA901", 22) == 0)
+			return true;
+		break;
 	}
 
 	if (hdev->type == HID_TYPE_USBMOUSE &&
|
@ -158,6 +158,8 @@
|
||||
#define USB_VENDOR_ID_ATMEL 0x03eb
|
||||
#define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c
|
||||
#define USB_DEVICE_ID_ATMEL_MXT_DIGITIZER 0x2118
|
||||
#define USB_VENDOR_ID_ATMEL_V_USB 0x16c0
|
||||
#define USB_DEVICE_ID_ATMEL_V_USB 0x05df
|
||||
|
||||
#define USB_VENDOR_ID_AUREAL 0x0755
|
||||
#define USB_DEVICE_ID_AUREAL_W01RN 0x2626
|
||||
@ -557,9 +559,6 @@
|
||||
#define USB_VENDOR_ID_MADCATZ 0x0738
|
||||
#define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540
|
||||
|
||||
#define USB_VENDOR_ID_MASTERKIT 0x16c0
|
||||
#define USB_DEVICE_ID_MASTERKIT_MA901RADIO 0x05df
|
||||
|
||||
#define USB_VENDOR_ID_MCC 0x09db
|
||||
#define USB_DEVICE_ID_MCC_PMD1024LS 0x0076
|
||||
#define USB_DEVICE_ID_MCC_PMD1208LS 0x007a
|
||||
|
@@ -462,6 +462,21 @@ static int magicmouse_input_mapping(struct hid_device *hdev,
 	return 0;
 }
 
+static void magicmouse_input_configured(struct hid_device *hdev,
+		struct hid_input *hi)
+
+{
+	struct magicmouse_sc *msc = hid_get_drvdata(hdev);
+
+	int ret = magicmouse_setup_input(msc->input, hdev);
+	if (ret) {
+		hid_err(hdev, "magicmouse setup input failed (%d)\n", ret);
+		/* clean msc->input to notify probe() of the failure */
+		msc->input = NULL;
+	}
+}
+
+
 static int magicmouse_probe(struct hid_device *hdev,
 	const struct hid_device_id *id)
 {
@@ -493,15 +508,10 @@ static int magicmouse_probe(struct hid_device *hdev,
 		goto err_free;
 	}
 
-	/* We do this after hid-input is done parsing reports so that
-	 * hid-input uses the most natural button and axis IDs.
-	 */
-	if (msc->input) {
-		ret = magicmouse_setup_input(msc->input, hdev);
-		if (ret) {
-			hid_err(hdev, "magicmouse setup input failed (%d)\n", ret);
-			goto err_stop_hw;
-		}
+	if (!msc->input) {
+		hid_err(hdev, "magicmouse input not registered\n");
+		ret = -ENOMEM;
+		goto err_stop_hw;
 	}
 
 	if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE)
@@ -568,6 +578,7 @@ static struct hid_driver magicmouse_driver = {
 	.remove = magicmouse_remove,
 	.raw_event = magicmouse_raw_event,
 	.input_mapping = magicmouse_input_mapping,
+	.input_configured = magicmouse_input_configured,
 };
 module_hid_driver(magicmouse_driver);
 
@@ -182,7 +182,6 @@ static int dw_i2c_probe(struct platform_device *pdev)
 	adap->algo = &i2c_dw_algo;
 	adap->dev.parent = &pdev->dev;
 	adap->dev.of_node = pdev->dev.of_node;
-	ACPI_HANDLE_SET(&adap->dev, ACPI_HANDLE(&pdev->dev));
 
 	r = i2c_add_numbered_adapter(adap);
 	if (r) {
@@ -44,7 +44,7 @@
 #include "qib.h"
 #include "qib_7220.h"
 
-#define SD7220_FW_NAME "intel/sd7220.fw"
+#define SD7220_FW_NAME "qlogic/sd7220.fw"
 MODULE_FIRMWARE(SD7220_FW_NAME);
 
 /*
@@ -204,7 +204,7 @@ config VIDEO_SAMSUNG_EXYNOS_GSC
 
 config VIDEO_SH_VEU
 	tristate "SuperH VEU mem2mem video processing driver"
-	depends on VIDEO_DEV && VIDEO_V4L2
+	depends on VIDEO_DEV && VIDEO_V4L2 && GENERIC_HARDIRQS
 	select VIDEOBUF2_DMA_CONTIG
 	select V4L2_MEM2MEM_DEV
 	help
@@ -347,9 +347,20 @@ static void usb_ma901radio_release(struct v4l2_device *v4l2_dev)
 static int usb_ma901radio_probe(struct usb_interface *intf,
				const struct usb_device_id *id)
 {
+	struct usb_device *dev = interface_to_usbdev(intf);
 	struct ma901radio_device *radio;
 	int retval = 0;
 
+	/* Masterkit MA901 usb radio has the same USB ID as many others
+	 * Atmel V-USB devices. Let's make additional checks to be sure
+	 * that this is our device.
+	 */
+
+	if (dev->product && dev->manufacturer &&
+	    (strncmp(dev->product, "MA901", 5) != 0
+	    || strncmp(dev->manufacturer, "www.masterkit.ru", 16) != 0))
+		return -ENODEV;
+
 	radio = kzalloc(sizeof(struct ma901radio_device), GFP_KERNEL);
 	if (!radio) {
 		dev_err(&intf->dev, "kzalloc for ma901radio_device failed\n");
@@ -4901,8 +4901,8 @@ static void __exit bonding_exit(void)
 
 	bond_destroy_debugfs();
 
-	rtnl_link_unregister(&bond_link_ops);
 	unregister_pernet_subsys(&bond_net_ops);
+	rtnl_link_unregister(&bond_link_ops);
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	/*
@@ -186,7 +186,7 @@ struct atl1e_tpd_desc {
 /* how about 0x2000  */
 #define MAX_TX_BUF_LEN      0x2000
 #define MAX_TX_BUF_SHIFT    13
-/*#define MAX_TX_BUF_LEN  0x3000  */
+#define MAX_TSO_SEG_SIZE    0x3c00
 
 /* rrs word 1 bit 0:31 */
 #define RRS_RX_CSUM_MASK	0xFFFF
@@ -2327,6 +2327,7 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	INIT_WORK(&adapter->reset_task, atl1e_reset_task);
 	INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task);
+	netif_set_gso_max_size(netdev, MAX_TSO_SEG_SIZE);
 	err = register_netdev(netdev);
 	if (err) {
 		netdev_err(netdev, "register netdevice failed\n");
@@ -13481,13 +13481,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
 {
 	struct bnx2x *bp = params->bp;
 	u16 base_page, next_page, not_kr2_device, lane;
-	int sigdet = bnx2x_warpcore_get_sigdet(phy, params);
-
-	if (!sigdet) {
-		if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE))
-			bnx2x_kr2_recovery(params, vars, phy);
-		return;
-	}
+	int sigdet;
 
 	/* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery
 	 * Since some switches tend to reinit the AN process and clear the
@@ -13498,6 +13492,16 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
 		vars->check_kr2_recovery_cnt--;
 		return;
 	}
+
+	sigdet = bnx2x_warpcore_get_sigdet(phy, params);
+	if (!sigdet) {
+		if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+			bnx2x_kr2_recovery(params, vars, phy);
+			DP(NETIF_MSG_LINK, "No sigdet\n");
+		}
+		return;
+	}
+
 	lane = bnx2x_get_warpcore_lane(phy, params);
 	CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
 			  MDIO_AER_BLOCK_AER_REG, lane);
@@ -7882,12 +7882,19 @@ static int __init ixgbe_init_module(void)
 	ixgbe_dbg_init();
 #endif /* CONFIG_DEBUG_FS */
 
+	ret = pci_register_driver(&ixgbe_driver);
+	if (ret) {
+#ifdef CONFIG_DEBUG_FS
+		ixgbe_dbg_exit();
+#endif /* CONFIG_DEBUG_FS */
+		return ret;
+	}
+
 #ifdef CONFIG_IXGBE_DCA
 	dca_register_notify(&dca_notifier);
 #endif
 
-	ret = pci_register_driver(&ixgbe_driver);
-	return ret;
+	return 0;
 }
 
 module_init(ixgbe_init_module);
@@ -3872,6 +3872,30 @@ static void rtl_init_mdio_ops(struct rtl8169_private *tp)
 	}
 }
 
+static void rtl_speed_down(struct rtl8169_private *tp)
+{
+	u32 adv;
+	int lpa;
+
+	rtl_writephy(tp, 0x1f, 0x0000);
+	lpa = rtl_readphy(tp, MII_LPA);
+
+	if (lpa & (LPA_10HALF | LPA_10FULL))
+		adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full;
+	else if (lpa & (LPA_100HALF | LPA_100FULL))
+		adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
+		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
+	else
+		adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
+		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
+		      (tp->mii.supports_gmii ?
+		       ADVERTISED_1000baseT_Half |
+		       ADVERTISED_1000baseT_Full : 0);
+
+	rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
+			  adv);
+}
+
 static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
 {
 	void __iomem *ioaddr = tp->mmio_addr;
@@ -3904,9 +3928,7 @@ static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
 	if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
 		return false;
 
-	rtl_writephy(tp, 0x1f, 0x0000);
-	rtl_writephy(tp, MII_BMCR, 0x0000);
-
+	rtl_speed_down(tp);
 	rtl_wol_suspend_quirk(tp);
 
 	return true;
@@ -1419,6 +1419,14 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
 
 	mutex_lock(&priv->mutex);
 
+	if (changes & BSS_CHANGED_IDLE && bss_conf->idle) {
+		/*
+		 * If we go idle, then clearly no "passive-no-rx"
+		 * workaround is needed any more, this is a reset.
+		 */
+		iwlagn_lift_passive_no_rx(priv);
+	}
+
 	if (unlikely(!iwl_is_ready(priv))) {
 		IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
 		mutex_unlock(&priv->mutex);
@@ -1450,16 +1458,6 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
 			priv->timestamp = bss_conf->sync_tsf;
 			ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
 		} else {
-			/*
-			 * If we disassociate while there are pending
-			 * frames, just wake up the queues and let the
-			 * frames "escape" ... This shouldn't really
-			 * be happening to start with, but we should
-			 * not get stuck in this case either since it
-			 * can happen if userspace gets confused.
-			 */
-			iwlagn_lift_passive_no_rx(priv);
-
 			ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
 
 			if (ctx->ctxid == IWL_RXON_CTX_BSS)
@@ -1193,7 +1193,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
 		memset(&info->status, 0, sizeof(info->status));
 
 		if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
-		    iwl_is_associated_ctx(ctx) && ctx->vif &&
+		    ctx->vif &&
 		    ctx->vif->type == NL80211_IFTYPE_STATION) {
 			/* block and stop all queues */
 			priv->passive_no_rx = true;
@@ -1892,7 +1892,8 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
 		}
 	}
 
-	for (i = 0; i < request->n_channels; i++) {
+	for (i = 0; i < min_t(u32, request->n_channels,
+			      MWIFIEX_USER_SCAN_CHAN_MAX); i++) {
 		chan = request->channels[i];
 		priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value;
 		priv->user_scan_cfg->chan_list[i].radio_type = chan->band;
@ -22,7 +22,7 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <linux/mei_bus.h>
#include <linux/mei_cl_bus.h>

#include <linux/nfc.h>
#include <net/nfc/hci.h>

@ -32,9 +32,6 @@

#define MICROREAD_DRIVER_NAME "microread"

#define MICROREAD_UUID UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50, 0x94, \
0xd4, 0x50, 0x26, 0x67, 0x23, 0x77, 0x5c)

struct mei_nfc_hdr {
u8 cmd;
u8 status;

@ -48,7 +45,7 @@ struct mei_nfc_hdr {
#define MEI_NFC_MAX_READ (MEI_NFC_HEADER_SIZE + MEI_NFC_MAX_HCI_PAYLOAD)

struct microread_mei_phy {
struct mei_device *device;
struct mei_cl_device *device;
struct nfc_hci_dev *hdev;

int powered;

@ -105,14 +102,14 @@ static int microread_mei_write(void *phy_id, struct sk_buff *skb)

MEI_DUMP_SKB_OUT("mei frame sent", skb);

r = mei_send(phy->device, skb->data, skb->len);
r = mei_cl_send(phy->device, skb->data, skb->len);
if (r > 0)
r = 0;

return r;
}

static void microread_event_cb(struct mei_device *device, u32 events,
static void microread_event_cb(struct mei_cl_device *device, u32 events,
void *context)
{
struct microread_mei_phy *phy = context;

@ -120,7 +117,7 @@ static void microread_event_cb(struct mei_device *device, u32 events,
if (phy->hard_fault != 0)
return;

if (events & BIT(MEI_EVENT_RX)) {
if (events & BIT(MEI_CL_EVENT_RX)) {
struct sk_buff *skb;
int reply_size;

@ -128,7 +125,7 @@ static void microread_event_cb(struct mei_device *device, u32 events,
if (!skb)
return;

reply_size = mei_recv(device, skb->data, MEI_NFC_MAX_READ);
reply_size = mei_cl_recv(device, skb->data, MEI_NFC_MAX_READ);
if (reply_size < MEI_NFC_HEADER_SIZE) {
kfree(skb);
return;

@ -149,8 +146,8 @@ static struct nfc_phy_ops mei_phy_ops = {
.disable = microread_mei_disable,
};

static int microread_mei_probe(struct mei_device *device,
const struct mei_id *id)
static int microread_mei_probe(struct mei_cl_device *device,
const struct mei_cl_device_id *id)
{
struct microread_mei_phy *phy;
int r;

@ -164,9 +161,9 @@ static int microread_mei_probe(struct mei_device *device,
}

phy->device = device;
mei_set_clientdata(device, phy);
mei_cl_set_drvdata(device, phy);

r = mei_register_event_cb(device, microread_event_cb, phy);
r = mei_cl_register_event_cb(device, microread_event_cb, phy);
if (r) {
pr_err(MICROREAD_DRIVER_NAME ": event cb registration failed\n");
goto err_out;

@ -186,9 +183,9 @@ err_out:
return r;
}

static int microread_mei_remove(struct mei_device *device)
static int microread_mei_remove(struct mei_cl_device *device)
{
struct microread_mei_phy *phy = mei_get_clientdata(device);
struct microread_mei_phy *phy = mei_cl_get_drvdata(device);

pr_info("Removing microread\n");

@ -202,16 +199,15 @@ static int microread_mei_remove(struct mei_device *device)
return 0;
}

static struct mei_id microread_mei_tbl[] = {
{ MICROREAD_DRIVER_NAME, MICROREAD_UUID },
static struct mei_cl_device_id microread_mei_tbl[] = {
{ MICROREAD_DRIVER_NAME },

/* required last entry */
{ }
};

MODULE_DEVICE_TABLE(mei, microread_mei_tbl);

static struct mei_driver microread_driver = {
static struct mei_cl_driver microread_driver = {
.id_table = microread_mei_tbl,
.name = MICROREAD_DRIVER_NAME,

@ -225,7 +221,7 @@ static int microread_mei_init(void)

pr_debug(DRIVER_DESC ": %s\n", __func__);

r = mei_driver_register(&microread_driver);
r = mei_cl_driver_register(&microread_driver);
if (r) {
pr_err(MICROREAD_DRIVER_NAME ": driver registration failed\n");
return r;

@ -236,7 +232,7 @@ static int microread_mei_init(void)

static void microread_mei_exit(void)
{
mei_driver_unregister(&microread_driver);
mei_cl_driver_unregister(&microread_driver);
}

module_init(microread_mei_init);
@ -44,7 +44,6 @@ static DECLARE_COMPLETION(at91_rtc_updated);
static unsigned int at91_alarm_year = AT91_RTC_EPOCH;
static void __iomem *at91_rtc_regs;
static int irq;
static u32 at91_rtc_imr;

/*
* Decode time/date into rtc_time structure

@ -109,11 +108,9 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
cr = at91_rtc_read(AT91_RTC_CR);
at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM);

at91_rtc_imr |= AT91_RTC_ACKUPD;
at91_rtc_write(AT91_RTC_IER, AT91_RTC_ACKUPD);
wait_for_completion(&at91_rtc_updated); /* wait for ACKUPD interrupt */
at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD);
at91_rtc_imr &= ~AT91_RTC_ACKUPD;

at91_rtc_write(AT91_RTC_TIMR,
bin2bcd(tm->tm_sec) << 0

@ -145,7 +142,7 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
tm->tm_year = at91_alarm_year - 1900;

alrm->enabled = (at91_rtc_imr & AT91_RTC_ALARM)
alrm->enabled = (at91_rtc_read(AT91_RTC_IMR) & AT91_RTC_ALARM)
? 1 : 0;

dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,

@ -171,7 +168,6 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
tm.tm_sec = alrm->time.tm_sec;

at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM);
at91_rtc_imr &= ~AT91_RTC_ALARM;
at91_rtc_write(AT91_RTC_TIMALR,
bin2bcd(tm.tm_sec) << 0
| bin2bcd(tm.tm_min) << 8

@ -184,7 +180,6 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)

if (alrm->enabled) {
at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
at91_rtc_imr |= AT91_RTC_ALARM;
at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM);
}

@ -201,12 +196,9 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)

if (enabled) {
at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
at91_rtc_imr |= AT91_RTC_ALARM;
at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM);
} else {
} else
at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM);
at91_rtc_imr &= ~AT91_RTC_ALARM;
}

return 0;
}

@ -215,10 +207,12 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
*/
static int at91_rtc_proc(struct device *dev, struct seq_file *seq)
{
unsigned long imr = at91_rtc_read(AT91_RTC_IMR);

seq_printf(seq, "update_IRQ\t: %s\n",
(at91_rtc_imr & AT91_RTC_ACKUPD) ? "yes" : "no");
(imr & AT91_RTC_ACKUPD) ? "yes" : "no");
seq_printf(seq, "periodic_IRQ\t: %s\n",
(at91_rtc_imr & AT91_RTC_SECEV) ? "yes" : "no");
(imr & AT91_RTC_SECEV) ? "yes" : "no");

return 0;
}

@ -233,7 +227,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
unsigned int rtsr;
unsigned long events = 0;

rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_imr;
rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read(AT91_RTC_IMR);
if (rtsr) { /* this interrupt is shared! Is it ours? */
if (rtsr & AT91_RTC_ALARM)
events |= (RTC_AF | RTC_IRQF);

@ -297,7 +291,6 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
AT91_RTC_SECEV | AT91_RTC_TIMEV |
AT91_RTC_CALEV);
at91_rtc_imr = 0;

ret = request_irq(irq, at91_rtc_interrupt,
IRQF_SHARED,

@ -336,7 +329,6 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)
at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
AT91_RTC_SECEV | AT91_RTC_TIMEV |
AT91_RTC_CALEV);
at91_rtc_imr = 0;
free_irq(irq, pdev);

rtc_device_unregister(rtc);

@ -349,35 +341,31 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)

/* AT91RM9200 RTC Power management control */

static u32 at91_rtc_bkpimr;

static u32 at91_rtc_imr;

static int at91_rtc_suspend(struct device *dev)
{
/* this IRQ is shared with DBGU and other hardware which isn't
* necessarily doing PM like we are...
*/
at91_rtc_bkpimr = at91_rtc_imr & (AT91_RTC_ALARM|AT91_RTC_SECEV);
if (at91_rtc_bkpimr) {
if (device_may_wakeup(dev)) {
at91_rtc_imr = at91_rtc_read(AT91_RTC_IMR)
& (AT91_RTC_ALARM|AT91_RTC_SECEV);
if (at91_rtc_imr) {
if (device_may_wakeup(dev))
enable_irq_wake(irq);
} else {
at91_rtc_write(AT91_RTC_IDR, at91_rtc_bkpimr);
at91_rtc_imr &= ~at91_rtc_bkpimr;
}
}
else
at91_rtc_write(AT91_RTC_IDR, at91_rtc_imr);
}
return 0;
}

static int at91_rtc_resume(struct device *dev)
{
if (at91_rtc_bkpimr) {
if (device_may_wakeup(dev)) {
if (at91_rtc_imr) {
if (device_may_wakeup(dev))
disable_irq_wake(irq);
} else {
at91_rtc_imr |= at91_rtc_bkpimr;
at91_rtc_write(AT91_RTC_IER, at91_rtc_bkpimr);
}
else
at91_rtc_write(AT91_RTC_IER, at91_rtc_imr);
}
return 0;
}

@ -64,6 +64,7 @@
#define AT91_RTC_SCCR 0x1c /* Status Clear Command Register */
#define AT91_RTC_IER 0x20 /* Interrupt Enable Register */
#define AT91_RTC_IDR 0x24 /* Interrupt Disable Register */
#define AT91_RTC_IMR 0x28 /* Interrupt Mask Register */

#define AT91_RTC_VER 0x2c /* Valid Entry Register */
#define AT91_RTC_NVTIM (1 << 0) /* Non valid Time */
@ -307,7 +307,7 @@ static void scm_blk_handle_error(struct scm_request *scmrq)
case EQC_WR_PROHIBIT:
spin_lock_irqsave(&bdev->lock, flags);
if (bdev->state != SCM_WR_PROHIBIT)
pr_info("%lu: Write access to the SCM increment is suspended\n",
pr_info("%lx: Write access to the SCM increment is suspended\n",
(unsigned long) bdev->scmdev->address);
bdev->state = SCM_WR_PROHIBIT;
spin_unlock_irqrestore(&bdev->lock, flags);

@ -445,7 +445,7 @@ void scm_blk_set_available(struct scm_blk_dev *bdev)

spin_lock_irqsave(&bdev->lock, flags);
if (bdev->state == SCM_WR_PROHIBIT)
pr_info("%lu: Write access to the SCM increment is restored\n",
pr_info("%lx: Write access to the SCM increment is restored\n",
(unsigned long) bdev->scmdev->address);
bdev->state = SCM_OPER;
spin_unlock_irqrestore(&bdev->lock, flags);

@ -463,12 +463,15 @@ static int __init scm_blk_init(void)
goto out;

scm_major = ret;
if (scm_alloc_rqs(nr_requests))
ret = scm_alloc_rqs(nr_requests);
if (ret)
goto out_unreg;

scm_debug = debug_register("scm_log", 16, 1, 16);
if (!scm_debug)
if (!scm_debug) {
ret = -ENOMEM;
goto out_free;
}

debug_register_view(scm_debug, &debug_hex_ascii_view);
debug_set_level(scm_debug, 2);

@ -19,7 +19,7 @@ static void scm_notify(struct scm_device *scmdev, enum scm_event event)

switch (event) {
case SCM_CHANGE:
pr_info("%lu: The capabilities of the SCM increment changed\n",
pr_info("%lx: The capabilities of the SCM increment changed\n",
(unsigned long) scmdev->address);
SCM_LOG(2, "State changed");
SCM_LOG_STATE(2, scmdev);
@ -915,7 +915,7 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
int i, rc;

/* Check if the tty3270 is already there. */
view = raw3270_find_view(&tty3270_fn, tty->index);
view = raw3270_find_view(&tty3270_fn, tty->index + RAW3270_FIRSTMINOR);
if (!IS_ERR(view)) {
tp = container_of(view, struct tty3270, view);
tty->driver_data = tp;

@ -927,15 +927,16 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
tp->inattr = TF_INPUT;
return tty_port_install(&tp->port, driver, tty);
}
if (tty3270_max_index < tty->index)
tty3270_max_index = tty->index;
if (tty3270_max_index < tty->index + 1)
tty3270_max_index = tty->index + 1;

/* Allocate tty3270 structure on first open. */
tp = tty3270_alloc_view();
if (IS_ERR(tp))
return PTR_ERR(tp);

rc = raw3270_add_view(&tp->view, &tty3270_fn, tty->index);
rc = raw3270_add_view(&tp->view, &tty3270_fn,
tty->index + RAW3270_FIRSTMINOR);
if (rc) {
tty3270_free_view(tp);
return rc;

@ -1846,12 +1847,12 @@ static const struct tty_operations tty3270_ops = {

void tty3270_create_cb(int minor)
{
tty_register_device(tty3270_driver, minor, NULL);
tty_register_device(tty3270_driver, minor - RAW3270_FIRSTMINOR, NULL);
}

void tty3270_destroy_cb(int minor)
{
tty_unregister_device(tty3270_driver, minor);
tty_unregister_device(tty3270_driver, minor - RAW3270_FIRSTMINOR);
}

struct raw3270_notifier tty3270_notifier =

@ -1884,7 +1885,8 @@ static int __init tty3270_init(void)
driver->driver_name = "tty3270";
driver->name = "3270/tty";
driver->major = IBM_TTY3270_MAJOR;
driver->minor_start = 0;
driver->minor_start = RAW3270_FIRSTMINOR;
driver->name_base = RAW3270_FIRSTMINOR;
driver->type = TTY_DRIVER_TYPE_SYSTEM;
driver->subtype = SYSTEM_TYPE_TTY;
driver->init_termios = tty_std_termios;
@ -55,6 +55,7 @@ comment "SPI Master Controller Drivers"

config SPI_ALTERA
tristate "Altera SPI Controller"
depends on GENERIC_HARDIRQS
select SPI_BITBANG
help
This is the driver for the Altera SPI Controller.

@ -310,7 +311,7 @@ config SPI_PXA2XX_DMA

config SPI_PXA2XX
tristate "PXA2xx SSP SPI master"
depends on ARCH_PXA || PCI || ACPI
depends on (ARCH_PXA || PCI || ACPI) && GENERIC_HARDIRQS
select PXA_SSP if ARCH_PXA
help
This enables using a PXA2xx or Sodaville SSP port as a SPI master

@ -152,7 +152,6 @@ static void bcm63xx_spi_setup_transfer(struct spi_device *spi,
static int bcm63xx_spi_setup(struct spi_device *spi)
{
struct bcm63xx_spi *bs;
int ret;

bs = spi_master_get_devdata(spi->master);

@ -490,7 +489,7 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
default:
dev_err(dev, "unsupported MSG_CTL width: %d\n",
bs->msg_ctl_width);
goto out_clk_disable;
goto out_err;
}

/* Initialize hardware */

@ -164,7 +164,7 @@ static int mpc512x_psc_spi_transfer_rxtx(struct spi_device *spi,

for (i = count; i > 0; i--) {
data = tx_buf ? *tx_buf++ : 0;
if (len == EOFBYTE)
if (len == EOFBYTE && t->cs_change)
setbits32(&fifo->txcmd, MPC512x_PSC_FIFO_EOF);
out_8(&fifo->txdata_8, data);
len--;

@ -1168,7 +1168,6 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)

master->dev.parent = &pdev->dev;
master->dev.of_node = pdev->dev.of_node;
ACPI_HANDLE_SET(&master->dev, ACPI_HANDLE(&pdev->dev));
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
|
||||
{
|
||||
struct s3c64xx_spi_driver_data *sdd = data;
|
||||
struct spi_master *spi = sdd->master;
|
||||
unsigned int val;
|
||||
unsigned int val, clr = 0;
|
||||
|
||||
val = readl(sdd->regs + S3C64XX_SPI_PENDING_CLR);
|
||||
val = readl(sdd->regs + S3C64XX_SPI_STATUS);
|
||||
|
||||
val &= S3C64XX_SPI_PND_RX_OVERRUN_CLR |
|
||||
S3C64XX_SPI_PND_RX_UNDERRUN_CLR |
|
||||
S3C64XX_SPI_PND_TX_OVERRUN_CLR |
|
||||
S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
|
||||
|
||||
writel(val, sdd->regs + S3C64XX_SPI_PENDING_CLR);
|
||||
|
||||
if (val & S3C64XX_SPI_PND_RX_OVERRUN_CLR)
|
||||
if (val & S3C64XX_SPI_ST_RX_OVERRUN_ERR) {
|
||||
clr = S3C64XX_SPI_PND_RX_OVERRUN_CLR;
|
||||
dev_err(&spi->dev, "RX overrun\n");
|
||||
if (val & S3C64XX_SPI_PND_RX_UNDERRUN_CLR)
|
||||
}
|
||||
if (val & S3C64XX_SPI_ST_RX_UNDERRUN_ERR) {
|
||||
clr |= S3C64XX_SPI_PND_RX_UNDERRUN_CLR;
|
||||
dev_err(&spi->dev, "RX underrun\n");
|
||||
if (val & S3C64XX_SPI_PND_TX_OVERRUN_CLR)
|
||||
}
|
||||
if (val & S3C64XX_SPI_ST_TX_OVERRUN_ERR) {
|
||||
clr |= S3C64XX_SPI_PND_TX_OVERRUN_CLR;
|
||||
dev_err(&spi->dev, "TX overrun\n");
|
||||
if (val & S3C64XX_SPI_PND_TX_UNDERRUN_CLR)
|
||||
}
|
||||
if (val & S3C64XX_SPI_ST_TX_UNDERRUN_ERR) {
|
||||
clr |= S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
|
||||
dev_err(&spi->dev, "TX underrun\n");
|
||||
}
|
||||
|
||||
/* Clear the pending irq by setting and then clearing it */
|
||||
writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR);
|
||||
writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
@ -1036,9 +1041,13 @@ static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
|
||||
writel(0, regs + S3C64XX_SPI_MODE_CFG);
|
||||
writel(0, regs + S3C64XX_SPI_PACKET_CNT);
|
||||
|
||||
/* Clear any irq pending bits */
|
||||
writel(readl(regs + S3C64XX_SPI_PENDING_CLR),
|
||||
regs + S3C64XX_SPI_PENDING_CLR);
|
||||
/* Clear any irq pending bits, should set and clear the bits */
|
||||
val = S3C64XX_SPI_PND_RX_OVERRUN_CLR |
|
||||
S3C64XX_SPI_PND_RX_UNDERRUN_CLR |
|
||||
S3C64XX_SPI_PND_TX_OVERRUN_CLR |
|
||||
S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
|
||||
writel(val, regs + S3C64XX_SPI_PENDING_CLR);
|
||||
writel(0, regs + S3C64XX_SPI_PENDING_CLR);
|
||||
|
||||
writel(0, regs + S3C64XX_SPI_SWAP_CFG);
|
||||
|
||||
|
@ -858,21 +858,6 @@ static int tegra_slink_setup(struct spi_device *spi)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int tegra_slink_prepare_transfer(struct spi_master *master)
|
||||
{
|
||||
struct tegra_slink_data *tspi = spi_master_get_devdata(master);
|
||||
|
||||
return pm_runtime_get_sync(tspi->dev);
|
||||
}
|
||||
|
||||
static int tegra_slink_unprepare_transfer(struct spi_master *master)
|
||||
{
|
||||
struct tegra_slink_data *tspi = spi_master_get_devdata(master);
|
||||
|
||||
pm_runtime_put(tspi->dev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int tegra_slink_transfer_one_message(struct spi_master *master,
|
||||
struct spi_message *msg)
|
||||
{
|
||||
@ -885,6 +870,12 @@ static int tegra_slink_transfer_one_message(struct spi_master *master,
|
||||
|
||||
msg->status = 0;
|
||||
msg->actual_length = 0;
|
||||
ret = pm_runtime_get_sync(tspi->dev);
|
||||
if (ret < 0) {
|
||||
dev_err(tspi->dev, "runtime get failed: %d\n", ret);
|
||||
goto done;
|
||||
}
|
||||
|
||||
single_xfer = list_is_singular(&msg->transfers);
|
||||
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
|
||||
INIT_COMPLETION(tspi->xfer_completion);
|
||||
@ -921,6 +912,8 @@ static int tegra_slink_transfer_one_message(struct spi_master *master,
|
||||
exit:
|
||||
tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
|
||||
tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);
|
||||
pm_runtime_put(tspi->dev);
|
||||
done:
|
||||
msg->status = ret;
|
||||
spi_finalize_current_message(master);
|
||||
return ret;
|
||||
@ -1148,9 +1141,7 @@ static int tegra_slink_probe(struct platform_device *pdev)
|
||||
/* the spi->mode bits understood by this driver: */
|
||||
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
|
||||
master->setup = tegra_slink_setup;
|
||||
master->prepare_transfer_hardware = tegra_slink_prepare_transfer;
|
||||
master->transfer_one_message = tegra_slink_transfer_one_message;
|
||||
master->unprepare_transfer_hardware = tegra_slink_unprepare_transfer;
|
||||
master->num_chipselect = MAX_CHIP_SELECT;
|
||||
master->bus_num = -1;
|
||||
|
||||
|
@ -543,17 +543,16 @@ static void spi_pump_messages(struct kthread_work *work)
/* Lock queue and check for queue work */
spin_lock_irqsave(&master->queue_lock, flags);
if (list_empty(&master->queue) || !master->running) {
if (master->busy && master->unprepare_transfer_hardware) {
ret = master->unprepare_transfer_hardware(master);
if (ret) {
spin_unlock_irqrestore(&master->queue_lock, flags);
dev_err(&master->dev,
"failed to unprepare transfer hardware\n");
return;
}
if (!master->busy) {
spin_unlock_irqrestore(&master->queue_lock, flags);
return;
}
master->busy = false;
spin_unlock_irqrestore(&master->queue_lock, flags);
if (master->unprepare_transfer_hardware &&
master->unprepare_transfer_hardware(master))
dev_err(&master->dev,
"failed to unprepare transfer hardware\n");
return;
}

@ -984,7 +983,7 @@ static void acpi_register_spi_devices(struct spi_master *master)
acpi_status status;
acpi_handle handle;

handle = ACPI_HANDLE(&master->dev);
handle = ACPI_HANDLE(master->dev.parent);
if (!handle)
return;
@ -67,7 +67,6 @@ static void usb_port_device_release(struct device *dev)
{
struct usb_port *port_dev = to_usb_port(dev);

dev_pm_qos_hide_flags(dev);
kfree(port_dev);
}

@ -1400,7 +1400,7 @@ int fb_videomode_from_videomode(const struct videomode *vm,
fbmode->vmode = 0;
if (vm->dmt_flags & VESA_DMT_HSYNC_HIGH)
fbmode->sync |= FB_SYNC_HOR_HIGH_ACT;
if (vm->dmt_flags & VESA_DMT_HSYNC_HIGH)
if (vm->dmt_flags & VESA_DMT_VSYNC_HIGH)
fbmode->sync |= FB_SYNC_VERT_HIGH_ACT;
if (vm->data_flags & DISPLAY_FLAGS_INTERLACED)
fbmode->vmode |= FB_VMODE_INTERLACED;

@ -1973,7 +1973,8 @@ static int uvesafb_init(void)
err = -ENOMEM;

if (err) {
platform_device_put(uvesafb_device);
if (uvesafb_device)
platform_device_put(uvesafb_device);
platform_driver_unregister(&uvesafb_driver);
cn_del_callback(&uvesafb_cn_id);
return err;
@ -82,7 +82,7 @@ fw-shipped-$(CONFIG_SCSI_ADVANSYS) += advansys/mcode.bin advansys/38C1600.bin \
fw-shipped-$(CONFIG_SCSI_QLOGIC_1280) += qlogic/1040.bin qlogic/1280.bin \
qlogic/12160.bin
fw-shipped-$(CONFIG_SCSI_QLOGICPTI) += qlogic/isp1000.bin
fw-shipped-$(CONFIG_INFINIBAND_QIB) += intel/sd7220.fw
fw-shipped-$(CONFIG_INFINIBAND_QIB) += qlogic/sd7220.fw
fw-shipped-$(CONFIG_SND_KORG1212) += korg/k1212.dsp
fw-shipped-$(CONFIG_SND_MAESTRO3) += ess/maestro3_assp_kernel.fw \
ess/maestro3_assp_minisrc.fw
@ -2999,20 +2999,23 @@ static int ext4_split_extent_at(handle_t *handle,
if (split_flag & EXT4_EXT_DATA_VALID1) {
err = ext4_ext_zeroout(inode, ex2);
zero_ex.ee_block = ex2->ee_block;
zero_ex.ee_len = ext4_ext_get_actual_len(ex2);
zero_ex.ee_len = cpu_to_le16(
ext4_ext_get_actual_len(ex2));
ext4_ext_store_pblock(&zero_ex,
ext4_ext_pblock(ex2));
} else {
err = ext4_ext_zeroout(inode, ex);
zero_ex.ee_block = ex->ee_block;
zero_ex.ee_len = ext4_ext_get_actual_len(ex);
zero_ex.ee_len = cpu_to_le16(
ext4_ext_get_actual_len(ex));
ext4_ext_store_pblock(&zero_ex,
ext4_ext_pblock(ex));
}
} else {
err = ext4_ext_zeroout(inode, &orig_ex);
zero_ex.ee_block = orig_ex.ee_block;
zero_ex.ee_len = ext4_ext_get_actual_len(&orig_ex);
zero_ex.ee_len = cpu_to_le16(
ext4_ext_get_actual_len(&orig_ex));
ext4_ext_store_pblock(&zero_ex,
ext4_ext_pblock(&orig_ex));
}

@ -3272,7 +3275,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
if (err)
goto out;
zero_ex.ee_block = ex->ee_block;
zero_ex.ee_len = ext4_ext_get_actual_len(ex);
zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex));
ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex));

err = ext4_ext_get_access(handle, inode, path + depth);

@ -1539,9 +1539,9 @@ static int free_hole_blocks(handle_t *handle, struct inode *inode,
blk = *i_data;
if (level > 0) {
ext4_lblk_t first2;
bh = sb_bread(inode->i_sb, blk);
bh = sb_bread(inode->i_sb, le32_to_cpu(blk));
if (!bh) {
EXT4_ERROR_INODE_BLOCK(inode, blk,
EXT4_ERROR_INODE_BLOCK(inode, le32_to_cpu(blk),
"Read failure");
return -EIO;
}
@ -923,8 +923,11 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
cmd = F_SETLK;
fl->fl_type = F_UNLCK;
}
if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
if (fl->fl_type == F_UNLCK)
posix_lock_file_wait(file, fl);
return -EIO;
}
if (IS_GETLK(cmd))
return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
else if (fl->fl_type == F_UNLCK)

@ -588,6 +588,7 @@ struct lm_lockstruct {
struct dlm_lksb ls_control_lksb; /* control_lock */
char ls_control_lvb[GDLM_LVB_SIZE]; /* control_lock lvb */
struct completion ls_sync_wait; /* {control,mounted}_{lock,unlock} */
char *ls_lvb_bits;

spinlock_t ls_recover_spin; /* protects following fields */
unsigned long ls_recover_flags; /* DFL_ */

@ -483,12 +483,8 @@ static void control_lvb_write(struct lm_lockstruct *ls, uint32_t lvb_gen,

static int all_jid_bits_clear(char *lvb)
{
int i;
for (i = JID_BITMAP_OFFSET; i < GDLM_LVB_SIZE; i++) {
if (lvb[i])
return 0;
}
return 1;
return !memchr_inv(lvb + JID_BITMAP_OFFSET, 0,
GDLM_LVB_SIZE - JID_BITMAP_OFFSET);
}

static void sync_wait_cb(void *arg)

@ -580,7 +576,6 @@ static void gfs2_control_func(struct work_struct *work)
{
struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_control_work.work);
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
char lvb_bits[GDLM_LVB_SIZE];
uint32_t block_gen, start_gen, lvb_gen, flags;
int recover_set = 0;
int write_lvb = 0;

@ -634,7 +629,7 @@ static void gfs2_control_func(struct work_struct *work)
return;
}

control_lvb_read(ls, &lvb_gen, lvb_bits);
control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits);

spin_lock(&ls->ls_recover_spin);
if (block_gen != ls->ls_recover_block ||

@ -664,10 +659,10 @@ static void gfs2_control_func(struct work_struct *work)

ls->ls_recover_result[i] = 0;

if (!test_bit_le(i, lvb_bits + JID_BITMAP_OFFSET))
if (!test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET))
continue;

__clear_bit_le(i, lvb_bits + JID_BITMAP_OFFSET);
__clear_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET);
write_lvb = 1;
}
}

@ -691,7 +686,7 @@ static void gfs2_control_func(struct work_struct *work)
continue;
if (ls->ls_recover_submit[i] < start_gen) {
ls->ls_recover_submit[i] = 0;
__set_bit_le(i, lvb_bits + JID_BITMAP_OFFSET);
__set_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET);
}
}
/* even if there are no bits to set, we need to write the

@ -705,7 +700,7 @@ static void gfs2_control_func(struct work_struct *work)
spin_unlock(&ls->ls_recover_spin);

if (write_lvb) {
control_lvb_write(ls, start_gen, lvb_bits);
control_lvb_write(ls, start_gen, ls->ls_lvb_bits);
flags = DLM_LKF_CONVERT | DLM_LKF_VALBLK;
} else {
flags = DLM_LKF_CONVERT;

@ -725,7 +720,7 @@ static void gfs2_control_func(struct work_struct *work)
*/

for (i = 0; i < recover_size; i++) {
if (test_bit_le(i, lvb_bits + JID_BITMAP_OFFSET)) {
if (test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) {
fs_info(sdp, "recover generation %u jid %d\n",
start_gen, i);
gfs2_recover_set(sdp, i);

@ -758,7 +753,6 @@ static void gfs2_control_func(struct work_struct *work)
static int control_mount(struct gfs2_sbd *sdp)
{
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
char lvb_bits[GDLM_LVB_SIZE];
uint32_t start_gen, block_gen, mount_gen, lvb_gen;
int mounted_mode;
int retries = 0;

@ -857,7 +851,7 @@ locks_done:
* lvb_gen will be non-zero.
*/

control_lvb_read(ls, &lvb_gen, lvb_bits);
control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits);

if (lvb_gen == 0xFFFFFFFF) {
/* special value to force mount attempts to fail */

@ -887,7 +881,7 @@ locks_done:
* and all lvb bits to be clear (no pending journal recoveries.)
*/

if (!all_jid_bits_clear(lvb_bits)) {
if (!all_jid_bits_clear(ls->ls_lvb_bits)) {
/* journals need recovery, wait until all are clear */
fs_info(sdp, "control_mount wait for journal recovery\n");
goto restart;

@ -949,7 +943,6 @@ static int dlm_recovery_wait(void *word)
static int control_first_done(struct gfs2_sbd *sdp)
{
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
char lvb_bits[GDLM_LVB_SIZE];
uint32_t start_gen, block_gen;
int error;

@ -991,8 +984,8 @@ restart:
memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t));
spin_unlock(&ls->ls_recover_spin);

memset(lvb_bits, 0, sizeof(lvb_bits));
control_lvb_write(ls, start_gen, lvb_bits);
memset(ls->ls_lvb_bits, 0, GDLM_LVB_SIZE);
control_lvb_write(ls, start_gen, ls->ls_lvb_bits);

error = mounted_lock(sdp, DLM_LOCK_PR, DLM_LKF_CONVERT);
if (error)

@ -1022,6 +1015,12 @@ static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots,
uint32_t old_size, new_size;
int i, max_jid;

if (!ls->ls_lvb_bits) {
ls->ls_lvb_bits = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
if (!ls->ls_lvb_bits)
return -ENOMEM;
}

max_jid = 0;
for (i = 0; i < num_slots; i++) {
if (max_jid < slots[i].slot - 1)

@ -1057,6 +1056,7 @@ static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots,

static void free_recover_size(struct lm_lockstruct *ls)
{
kfree(ls->ls_lvb_bits);
kfree(ls->ls_recover_submit);
kfree(ls->ls_recover_result);
ls->ls_recover_submit = NULL;

@ -1205,6 +1205,7 @@ static int gdlm_mount(struct gfs2_sbd *sdp, const char *table)
ls->ls_recover_size = 0;
ls->ls_recover_submit = NULL;
ls->ls_recover_result = NULL;
ls->ls_lvb_bits = NULL;

error = set_recover_size(sdp, NULL, 0);
if (error)
@ -576,7 +576,7 @@ int gfs2_rs_alloc(struct gfs2_inode *ip)
RB_CLEAR_NODE(&ip->i_res->rs_node);
out:
up_write(&ip->i_rw_mutex);
return 0;
return error;
}

static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs)

@ -1181,12 +1181,9 @@ int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed)
{
struct super_block *sb = sdp->sd_vfs;
struct block_device *bdev = sb->s_bdev;
const unsigned int sects_per_blk = sdp->sd_sb.sb_bsize /
bdev_logical_block_size(sb->s_bdev);
u64 blk;
sector_t start = 0;
sector_t nr_sects = 0;
sector_t nr_blks = 0;
int rv;
unsigned int x;
u32 trimmed = 0;

@ -1206,35 +1203,34 @@ int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
if (diff == 0)
continue;
blk = offset + ((bi->bi_start + x) * GFS2_NBBY);
blk *= sects_per_blk; /* convert to sectors */
while(diff) {
if (diff & 1) {
if (nr_sects == 0)
if (nr_blks == 0)
goto start_new_extent;
if ((start + nr_sects) != blk) {
if (nr_sects >= minlen) {
rv = blkdev_issue_discard(bdev,
start, nr_sects,
if ((start + nr_blks) != blk) {
if (nr_blks >= minlen) {
rv = sb_issue_discard(sb,
start, nr_blks,
GFP_NOFS, 0);
if (rv)
goto fail;
trimmed += nr_sects;
trimmed += nr_blks;
}
nr_sects = 0;
nr_blks = 0;
start_new_extent:
start = blk;
}
nr_sects += sects_per_blk;
nr_blks++;
}
diff >>= 2;
blk += sects_per_blk;
blk++;
}
}
if (nr_sects >= minlen) {
rv = blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS, 0);
if (nr_blks >= minlen) {
rv = sb_issue_discard(sb, start, nr_blks, GFP_NOFS, 0);
if (rv)
goto fail;
trimmed += nr_sects;
trimmed += nr_blks;
}
if (ptrimmed)
*ptrimmed = trimmed;
@ -187,8 +187,8 @@ fill_with_dentries(void *buf, const char *name, int namelen, loff_t offset,
if (dbuf->count == ARRAY_SIZE(dbuf->dentries))
return -ENOSPC;

if (name[0] == '.' && (name[1] == '\0' ||
(name[1] == '.' && name[2] == '\0')))
if (name[0] == '.' && (namelen < 2 ||
(namelen == 2 && name[1] == '.')))
return 0;

dentry = lookup_one_len(name, dbuf->xadir, namelen);

@ -1568,6 +1568,12 @@ static int ubifs_remount_rw(struct ubifs_info *c)
c->remounting_rw = 1;
c->ro_mount = 0;

if (c->space_fixup) {
err = ubifs_fixup_free_space(c);
if (err)
return err;
}

err = check_free_space(c);
if (err)
goto out;

@ -1684,12 +1690,6 @@ static int ubifs_remount_rw(struct ubifs_info *c)
err = dbg_check_space_info(c);
}

if (c->space_fixup) {
err = ubifs_fixup_free_space(c);
if (err)
goto out;
}

mutex_unlock(&c->umount_mutex);
return err;
@ -213,7 +213,7 @@ struct devfreq_simple_ondemand_data {
#endif

#else /* !CONFIG_PM_DEVFREQ */
static struct devfreq *devfreq_add_device(struct device *dev,
static inline struct devfreq *devfreq_add_device(struct device *dev,
struct devfreq_dev_profile *profile,
const char *governor_name,
void *data)

@ -221,34 +221,34 @@ static struct devfreq *devfreq_add_device(struct device *dev,
return NULL;
}

static int devfreq_remove_device(struct devfreq *devfreq)
static inline int devfreq_remove_device(struct devfreq *devfreq)
{
return 0;
}

static int devfreq_suspend_device(struct devfreq *devfreq)
static inline int devfreq_suspend_device(struct devfreq *devfreq)
{
return 0;
}

static int devfreq_resume_device(struct devfreq *devfreq)
static inline int devfreq_resume_device(struct devfreq *devfreq)
{
return 0;
}

static struct opp *devfreq_recommended_opp(struct device *dev,
static inline struct opp *devfreq_recommended_opp(struct device *dev,
unsigned long *freq, u32 flags)
{
return -EINVAL;
return ERR_PTR(-EINVAL);
}

static int devfreq_register_opp_notifier(struct device *dev,
static inline int devfreq_register_opp_notifier(struct device *dev,
struct devfreq *devfreq)
{
return -EINVAL;
}

static int devfreq_unregister_opp_notifier(struct device *dev,
static inline int devfreq_unregister_opp_notifier(struct device *dev,
struct devfreq *devfreq)
{
return -EINVAL;
@ -208,9 +208,9 @@ struct netdev_hw_addr {
#define NETDEV_HW_ADDR_T_SLAVE 3
#define NETDEV_HW_ADDR_T_UNICAST 4
#define NETDEV_HW_ADDR_T_MULTICAST 5
bool synced;
bool global_use;
int refcount;
int synced;
struct rcu_head rcu_head;
};

@ -2726,6 +2726,13 @@ static inline void nf_reset(struct sk_buff *skb)
#endif
}

static inline void nf_reset_trace(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
skb->nf_trace = 0;
#endif
}

/* Note: This doesn't put any conntrack and bridge info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
include/sound/max98090.h
Executable file → Normal file
@ -488,6 +488,7 @@ struct snd_soc_dapm_path {
/* status */
u32 connect:1; /* source and sink widgets are connected */
u32 walked:1; /* path has been walked */
u32 walking:1; /* path is in the process of being walked */
u32 weak:1; /* path ignored for power management */

int (*connected)(struct snd_soc_dapm_widget *source,
@ -1940,7 +1940,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)

/* Check the cache first. */
/* (Cache hit rate is typically around 35%.) */
vma = mm->mmap_cache;
vma = ACCESS_ONCE(mm->mmap_cache);
if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
struct rb_node *rb_node;

@ -821,7 +821,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
struct vm_area_struct *vma;

/* check the cache first */
vma = mm->mmap_cache;
vma = ACCESS_ONCE(mm->mmap_cache);
if (vma && vma->vm_start <= addr && vma->vm_end > addr)
return vma;
@ -531,6 +531,8 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
struct sk_buff *skb;
int copied, error = -EINVAL;

msg->msg_namelen = 0;

if (sock->state != SS_CONNECTED)
return -ENOTCONN;

@ -1642,6 +1642,7 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock,
ax25_address src;
const unsigned char *mac = skb_mac_header(skb);

memset(sax, 0, sizeof(struct full_sockaddr_ax25));
ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL,
&digi, NULL, NULL);
sax->sax25_family = AF_AX25;

@ -230,6 +230,8 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
if (flags & (MSG_OOB))
return -EOPNOTSUPP;

msg->msg_namelen = 0;

skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb) {
if (sk->sk_shutdown & RCV_SHUTDOWN)

@ -237,8 +239,6 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
return err;
}

msg->msg_namelen = 0;

copied = skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;

@ -608,6 +608,7 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,

if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
rfcomm_dlc_accept(d);
msg->msg_namelen = 0;
return 0;
}

@ -665,6 +665,7 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
hci_conn_accept(pi->conn->hcon, 0);
sk->sk_state = BT_CONFIG;
msg->msg_namelen = 0;

release_sock(sk);
return 0;

@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
if (m->msg_flags&MSG_OOB)
goto read_error;

m->msg_namelen = 0;

skb = skb_recv_datagram(sk, flags, 0 , &ret);
if (!skb)
goto read_error;
@ -1639,6 +1639,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
skb->mark = 0;
secpath_reset(skb);
nf_reset(skb);
nf_reset_trace(skb);
return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);

@ -37,7 +37,7 @@ static int __hw_addr_create_ex(struct netdev_hw_addr_list *list,
ha->type = addr_type;
ha->refcount = 1;
ha->global_use = global;
ha->synced = false;
ha->synced = 0;
list_add_tail_rcu(&ha->list, &list->list);
list->count++;

@ -165,7 +165,7 @@ int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
addr_len, ha->type);
if (err)
break;
ha->synced = true;
ha->synced++;
ha->refcount++;
} else if (ha->refcount == 1) {
__hw_addr_del(to_list, ha->addr, addr_len, ha->type);

@ -186,7 +186,7 @@ void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
if (ha->synced) {
__hw_addr_del(to_list, ha->addr,
addr_len, ha->type);
ha->synced = false;
ha->synced--;
__hw_addr_del(from_list, ha->addr,
addr_len, ha->type);
}
@ -802,8 +802,10 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
if (nlh->nlmsg_flags & NLM_F_EXCL ||
!(nlh->nlmsg_flags & NLM_F_REPLACE))
return -EEXIST;

set_ifa_lifetime(ifa_existing, valid_lft, prefered_lft);
ifa = ifa_existing;
set_ifa_lifetime(ifa, valid_lft, prefered_lft);
rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
}
return 0;
}

@ -52,7 +52,7 @@ static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt,
if (pfx_len - i >= 32)
mask = 0;
else
mask = htonl(~((1 << (pfx_len - i)) - 1));
mask = htonl((1 << (i - pfx_len + 32)) - 1);

idx = i / 32;
addr->s6_addr32[idx] &= mask;

@ -386,6 +386,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,

if (dst)
dst->ops->redirect(dst, sk, skb);
goto out;
}

if (type == ICMPV6_PKT_TOOBIG) {
@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,

IRDA_DEBUG(4, "%s()\n", __func__);

msg->msg_namelen = 0;

skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
flags & MSG_DONTWAIT, &err);
if (!skb)