
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

An ipvlan bug fix in 'net' conflicted with the abstraction away
of the IPv6-specific support in 'net-next'.

Similarly, a bug fix for mlx5 in 'net' conflicted with the flow
action conversion in 'net-next'.

Signed-off-by: David S. Miller <davem@davemloft.net>
Author: David S. Miller <davem@davemloft.net>
Date:   2019-02-08 15:00:17 -08:00
commit a655fe9f19
399 changed files with 4114 additions and 1568 deletions

View File

@ -24,7 +24,7 @@ What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/
cpld3_version
Date: November 2018
KernelVersion: 4.21
KernelVersion: 5.0
Contact: Vadim Pasternak <vadimp@mellanox.com>
Description: These files show which CPLD versions have been burned
on the LED board.
@ -35,7 +35,7 @@ What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/
jtag_enable
Date: November 2018
KernelVersion: 4.21
KernelVersion: 5.0
Contact: Vadim Pasternak <vadimp@mellanox.com>
Description: These files enable and disable access to the JTAG domain.
By default access to the JTAG domain is disabled.
@ -105,7 +105,7 @@ What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/
reset_voltmon_upgrade_fail
Date: November 2018
KernelVersion: 4.21
KernelVersion: 5.0
Contact: Vadim Pasternak <vadimp@mellanox.com>
Description: These files show the system reset cause, as follows: ComEx
power fail, reset from ComEx, system platform reset, reset

View File

@ -17,7 +17,11 @@ extra-y += $(DT_TMP_SCHEMA)
quiet_cmd_mk_schema = SCHEMA $@
cmd_mk_schema = $(DT_MK_SCHEMA) $(DT_MK_SCHEMA_FLAGS) -o $@ $(filter-out FORCE, $^)
DT_DOCS = $(shell cd $(srctree)/$(src) && find * -name '*.yaml')
DT_DOCS = $(shell \
cd $(srctree)/$(src) && \
find * \( -name '*.yaml' ! -name $(DT_TMP_SCHEMA) \) \
)
DT_SCHEMA_FILES ?= $(addprefix $(src)/,$(DT_DOCS))
extra-y += $(patsubst $(src)/%.yaml,%.example.dts, $(DT_SCHEMA_FILES))

View File

@ -4,14 +4,10 @@ Required properties:
- compatible : "olpc,ap-sp"
- reg : base address and length of SoC's WTM registers
- interrupts : SP-AP interrupt
- clocks : phandle + clock-specifier for the clock that drives the WTM
- clock-names: should be "sp"
Example:
ap-sp@d4290000 {
compatible = "olpc,ap-sp";
reg = <0xd4290000 0x1000>;
interrupts = <40>;
clocks = <&soc_clocks MMP2_CLK_SP>;
clock-names = "sp";
}

View File

@ -56,26 +56,32 @@ of any kernel data structures.
dentry-state:
From linux/fs/dentry.c:
From linux/include/linux/dcache.h:
--------------------------------------------------------------
struct {
struct dentry_stat_t dentry_stat {
int nr_dentry;
int nr_unused;
int age_limit; /* age in seconds */
int want_pages; /* pages requested by system */
int dummy[2];
} dentry_stat = {0, 0, 45, 0,};
--------------------------------------------------------------
int nr_negative; /* # of unused negative dentries */
int dummy; /* Reserved for future use */
};
--------------------------------------------------------------
Dentries are dynamically allocated and deallocated.
nr_dentry shows the total number of dentries allocated (active
+ unused). nr_unused shows the number of dentries that are not
actively used, but are saved in the LRU list for future reuse.
Dentries are dynamically allocated and deallocated, and
nr_dentry seems to be 0 all the time. Hence it's safe to
assume that only nr_unused, age_limit and want_pages are
used. Nr_unused seems to be exactly what its name says.
Age_limit is the age in seconds after which dcache entries
can be reclaimed when memory is short and want_pages is
nonzero when shrink_dcache_pages() has been called and the
dcache isn't pruned yet.
nr_negative shows the number of unused dentries that are also
negative dentries which do not map to actual files.
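A minimal user-space sketch of reading these counters (the six-field
order follows struct dentry_stat_t above; /proc/sys/fs/dentry-state is
the standard sysctl path):

#include <stdio.h>

/* Print the dentry-state counters; field order matches struct
 * dentry_stat_t: nr_dentry, nr_unused, age_limit, want_pages,
 * nr_negative, dummy. */
int main(void)
{
	long nr_dentry, nr_unused, age_limit, want_pages, nr_negative, dummy;
	FILE *f = fopen("/proc/sys/fs/dentry-state", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%ld %ld %ld %ld %ld %ld", &nr_dentry, &nr_unused,
		   &age_limit, &want_pages, &nr_negative, &dummy) == 6)
		printf("allocated=%ld unused=%ld negative=%ld\n",
		       nr_dentry, nr_unused, nr_negative);
	fclose(f);
	return 0;
}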
==============================================================
dquot-max & dquot-nr:

View File

@ -9,7 +9,7 @@ Fenghua Yu <fenghua.yu@intel.com>
Tony Luck <tony.luck@intel.com>
Vikas Shivappa <vikas.shivappa@intel.com>
This feature is enabled by the CONFIG_X86_RESCTRL and the x86 /proc/cpuinfo
This feature is enabled by the CONFIG_X86_CPU_RESCTRL and the x86 /proc/cpuinfo
flag bits:
RDT (Resource Director Technology) Allocation - "rdt_a"
CAT (Cache Allocation Technology) - "cat_l3", "cat_l2"

View File

@ -2848,6 +2848,9 @@ F: include/uapi/linux/if_bonding.h
BPF (Safe dynamic programs and tools)
M: Alexei Starovoitov <ast@kernel.org>
M: Daniel Borkmann <daniel@iogearbox.net>
R: Martin KaFai Lau <kafai@fb.com>
R: Song Liu <songliubraving@fb.com>
R: Yonghong Song <yhs@fb.com>
L: netdev@vger.kernel.org
L: linux-kernel@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git
@ -2873,6 +2876,8 @@ F: samples/bpf/
F: tools/bpf/
F: tools/lib/bpf/
F: tools/testing/selftests/bpf/
K: bpf
N: bpf
BPF JIT for ARM
M: Shubham Bansal <illusionist.neo@gmail.com>
@ -12890,6 +12895,13 @@ F: Documentation/devicetree/bindings/net/dsa/realtek-smi.txt
F: drivers/net/dsa/realtek-smi*
F: drivers/net/dsa/rtl83*
REDPINE WIRELESS DRIVER
M: Amitkumar Karwar <amitkarwar@gmail.com>
M: Siva Rebbagondla <siva8118@gmail.com>
L: linux-wireless@vger.kernel.org
S: Maintained
F: drivers/net/wireless/rsi/
REGISTER MAP ABSTRACTION
M: Mark Brown <broonie@kernel.org>
L: linux-kernel@vger.kernel.org
@ -13718,6 +13730,15 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/sfc/
SFF/SFP/SFP+ MODULE SUPPORT
M: Russell King <linux@armlinux.org.uk>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/phy/phylink.c
F: drivers/net/phy/sfp*
F: include/linux/phylink.h
F: include/linux/sfp.h
SGI GRU DRIVER
M: Dimitri Sivanich <sivanich@sgi.com>
S: Maintained
@ -16663,6 +16684,15 @@ S: Maintained
F: drivers/platform/x86/
F: drivers/platform/olpc/
X86 PLATFORM DRIVERS - ARCH
R: Darren Hart <dvhart@infradead.org>
R: Andy Shevchenko <andy@infradead.org>
L: platform-driver-x86@vger.kernel.org
L: x86@kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
S: Maintained
F: arch/x86/platform
X86 VDSO
M: Andy Lutomirski <luto@kernel.org>
L: linux-kernel@vger.kernel.org

View File

@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 0
SUBLEVEL = 0
EXTRAVERSION = -rc4
EXTRAVERSION = -rc5
NAME = Shy Crocodile
# *DOCUMENTATION*

View File

@ -83,7 +83,7 @@ static void __iomem *cns3xxx_pci_map_bus(struct pci_bus *bus,
} else /* remote PCI bus */
base = cnspci->cfg1_regs + ((busno & 0xf) << 20);
return base + (where & 0xffc) + (devfn << 12);
return base + where + (devfn << 12);
}
static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
@ -93,7 +93,7 @@ static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
u32 mask = (0x1ull << (size * 8)) - 1;
int shift = (where % 4) * 8;
ret = pci_generic_config_read32(bus, devfn, where, size, val);
ret = pci_generic_config_read(bus, devfn, where, size, val);
if (ret == PCIBIOS_SUCCESSFUL && !bus->number && !devfn &&
(where & 0xffc) == PCI_CLASS_REVISION)

View File

@ -299,8 +299,10 @@ int swsusp_arch_suspend(void)
dcache_clean_range(__idmap_text_start, __idmap_text_end);
/* Clean kvm setup code to PoC? */
if (el2_reset_needed())
if (el2_reset_needed()) {
dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
dcache_clean_range(__hyp_text_start, __hyp_text_end);
}
/* make the crash dump kernel image protected again */
crash_post_resume();

View File

@ -28,6 +28,8 @@
#include <asm/virt.h>
.text
.pushsection .hyp.text, "ax"
.align 11
ENTRY(__hyp_stub_vectors)

View File

@ -88,6 +88,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
* we end up running with module randomization disabled.
*/
module_alloc_base = (u64)_etext - MODULES_VSIZE;
__flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
/*
* Try to map the FDT early. If this fails, we simply bail,

View File

@ -478,13 +478,13 @@ bool arch_within_kprobe_blacklist(unsigned long addr)
addr < (unsigned long)__entry_text_end) ||
(addr >= (unsigned long)__idmap_text_start &&
addr < (unsigned long)__idmap_text_end) ||
(addr >= (unsigned long)__hyp_text_start &&
addr < (unsigned long)__hyp_text_end) ||
!!search_exception_tables(addr))
return true;
if (!is_kernel_in_hyp_mode()) {
if ((addr >= (unsigned long)__hyp_text_start &&
addr < (unsigned long)__hyp_text_end) ||
(addr >= (unsigned long)__hyp_idmap_text_start &&
if ((addr >= (unsigned long)__hyp_idmap_text_start &&
addr < (unsigned long)__hyp_idmap_text_end))
return true;
}

View File

@ -33,7 +33,11 @@ void sync_icache_aliases(void *kaddr, unsigned long len)
__clean_dcache_area_pou(kaddr, len);
__flush_icache_all();
} else {
flush_icache_range(addr, addr + len);
/*
* Don't issue kick_all_cpus_sync() after I-cache invalidation
* for user mappings.
*/
__flush_icache_range(addr, addr + len);
}
}

View File

@ -30,6 +30,7 @@ generic-y += pgalloc.h
generic-y += preempt.h
generic-y += segment.h
generic-y += serial.h
generic-y += shmparam.h
generic-y += tlbflush.h
generic-y += topology.h
generic-y += trace_clock.h

View File

@ -1,5 +1,4 @@
include include/uapi/asm-generic/Kbuild.asm
generic-y += kvm_para.h
generic-y += shmparam.h
generic-y += ucontext.h

View File

@ -40,6 +40,7 @@ generic-y += preempt.h
generic-y += scatterlist.h
generic-y += sections.h
generic-y += serial.h
generic-y += shmparam.h
generic-y += sizes.h
generic-y += spinlock.h
generic-y += timex.h

View File

@ -1,5 +1,4 @@
include include/uapi/asm-generic/Kbuild.asm
generic-y += kvm_para.h
generic-y += shmparam.h
generic-y += ucontext.h

View File

@ -30,6 +30,7 @@ generic-y += rwsem.h
generic-y += sections.h
generic-y += segment.h
generic-y += serial.h
generic-y += shmparam.h
generic-y += sizes.h
generic-y += topology.h
generic-y += trace_clock.h

View File

@ -1,4 +1,3 @@
include include/uapi/asm-generic/Kbuild.asm
generic-y += shmparam.h
generic-y += ucontext.h

View File

@ -20,6 +20,7 @@ generic-y += mm-arch-hooks.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += sections.h
generic-y += shmparam.h
generic-y += spinlock.h
generic-y += topology.h
generic-y += trace_clock.h

View File

@ -2,4 +2,3 @@ include include/uapi/asm-generic/Kbuild.asm
generated-y += unistd_32.h
generic-y += kvm_para.h
generic-y += shmparam.h

View File

@ -26,6 +26,7 @@ generic-y += parport.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += serial.h
generic-y += shmparam.h
generic-y += syscalls.h
generic-y += topology.h
generic-y += trace_clock.h

View File

@ -2,5 +2,4 @@ include include/uapi/asm-generic/Kbuild.asm
generated-y += unistd_32.h
generic-y += kvm_para.h
generic-y += shmparam.h
generic-y += ucontext.h

View File

@ -34,6 +34,7 @@ generic-y += qrwlock_types.h
generic-y += qrwlock.h
generic-y += sections.h
generic-y += segment.h
generic-y += shmparam.h
generic-y += string.h
generic-y += switch_to.h
generic-y += topology.h

View File

@ -1,5 +1,4 @@
include include/uapi/asm-generic/Kbuild.asm
generic-y += kvm_para.h
generic-y += shmparam.h
generic-y += ucontext.h

View File

@ -104,7 +104,7 @@ choice
prompt "Base ISA"
default ARCH_RV64I
help
This selects the base ISA that this kernel will traget and must match
This selects the base ISA that this kernel will target and must match
the target platform.
config ARCH_RV32I

View File

@ -13,8 +13,6 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_BPF_SYSCALL=y
CONFIG_SMP=y
CONFIG_PCI=y
CONFIG_PCIE_XILINX=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_NET=y
@ -28,6 +26,10 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_NETLINK_DIAG=y
CONFIG_PCI=y
CONFIG_PCIEPORTBUS=y
CONFIG_PCI_HOST_GENERIC=y
CONFIG_PCIE_XILINX=y
CONFIG_DEVTMPFS=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_VIRTIO_BLK=y
@ -63,7 +65,6 @@ CONFIG_USB_STORAGE=y
CONFIG_USB_UAS=y
CONFIG_VIRTIO_MMIO=y
CONFIG_SIFIVE_PLIC=y
CONFIG_RAS=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_AUTOFS4_FS=y
@ -77,5 +78,6 @@ CONFIG_NFS_V4_1=y
CONFIG_NFS_V4_2=y
CONFIG_ROOT_NFS=y
CONFIG_CRYPTO_USER_API_HASH=y
CONFIG_CRYPTO_DEV_VIRTIO=y
CONFIG_PRINTK_TIME=y
# CONFIG_RCU_TRACE is not set

View File

@ -80,7 +80,7 @@ typedef struct page *pgtable_t;
#define __pgd(x) ((pgd_t) { (x) })
#define __pgprot(x) ((pgprot_t) { (x) })
#ifdef CONFIG_64BITS
#ifdef CONFIG_64BIT
#define PTE_FMT "%016lx"
#else
#define PTE_FMT "%08lx"

View File

@ -22,7 +22,7 @@
* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE >> 1)
#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX STACK_TOP

View File

@ -39,6 +39,7 @@ void asm_offsets(void)
OFFSET(TASK_STACK, task_struct, stack);
OFFSET(TASK_TI, task_struct, thread_info);
OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags);
OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
OFFSET(TASK_TI_CPU, task_struct, thread_info.cpu);

View File

@ -144,6 +144,10 @@ _save_context:
REG_L x2, PT_SP(sp)
.endm
#if !IS_ENABLED(CONFIG_PREEMPT)
.set resume_kernel, restore_all
#endif
ENTRY(handle_exception)
SAVE_ALL
@ -228,7 +232,7 @@ ret_from_exception:
REG_L s0, PT_SSTATUS(sp)
csrc sstatus, SR_SIE
andi s0, s0, SR_SPP
bnez s0, restore_all
bnez s0, resume_kernel
resume_userspace:
/* Interrupts must be disabled here so flags are checked atomically */
@ -250,6 +254,18 @@ restore_all:
RESTORE_ALL
sret
#if IS_ENABLED(CONFIG_PREEMPT)
resume_kernel:
REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
bnez s0, restore_all
need_resched:
REG_L s0, TASK_TI_FLAGS(tp)
andi s0, s0, _TIF_NEED_RESCHED
beqz s0, restore_all
call preempt_schedule_irq
j need_resched
#endif
work_pending:
/* Enter slow path for supplementary processing */
la ra, ret_from_exception
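A rough C rendering of the resume_kernel path added above (stand-in
types; the flag bit value is illustrative, and preempt_schedule_irq()
is the real helper the assembly calls):

/* Stand-ins for the thread_info fields the assembly reads through
 * TASK_TI_PREEMPT_COUNT and TASK_TI_FLAGS (illustrative only). */
struct thread_info { unsigned long flags; int preempt_count; };
#define _TIF_NEED_RESCHED (1UL << 1)       /* assumed bit position */

void preempt_schedule_irq(void);           /* real helper invoked by entry.S */

static void resume_kernel(struct thread_info *ti)
{
	if (ti->preempt_count)                 /* bnez s0, restore_all */
		return;
	while (ti->flags & _TIF_NEED_RESCHED)  /* the need_resched loop */
		preempt_schedule_irq();
	/* fall through to restore_all */
}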

View File

@ -181,7 +181,7 @@ static void __init setup_bootmem(void)
BUG_ON(mem_size == 0);
set_max_mapnr(PFN_DOWN(mem_size));
max_low_pfn = memblock_end_of_DRAM();
max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());
#ifdef CONFIG_BLK_DEV_INITRD
setup_initrd();

View File

@ -57,15 +57,12 @@ void __init setup_smp(void)
while ((dn = of_find_node_by_type(dn, "cpu"))) {
hart = riscv_of_processor_hartid(dn);
if (hart < 0) {
of_node_put(dn);
if (hart < 0)
continue;
}
if (hart == cpuid_to_hartid_map(0)) {
BUG_ON(found_boot_cpu);
found_boot_cpu = 1;
of_node_put(dn);
continue;
}
@ -73,7 +70,6 @@ void __init setup_smp(void)
set_cpu_possible(cpuid, true);
set_cpu_present(cpuid, true);
cpuid++;
of_node_put(dn);
}
BUG_ON(!found_boot_cpu);

View File

@ -28,7 +28,8 @@ static void __init zone_sizes_init(void)
unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
#ifdef CONFIG_ZONE_DMA32
max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, max_low_pfn));
max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G,
(unsigned long) PFN_PHYS(max_low_pfn)));
#endif
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

View File

@ -28,6 +28,7 @@ generic-y += preempt.h
generic-y += sections.h
generic-y += segment.h
generic-y += serial.h
generic-y += shmparam.h
generic-y += sizes.h
generic-y += syscalls.h
generic-y += topology.h

View File

@ -1,5 +1,4 @@
include include/uapi/asm-generic/Kbuild.asm
generic-y += kvm_para.h
generic-y += shmparam.h
generic-y += ucontext.h

View File

@ -446,12 +446,12 @@ config RETPOLINE
branches. Requires a compiler with -mindirect-branch=thunk-extern
support for full protection. The kernel may run slower.
config X86_RESCTRL
bool "Resource Control support"
config X86_CPU_RESCTRL
bool "x86 CPU resource control support"
depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD)
select KERNFS
help
Enable Resource Control support.
Enable x86 CPU resource control support.
Provide support for the allocation and monitoring of system resources
usage by the CPU.

View File

@ -600,6 +600,14 @@ ENTRY(trampoline_32bit_src)
leal TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax
movl %eax, %cr3
3:
/* Set EFER.LME=1 as a precaution in case hypervisor pulls the rug */
pushl %ecx
movl $MSR_EFER, %ecx
rdmsr
btsl $_EFER_LME, %eax
wrmsr
popl %ecx
/* Enable PAE and LA57 (if required) paging modes */
movl $X86_CR4_PAE, %eax
cmpl $0, %edx

View File

@ -6,7 +6,7 @@
#define TRAMPOLINE_32BIT_PGTABLE_OFFSET 0
#define TRAMPOLINE_32BIT_CODE_OFFSET PAGE_SIZE
#define TRAMPOLINE_32BIT_CODE_SIZE 0x60
#define TRAMPOLINE_32BIT_CODE_SIZE 0x70
#define TRAMPOLINE_32BIT_STACK_END TRAMPOLINE_32BIT_SIZE

View File

@ -6,7 +6,7 @@
* "Big Core" Processors (Branded as Core, Xeon, etc...)
*
* The "_X" parts are generally the EP and EX Xeons, or the
* "Extreme" ones, like Broadwell-E.
* "Extreme" ones, like Broadwell-E, or Atom microserver.
*
* While adding a new CPUID for a new microarchitecture, add a new
* group to keep logically sorted out in chronological order. Within
@ -71,6 +71,7 @@
#define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */
#define INTEL_FAM6_ATOM_GOLDMONT_X 0x5F /* Denverton */
#define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */
#define INTEL_FAM6_ATOM_TREMONT_X 0x86 /* Jacobsville */
/* Xeon Phi */

View File

@ -7,7 +7,11 @@
#endif
#ifdef CONFIG_KASAN
#ifdef CONFIG_KASAN_EXTRA
#define KASAN_STACK_ORDER 2
#else
#define KASAN_STACK_ORDER 1
#endif
#else
#define KASAN_STACK_ORDER 0
#endif
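KASAN's redzones inflate every stack frame, which is why the stack
order grows with it. A self-contained sketch of the resulting stack
size on x86-64, assuming the THREAD_SIZE_ORDER = 2 + KASAN_STACK_ORDER
relationship from the x86 headers:

#define PAGE_SIZE          4096UL
#define KASAN_STACK_ORDER  2    /* the CONFIG_KASAN_EXTRA case above */
#define THREAD_SIZE_ORDER  (2 + KASAN_STACK_ORDER)
#define THREAD_SIZE        (PAGE_SIZE << THREAD_SIZE_ORDER)
/* => 64 KiB with KASAN_EXTRA, 32 KiB with plain KASAN, 16 KiB without */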

View File

@ -2,7 +2,7 @@
#ifndef _ASM_X86_RESCTRL_SCHED_H
#define _ASM_X86_RESCTRL_SCHED_H
#ifdef CONFIG_X86_RESCTRL
#ifdef CONFIG_X86_CPU_RESCTRL
#include <linux/sched.h>
#include <linux/jump_label.h>
@ -88,6 +88,6 @@ static inline void resctrl_sched_in(void)
static inline void resctrl_sched_in(void) {}
#endif /* CONFIG_X86_RESCTRL */
#endif /* CONFIG_X86_CPU_RESCTRL */
#endif /* _ASM_X86_RESCTRL_SCHED_H */

View File

@ -39,7 +39,7 @@ obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
obj-$(CONFIG_X86_MCE) += mce/
obj-$(CONFIG_MTRR) += mtrr/
obj-$(CONFIG_MICROCODE) += microcode/
obj-$(CONFIG_X86_RESCTRL) += resctrl/
obj-$(CONFIG_X86_CPU_RESCTRL) += resctrl/
obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o

View File

@ -71,7 +71,7 @@ void __init check_bugs(void)
* identify_boot_cpu() initialized SMT support information, let the
* core code know.
*/
cpu_smt_check_topology_early();
cpu_smt_check_topology();
if (!IS_ENABLED(CONFIG_SMP)) {
pr_info("CPU: ");

View File

@ -855,7 +855,7 @@ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
if (!p) {
return ret;
} else {
if (boot_cpu_data.microcode == p->patch_id)
if (boot_cpu_data.microcode >= p->patch_id)
return ret;
ret = UCODE_NEW;

View File

@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_X86_RESCTRL) += core.o rdtgroup.o monitor.o
obj-$(CONFIG_X86_RESCTRL) += ctrlmondata.o pseudo_lock.o
obj-$(CONFIG_X86_CPU_RESCTRL) += core.o rdtgroup.o monitor.o
obj-$(CONFIG_X86_CPU_RESCTRL) += ctrlmondata.o pseudo_lock.o
CFLAGS_pseudo_lock.o = -I$(src)

View File

@ -167,6 +167,9 @@ setup_efi_state(struct boot_params *params, unsigned long params_load_addr,
struct efi_info *current_ei = &boot_params.efi_info;
struct efi_info *ei = &params->efi_info;
if (!efi_enabled(EFI_RUNTIME_SERVICES))
return 0;
if (!current_ei->efi_memmap_size)
return 0;

View File

@ -211,6 +211,7 @@ static void free_nested(struct kvm_vcpu *vcpu)
if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
return;
hrtimer_cancel(&vmx->nested.preemption_timer);
vmx->nested.vmxon = false;
vmx->nested.smm.vmxon = false;
free_vpid(vmx->nested.vpid02);

View File

@ -26,6 +26,7 @@
#include <linux/mod_devicetable.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/smt.h>
#include <linux/slab.h>
#include <linux/tboot.h>
#include <linux/trace_events.h>
@ -6823,7 +6824,7 @@ static int vmx_vm_init(struct kvm *kvm)
* Warn upon starting the first VM in a potentially
* insecure environment.
*/
if (cpu_smt_control == CPU_SMT_ENABLED)
if (sched_smt_active())
pr_warn_once(L1TF_MSG_SMT);
if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER)
pr_warn_once(L1TF_MSG_L1D);

View File

@ -5116,6 +5116,13 @@ int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
{
u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
/*
* FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
* is returned, but our callers are not ready for that and they blindly
* call kvm_inject_page_fault. Ensure that they at least do not leak
* uninitialized kernel stack memory into cr2 and error code.
*/
memset(exception, 0, sizeof(*exception));
return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
exception);
}

View File

@ -2,8 +2,11 @@
#include <linux/module.h>
#include <linux/io.h>
#define movs(type,to,from) \
asm volatile("movs" type:"=&D" (to), "=&S" (from):"0" (to), "1" (from):"memory")
/* Originally from i386/string.h */
static __always_inline void __iomem_memcpy(void *to, const void *from, size_t n)
static __always_inline void rep_movs(void *to, const void *from, size_t n)
{
unsigned long d0, d1, d2;
asm volatile("rep ; movsl\n\t"
@ -21,13 +24,37 @@ static __always_inline void __iomem_memcpy(void *to, const void *from, size_t n)
void memcpy_fromio(void *to, const volatile void __iomem *from, size_t n)
{
__iomem_memcpy(to, (const void *)from, n);
if (unlikely(!n))
return;
/* Align any unaligned source IO */
if (unlikely(1 & (unsigned long)from)) {
movs("b", to, from);
n--;
}
if (n > 1 && unlikely(2 & (unsigned long)from)) {
movs("w", to, from);
n-=2;
}
rep_movs(to, (const void *)from, n);
}
EXPORT_SYMBOL(memcpy_fromio);
void memcpy_toio(volatile void __iomem *to, const void *from, size_t n)
{
__iomem_memcpy((void *)to, (const void *) from, n);
if (unlikely(!n))
return;
/* Align any unaligned destination IO */
if (unlikely(1 & (unsigned long)to)) {
movs("b", to, from);
n--;
}
if (n > 1 && unlikely(2 & (unsigned long)to)) {
movs("w", to, from);
n-=2;
}
rep_movs((void *)to, (const void *) from, n);
}
EXPORT_SYMBOL(memcpy_toio);

View File

@ -595,7 +595,7 @@ static void show_ldttss(const struct desc_ptr *gdt, const char *name, u16 index)
return;
}
addr = desc.base0 | (desc.base1 << 16) | (desc.base2 << 24);
addr = desc.base0 | (desc.base1 << 16) | ((unsigned long)desc.base2 << 24);
#ifdef CONFIG_X86_64
addr |= ((u64)desc.base3 << 32);
#endif
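The cast matters because of C's integer promotions: desc.base2 is a
narrow field, so base2 << 24 is computed as int, and a set bit 31
sign-extends when the value lands in the 64-bit address (shifting into
the sign bit is formally undefined as well). A stand-alone illustration
with hypothetical values:

#include <stdio.h>

int main(void)
{
	unsigned char base2 = 0xff;  /* bits 31..24 of the descriptor base */
	unsigned long bad  = (unsigned long)0 | (base2 << 24);
	unsigned long good = (unsigned long)0 | ((unsigned long)base2 << 24);

	/* bad prints 0xffffffffff000000 on 64-bit, good prints 0xff000000 */
	printf("bad=%#lx good=%#lx\n", bad, good);
	return 0;
}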

View File

@ -164,7 +164,7 @@ config XTENSA_FAKE_NMI
If unsure, say N.
config XTENSA_UNALIGNED_USER
bool "Unaligned memory access in use space"
bool "Unaligned memory access in user space"
help
The Xtensa architecture currently does not handle unaligned
memory accesses in hardware but through an exception handler.
@ -451,7 +451,7 @@ config USE_OF
help
Include support for flattened device tree machine descriptions.
config BUILTIN_DTB
config BUILTIN_DTB_SOURCE
string "DTB to build into the kernel image"
depends on OF

View File

@ -7,9 +7,9 @@
#
#
BUILTIN_DTB := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB)).dtb.o
ifneq ($(CONFIG_BUILTIN_DTB),"")
obj-$(CONFIG_OF) += $(BUILTIN_DTB)
BUILTIN_DTB_SOURCE := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o
ifneq ($(CONFIG_BUILTIN_DTB_SOURCE),"")
obj-$(CONFIG_OF) += $(BUILTIN_DTB_SOURCE)
endif
# for CONFIG_OF_ALL_DTBS test

View File

@ -34,7 +34,7 @@ CONFIG_XTENSA_PLATFORM_XTFPGA=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0"
CONFIG_USE_OF=y
CONFIG_BUILTIN_DTB="kc705"
CONFIG_BUILTIN_DTB_SOURCE="kc705"
# CONFIG_COMPACTION is not set
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_PM=y

View File

@ -38,7 +38,7 @@ CONFIG_HIGHMEM=y
# CONFIG_PCI is not set
CONFIG_XTENSA_PLATFORM_XTFPGA=y
CONFIG_USE_OF=y
CONFIG_BUILTIN_DTB="csp"
CONFIG_BUILTIN_DTB_SOURCE="csp"
# CONFIG_COMPACTION is not set
CONFIG_XTFPGA_LCD=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set

View File

@ -33,7 +33,7 @@ CONFIG_XTENSA_PLATFORM_XTFPGA=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0"
CONFIG_USE_OF=y
CONFIG_BUILTIN_DTB="kc705"
CONFIG_BUILTIN_DTB_SOURCE="kc705"
# CONFIG_COMPACTION is not set
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_NET=y

View File

@ -39,7 +39,7 @@ CONFIG_XTENSA_PLATFORM_XTFPGA=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0x9d050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=256M@0x60000000"
CONFIG_USE_OF=y
CONFIG_BUILTIN_DTB="kc705_nommu"
CONFIG_BUILTIN_DTB_SOURCE="kc705_nommu"
CONFIG_BINFMT_FLAT=y
CONFIG_NET=y
CONFIG_PACKET=y

View File

@ -33,11 +33,12 @@ CONFIG_SMP=y
CONFIG_HOTPLUG_CPU=y
# CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set
# CONFIG_PCI is not set
CONFIG_VECTORS_OFFSET=0x00002000
CONFIG_XTENSA_PLATFORM_XTFPGA=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=96M@0"
CONFIG_USE_OF=y
CONFIG_BUILTIN_DTB="lx200mx"
CONFIG_BUILTIN_DTB_SOURCE="lx200mx"
# CONFIG_COMPACTION is not set
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_NET=y

View File

@ -276,12 +276,13 @@ should_never_return:
movi a2, cpu_start_ccount
1:
memw
l32i a3, a2, 0
beqi a3, 0, 1b
movi a3, 0
s32i a3, a2, 0
memw
1:
memw
l32i a3, a2, 0
beqi a3, 0, 1b
wsr a3, ccount
@ -317,11 +318,13 @@ ENTRY(cpu_restart)
rsr a0, prid
neg a2, a0
movi a3, cpu_start_id
memw
s32i a2, a3, 0
#if XCHAL_DCACHE_IS_WRITEBACK
dhwbi a3, 0
#endif
1:
memw
l32i a2, a3, 0
dhi a3, 0
bne a2, a0, 1b

View File

@ -83,7 +83,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
{
unsigned i;
for (i = 0; i < max_cpus; ++i)
for_each_possible_cpu(i)
set_cpu_present(i, true);
}
@ -96,6 +96,11 @@ void __init smp_init_cpus(void)
pr_info("%s: Core Count = %d\n", __func__, ncpus);
pr_info("%s: Core Id = %d\n", __func__, core_id);
if (ncpus > NR_CPUS) {
ncpus = NR_CPUS;
pr_info("%s: limiting core count by %d\n", __func__, ncpus);
}
for (i = 0; i < ncpus; ++i)
set_cpu_possible(i, true);
}
@ -195,9 +200,11 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts)
int i;
#ifdef CONFIG_HOTPLUG_CPU
cpu_start_id = cpu;
system_flush_invalidate_dcache_range(
(unsigned long)&cpu_start_id, sizeof(cpu_start_id));
WRITE_ONCE(cpu_start_id, cpu);
/* Pairs with the third memw in the cpu_restart */
mb();
system_flush_invalidate_dcache_range((unsigned long)&cpu_start_id,
sizeof(cpu_start_id));
#endif
smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);
@ -206,18 +213,21 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts)
ccount = get_ccount();
while (!ccount);
cpu_start_ccount = ccount;
WRITE_ONCE(cpu_start_ccount, ccount);
while (time_before(jiffies, timeout)) {
do {
/*
* Pairs with the first two memws in the
* .Lboot_secondary.
*/
mb();
if (!cpu_start_ccount)
break;
}
ccount = READ_ONCE(cpu_start_ccount);
} while (ccount && time_before(jiffies, timeout));
if (cpu_start_ccount) {
if (ccount) {
smp_call_function_single(0, mx_cpu_stop,
(void *)cpu, 1);
cpu_start_ccount = 0;
(void *)cpu, 1);
WRITE_ONCE(cpu_start_ccount, 0);
return -EIO;
}
}
@ -237,6 +247,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
__func__, cpu, idle, start_info.stack);
init_completion(&cpu_running);
ret = boot_secondary(cpu, idle);
if (ret == 0) {
wait_for_completion_timeout(&cpu_running,
@ -298,8 +309,10 @@ void __cpu_die(unsigned int cpu)
unsigned long timeout = jiffies + msecs_to_jiffies(1000);
while (time_before(jiffies, timeout)) {
system_invalidate_dcache_range((unsigned long)&cpu_start_id,
sizeof(cpu_start_id));
if (cpu_start_id == -cpu) {
sizeof(cpu_start_id));
/* Pairs with the second memw in the cpu_restart */
mb();
if (READ_ONCE(cpu_start_id) == -cpu) {
platform_cpu_kill(cpu);
return;
}

View File

@ -89,7 +89,7 @@ static int ccount_timer_shutdown(struct clock_event_device *evt)
container_of(evt, struct ccount_timer, evt);
if (timer->irq_enabled) {
disable_irq(evt->irq);
disable_irq_nosync(evt->irq);
timer->irq_enabled = 0;
}
return 0;

View File

@ -462,6 +462,10 @@ static void blk_rq_timed_out_timer(struct timer_list *t)
kblockd_schedule_work(&q->timeout_work);
}
static void blk_timeout_work(struct work_struct *work)
{
}
/**
* blk_alloc_queue_node - allocate a request queue
* @gfp_mask: memory allocation flags
@ -505,7 +509,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
laptop_mode_timer_fn, 0);
timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
INIT_WORK(&q->timeout_work, NULL);
INIT_WORK(&q->timeout_work, blk_timeout_work);
INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
INIT_LIST_HEAD(&q->blkg_list);

View File

@ -335,7 +335,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
blk_mq_run_hw_queue(hctx, true);
blk_mq_sched_restart(hctx);
}
/**

View File

@ -839,6 +839,9 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
static bool debugfs_create_files(struct dentry *parent, void *data,
const struct blk_mq_debugfs_attr *attr)
{
if (IS_ERR_OR_NULL(parent))
return false;
d_inode(parent)->i_private = data;
for (; attr->name; attr++) {

View File

@ -5854,9 +5854,10 @@ static int __init init_binder_device(const char *name)
static int __init binder_init(void)
{
int ret;
char *device_name, *device_names, *device_tmp;
char *device_name, *device_tmp;
struct binder_device *device;
struct hlist_node *tmp;
char *device_names = NULL;
ret = binder_alloc_shrinker_init();
if (ret)
@ -5898,22 +5899,28 @@ static int __init binder_init(void)
&transaction_log_fops);
}
/*
* Copy the module_parameter string, because we don't want to
* tokenize it in-place.
*/
device_names = kstrdup(binder_devices_param, GFP_KERNEL);
if (!device_names) {
ret = -ENOMEM;
goto err_alloc_device_names_failed;
if (strcmp(binder_devices_param, "") != 0) {
/*
* Copy the module_parameter string, because we don't want to
* tokenize it in-place.
*/
device_names = kstrdup(binder_devices_param, GFP_KERNEL);
if (!device_names) {
ret = -ENOMEM;
goto err_alloc_device_names_failed;
}
device_tmp = device_names;
while ((device_name = strsep(&device_tmp, ","))) {
ret = init_binder_device(device_name);
if (ret)
goto err_init_binder_device_failed;
}
}
device_tmp = device_names;
while ((device_name = strsep(&device_tmp, ","))) {
ret = init_binder_device(device_name);
if (ret)
goto err_init_binder_device_failed;
}
ret = init_binderfs();
if (ret)
goto err_init_binder_device_failed;
return ret;

View File

@ -46,4 +46,13 @@ static inline bool is_binderfs_device(const struct inode *inode)
}
#endif
#ifdef CONFIG_ANDROID_BINDERFS
extern int __init init_binderfs(void);
#else
static inline int __init init_binderfs(void)
{
return 0;
}
#endif
#endif /* _LINUX_BINDER_INTERNAL_H */

View File

@ -395,6 +395,11 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
struct inode *inode = NULL;
struct dentry *root = sb->s_root;
struct binderfs_info *info = sb->s_fs_info;
#if defined(CONFIG_IPC_NS)
bool use_reserve = (info->ipc_ns == &init_ipc_ns);
#else
bool use_reserve = true;
#endif
device = kzalloc(sizeof(*device), GFP_KERNEL);
if (!device)
@ -413,7 +418,10 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
/* Reserve a new minor number for the new device. */
mutex_lock(&binderfs_minors_mutex);
minor = ida_alloc_max(&binderfs_minors, BINDERFS_MAX_MINOR, GFP_KERNEL);
minor = ida_alloc_max(&binderfs_minors,
use_reserve ? BINDERFS_MAX_MINOR :
BINDERFS_MAX_MINOR_CAPPED,
GFP_KERNEL);
mutex_unlock(&binderfs_minors_mutex);
if (minor < 0) {
ret = minor;
@ -542,7 +550,7 @@ static struct file_system_type binder_fs_type = {
.fs_flags = FS_USERNS_MOUNT,
};
static int __init init_binderfs(void)
int __init init_binderfs(void)
{
int ret;
@ -560,5 +568,3 @@ static int __init init_binderfs(void)
return ret;
}
device_initcall(init_binderfs);

View File

@ -79,8 +79,7 @@ static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
ct_idx = get_cacheinfo_idx(this_leaf->type);
propname = cache_type_info[ct_idx].size_prop;
if (of_property_read_u32(np, propname, &this_leaf->size))
this_leaf->size = 0;
of_property_read_u32(np, propname, &this_leaf->size);
}
/* not cache_line_size() because that's a macro in include/linux/cache.h */
@ -114,8 +113,7 @@ static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
ct_idx = get_cacheinfo_idx(this_leaf->type);
propname = cache_type_info[ct_idx].nr_sets_prop;
if (of_property_read_u32(np, propname, &this_leaf->number_of_sets))
this_leaf->number_of_sets = 0;
of_property_read_u32(np, propname, &this_leaf->number_of_sets);
}
static void cache_associativity(struct cacheinfo *this_leaf)

View File

@ -130,7 +130,7 @@ u64 pm_runtime_autosuspend_expiration(struct device *dev)
{
int autosuspend_delay;
u64 last_busy, expires = 0;
u64 now = ktime_to_ns(ktime_get());
u64 now = ktime_get_mono_fast_ns();
if (!dev->power.use_autosuspend)
goto out;
@ -909,7 +909,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
* If 'expires' is after the current time, we've been called
* too early.
*/
if (expires > 0 && expires < ktime_to_ns(ktime_get())) {
if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
dev->power.timer_expires = 0;
rpm_suspend(dev, dev->power.timer_autosuspends ?
(RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
@ -928,7 +928,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
unsigned long flags;
ktime_t expires;
u64 expires;
int retval;
spin_lock_irqsave(&dev->power.lock, flags);
@ -945,8 +945,8 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
/* Other scheduled or pending requests need to be canceled. */
pm_runtime_cancel_pending(dev);
expires = ktime_add(ktime_get(), ms_to_ktime(delay));
dev->power.timer_expires = ktime_to_ns(expires);
expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
dev->power.timer_expires = expires;
dev->power.timer_autosuspends = 0;
hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);

View File

@ -1513,10 +1513,20 @@ static int clk_fetch_parent_index(struct clk_core *core,
if (!parent)
return -EINVAL;
for (i = 0; i < core->num_parents; i++)
if (clk_core_get_parent_by_index(core, i) == parent)
for (i = 0; i < core->num_parents; i++) {
if (core->parents[i] == parent)
return i;
if (core->parents[i])
continue;
/* Fallback to comparing globally unique names */
if (!strcmp(parent->name, core->parent_names[i])) {
core->parents[i] = parent;
return i;
}
}
return -EINVAL;
}

View File

@ -155,13 +155,14 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct clk_frac_pll *pll = to_clk_frac_pll(hw);
u32 val, divfi, divff;
u64 temp64 = parent_rate;
u64 temp64;
int ret;
parent_rate *= 8;
rate *= 2;
divfi = rate / parent_rate;
temp64 *= rate - divfi;
temp64 = parent_rate * divfi;
temp64 = rate - temp64;
temp64 *= PLL_FRAC_DENOM;
do_div(temp64, parent_rate);
divff = temp64;
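Spelled out as stand-alone C, the corrected divider math is as below
(rate and parent_rate arrive pre-scaled by 2 and 8 as in the lines
above; the PLL_FRAC_DENOM value here is an assumption for
illustration). The old code instead multiplied the unscaled parent_rate
by (rate - divfi), subtracting a dimensionless divider from a
frequency:

#include <stdint.h>

#define PLL_FRAC_DENOM 0x1000000ULL  /* assumed 2^24 fractional denominator */

static void frac_pll_dividers(uint64_t rate, uint64_t parent_rate,
			      uint32_t *divfi, uint32_t *divff)
{
	uint64_t rem;

	*divfi = rate / parent_rate;                    /* integer part */
	rem = rate - (uint64_t)*divfi * parent_rate;
	*divff = rem * PLL_FRAC_DENOM / parent_rate;    /* fractional part */
}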

View File

@ -53,7 +53,6 @@
#define APMU_DISP1 0x110
#define APMU_CCIC0 0x50
#define APMU_CCIC1 0xf4
#define APMU_SP 0x68
#define MPMU_UART_PLL 0x14
struct mmp2_clk_unit {
@ -210,8 +209,6 @@ static struct mmp_clk_mix_config ccic1_mix_config = {
.reg_info = DEFINE_MIX_REG_INFO(4, 16, 2, 6, 32),
};
static DEFINE_SPINLOCK(sp_lock);
static struct mmp_param_mux_clk apmu_mux_clks[] = {
{MMP2_CLK_DISP0_MUX, "disp0_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP0, 6, 2, 0, &disp0_lock},
{MMP2_CLK_DISP1_MUX, "disp1_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP1, 6, 2, 0, &disp1_lock},
@ -242,7 +239,6 @@ static struct mmp_param_gate_clk apmu_gate_clks[] = {
{MMP2_CLK_CCIC1, "ccic1_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x1b, 0x1b, 0x0, 0, &ccic1_lock},
{MMP2_CLK_CCIC1_PHY, "ccic1_phy_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x24, 0x24, 0x0, 0, &ccic1_lock},
{MMP2_CLK_CCIC1_SPHY, "ccic1_sphy_clk", "ccic1_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x300, 0x300, 0x0, 0, &ccic1_lock},
{MMP2_CLK_SP, "sp_clk", NULL, CLK_SET_RATE_PARENT, APMU_SP, 0x1b, 0x1b, 0x0, 0, &sp_lock},
};
static void mmp2_axi_periph_clk_init(struct mmp2_clk_unit *pxa_unit)

View File

@ -115,8 +115,8 @@ static const char * const gcc_parent_names_6[] = {
"core_bi_pll_test_se",
};
static const char * const gcc_parent_names_7[] = {
"bi_tcxo",
static const char * const gcc_parent_names_7_ao[] = {
"bi_tcxo_ao",
"gpll0",
"gpll0_out_even",
"core_bi_pll_test_se",
@ -128,6 +128,12 @@ static const char * const gcc_parent_names_8[] = {
"core_bi_pll_test_se",
};
static const char * const gcc_parent_names_8_ao[] = {
"bi_tcxo_ao",
"gpll0",
"core_bi_pll_test_se",
};
static const struct parent_map gcc_parent_map_10[] = {
{ P_BI_TCXO, 0 },
{ P_GPLL0_OUT_MAIN, 1 },
@ -210,7 +216,7 @@ static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
.freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_cpuss_ahb_clk_src",
.parent_names = gcc_parent_names_7,
.parent_names = gcc_parent_names_7_ao,
.num_parents = 4,
.ops = &clk_rcg2_ops,
},
@ -229,7 +235,7 @@ static struct clk_rcg2 gcc_cpuss_rbcpr_clk_src = {
.freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_cpuss_rbcpr_clk_src",
.parent_names = gcc_parent_names_8,
.parent_names = gcc_parent_names_8_ao,
.num_parents = 3,
.ops = &clk_rcg2_ops,
},

View File

@ -403,8 +403,10 @@ int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div,
num_dividers = i;
tmp = kcalloc(valid_div + 1, sizeof(*tmp), GFP_KERNEL);
if (!tmp)
if (!tmp) {
*table = ERR_PTR(-ENOMEM);
return -ENOMEM;
}
valid_div = 0;
*width = 0;
@ -439,6 +441,7 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup)
{
struct clk_omap_divider *div;
struct clk_omap_reg *reg;
int ret;
if (!setup)
return NULL;
@ -458,6 +461,12 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup)
div->flags |= CLK_DIVIDER_POWER_OF_TWO;
div->table = _get_div_table_from_setup(setup, &div->width);
if (IS_ERR(div->table)) {
ret = PTR_ERR(div->table);
kfree(div);
return ERR_PTR(ret);
}
div->shift = setup->bit_shift;
div->latch = -EINVAL;

View File

@ -21,7 +21,7 @@ static int __cpuidle poll_idle(struct cpuidle_device *dev,
local_irq_enable();
if (!current_set_polling_and_test()) {
unsigned int loop_count = 0;
u64 limit = TICK_USEC;
u64 limit = TICK_NSEC;
int i;
for (i = 1; i < drv->state_count; i++) {

View File

@ -537,6 +537,8 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
struct nitrox_device *ndev = cmdq->ndev;
struct nitrox_softreq *sr;
int req_completed = 0, err = 0, budget;
completion_t callback;
void *cb_arg;
/* check all pending requests */
budget = atomic_read(&cmdq->pending_count);
@ -564,13 +566,13 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
smp_mb__after_atomic();
/* remove from response list */
response_list_del(sr, cmdq);
/* ORH error code */
err = READ_ONCE(*sr->resp.orh) & 0xff;
if (sr->callback)
sr->callback(sr->cb_arg, err);
callback = sr->callback;
cb_arg = sr->cb_arg;
softreq_destroy(sr);
if (callback)
callback(cb_arg, err);
req_completed++;
}
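The reordering above is the standard fix for a callback-after-free:
snapshot the callback and its argument, destroy the request, then
invoke the callback from the locals. A generic stand-alone sketch of
the pattern (types are illustrative, not the nitrox driver's):

#include <stdlib.h>

typedef void (*completion_t)(void *arg, int err);

struct softreq {
	completion_t callback;
	void *cb_arg;
	/* ... fields freed together with the request ... */
};

static void complete_and_destroy(struct softreq *sr, int err)
{
	completion_t callback = sr->callback;  /* snapshot before freeing */
	void *cb_arg = sr->cb_arg;

	free(sr);                      /* softreq_destroy() in the driver */
	if (callback)
		callback(cb_arg, err); /* touches only the local copies */
}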

View File

@ -37,8 +37,9 @@ extern u64 efi_system_table;
static struct ptdump_info efi_ptdump_info = {
.mm = &efi_mm,
.markers = (struct addr_marker[]){
{ 0, "UEFI runtime start" },
{ DEFAULT_MAP_WINDOW_64, "UEFI runtime end" }
{ 0, "UEFI runtime start" },
{ DEFAULT_MAP_WINDOW_64, "UEFI runtime end" },
{ -1, NULL }
},
.base_addr = 0,
};

View File

@ -508,14 +508,11 @@ static int __init s10_init(void)
return -ENODEV;
np = of_find_matching_node(fw_np, s10_of_match);
if (!np) {
of_node_put(fw_np);
if (!np)
return -ENODEV;
}
of_node_put(np);
ret = of_platform_populate(fw_np, s10_of_match, NULL, NULL);
of_node_put(fw_np);
if (ret)
return ret;

View File

@ -66,8 +66,10 @@ static int altr_a10sr_gpio_direction_input(struct gpio_chip *gc,
static int altr_a10sr_gpio_direction_output(struct gpio_chip *gc,
unsigned int nr, int value)
{
if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT))
if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT)) {
altr_a10sr_gpio_set(gc, nr, value);
return 0;
}
return -EINVAL;
}

View File

@ -180,7 +180,18 @@ static void sprd_eic_free(struct gpio_chip *chip, unsigned int offset)
static int sprd_eic_get(struct gpio_chip *chip, unsigned int offset)
{
return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA);
struct sprd_eic *sprd_eic = gpiochip_get_data(chip);
switch (sprd_eic->type) {
case SPRD_EIC_DEBOUNCE:
return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA);
case SPRD_EIC_ASYNC:
return sprd_eic_read(chip, offset, SPRD_EIC_ASYNC_DATA);
case SPRD_EIC_SYNC:
return sprd_eic_read(chip, offset, SPRD_EIC_SYNC_DATA);
default:
return -ENOTSUPP;
}
}
static int sprd_eic_direction_input(struct gpio_chip *chip, unsigned int offset)
@ -368,6 +379,7 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
irq_set_handler_locked(data, handle_edge_irq);
break;
case IRQ_TYPE_EDGE_BOTH:
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 1);
irq_set_handler_locked(data, handle_edge_irq);
break;

View File

@ -84,6 +84,7 @@ MODULE_DEVICE_TABLE(of, pcf857x_of_table);
*/
struct pcf857x {
struct gpio_chip chip;
struct irq_chip irqchip;
struct i2c_client *client;
struct mutex lock; /* protect 'out' */
unsigned out; /* software latch */
@ -252,18 +253,6 @@ static void pcf857x_irq_bus_sync_unlock(struct irq_data *data)
mutex_unlock(&gpio->lock);
}
static struct irq_chip pcf857x_irq_chip = {
.name = "pcf857x",
.irq_enable = pcf857x_irq_enable,
.irq_disable = pcf857x_irq_disable,
.irq_ack = noop,
.irq_mask = noop,
.irq_unmask = noop,
.irq_set_wake = pcf857x_irq_set_wake,
.irq_bus_lock = pcf857x_irq_bus_lock,
.irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock,
};
/*-------------------------------------------------------------------------*/
static int pcf857x_probe(struct i2c_client *client,
@ -376,8 +365,17 @@ static int pcf857x_probe(struct i2c_client *client,
/* Enable irqchip if we have an interrupt */
if (client->irq) {
gpio->irqchip.name = "pcf857x",
gpio->irqchip.irq_enable = pcf857x_irq_enable,
gpio->irqchip.irq_disable = pcf857x_irq_disable,
gpio->irqchip.irq_ack = noop,
gpio->irqchip.irq_mask = noop,
gpio->irqchip.irq_unmask = noop,
gpio->irqchip.irq_set_wake = pcf857x_irq_set_wake,
gpio->irqchip.irq_bus_lock = pcf857x_irq_bus_lock,
gpio->irqchip.irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock,
status = gpiochip_irqchip_add_nested(&gpio->chip,
&pcf857x_irq_chip,
&gpio->irqchip,
0, handle_level_irq,
IRQ_TYPE_NONE);
if (status) {
@ -392,7 +390,7 @@ static int pcf857x_probe(struct i2c_client *client,
if (status)
goto fail;
gpiochip_set_nested_irqchip(&gpio->chip, &pcf857x_irq_chip,
gpiochip_set_nested_irqchip(&gpio->chip, &gpio->irqchip,
client->irq);
gpio->irq_parent = client->irq;
}

View File

@ -253,6 +253,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
struct vf610_gpio_port *port;
struct resource *iores;
struct gpio_chip *gc;
int i;
int ret;
port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
@ -319,6 +320,10 @@ static int vf610_gpio_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
/* Mask all GPIO interrupts */
for (i = 0; i < gc->ngpio; i++)
vf610_gpio_writel(0, port->base + PORT_PCR(i));
/* Clear the interrupt status register for all GPIO's */
vf610_gpio_writel(~0, port->base + PORT_ISFR);

View File

@ -828,7 +828,14 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
/* Do not leak kernel stack to userspace */
memset(&ge, 0, sizeof(ge));
ge.timestamp = le->timestamp;
/*
* We may be running from a nested threaded interrupt in which case
* we didn't get the timestamp from lineevent_irq_handler().
*/
if (!le->timestamp)
ge.timestamp = ktime_get_real_ns();
else
ge.timestamp = le->timestamp;
if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
&& le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {

View File

@ -1686,7 +1686,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
effective_mode &= ~S_IWUSR;
if ((adev->flags & AMD_IS_APU) &&
(attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
(attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
return 0;

View File

@ -38,6 +38,7 @@
#include "amdgpu_gem.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
/**
* amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
@ -187,6 +188,48 @@ error:
return ERR_PTR(ret);
}
static int
__reservation_object_make_exclusive(struct reservation_object *obj)
{
struct dma_fence **fences;
unsigned int count;
int r;
if (!reservation_object_get_list(obj)) /* no shared fences to convert */
return 0;
r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences);
if (r)
return r;
if (count == 0) {
/* Now that was unexpected. */
} else if (count == 1) {
reservation_object_add_excl_fence(obj, fences[0]);
dma_fence_put(fences[0]);
kfree(fences);
} else {
struct dma_fence_array *array;
array = dma_fence_array_create(count, fences,
dma_fence_context_alloc(1), 0,
false);
if (!array)
goto err_fences_put;
reservation_object_add_excl_fence(obj, &array->base);
dma_fence_put(&array->base);
}
return 0;
err_fences_put:
while (count--)
dma_fence_put(fences[count]);
kfree(fences);
return -ENOMEM;
}
/**
* amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
* @dma_buf: Shared DMA buffer
@ -218,16 +261,16 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
if (attach->dev->driver != adev->dev->driver) {
/*
* Wait for all shared fences to complete before we switch to future
* use of exclusive fence on this prime shared bo.
* We only create shared fences for internal use, but importers
* of the dmabuf rely on exclusive fences for implicitly
* tracking write hazards. As any of the current fences may
* correspond to a write, we need to convert all existing
* fences on the reservation object into a single exclusive
* fence.
*/
r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
true, false,
MAX_SCHEDULE_TIMEOUT);
if (unlikely(r < 0)) {
DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
r = __reservation_object_make_exclusive(bo->tbo.resv);
if (r)
goto error_unreserve;
}
}
/* pin buffer into GTT */

View File

@ -3363,14 +3363,15 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
struct amdgpu_task_info *task_info)
{
struct amdgpu_vm *vm;
unsigned long flags;
spin_lock(&adev->vm_manager.pasid_lock);
spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
if (vm)
*task_info = vm->task_info;
spin_unlock(&adev->vm_manager.pasid_lock);
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
}
/**

View File

@ -93,7 +93,20 @@ static void nbio_v7_4_enable_doorbell_aperture(struct amdgpu_device *adev,
static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
bool enable)
{
u32 tmp = 0;
if (enable) {
tmp = REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_EN, 1) |
REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) |
REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0);
WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_LOW,
lower_32_bits(adev->doorbell.base));
WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_HIGH,
upper_32_bits(adev->doorbell.base));
}
WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_CNTL, tmp);
}
static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev,

View File

@ -729,11 +729,13 @@ static int soc15_common_early_init(void *handle)
case CHIP_RAVEN:
adev->asic_funcs = &soc15_asic_funcs;
if (adev->rev_id >= 0x8)
adev->external_rev_id = adev->rev_id + 0x81;
adev->external_rev_id = adev->rev_id + 0x79;
else if (adev->pdev->device == 0x15d8)
adev->external_rev_id = adev->rev_id + 0x41;
else if (adev->rev_id == 1)
adev->external_rev_id = adev->rev_id + 0x20;
else
adev->external_rev_id = 0x1;
adev->external_rev_id = adev->rev_id + 0x01;
if (adev->rev_id >= 0x8) {
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |

View File

@ -863,7 +863,7 @@ static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
return 0;
}
#if CONFIG_X86_64
#ifdef CONFIG_X86_64
static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
uint32_t *num_entries,
struct crat_subtype_iolink *sub_type_hdr)

View File

@ -4082,7 +4082,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
}
if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
connector_type == DRM_MODE_CONNECTOR_eDP) {
drm_connector_attach_vrr_capable_property(
&aconnector->base);
}

View File

@ -591,7 +591,15 @@ static void dce11_pplib_apply_display_requirements(
dc,
context->bw.dce.sclk_khz);
pp_display_cfg->min_dcfclock_khz = pp_display_cfg->min_engine_clock_khz;
/*
* As workaround for >4x4K lightup set dcfclock to min_engine_clock value.
* This is not required for less than 5 displays,
* thus don't request dcfclk in dc to avoid impact
* on power saving.
*
*/
pp_display_cfg->min_dcfclock_khz = (context->stream_count > 4)?
pp_display_cfg->min_engine_clock_khz : 0;
pp_display_cfg->min_engine_clock_deep_sleep_khz
= context->bw.dce.sclk_deep_sleep_khz;

View File

@ -1033,6 +1033,7 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
break;
case amd_pp_dpp_clock:
pclk_vol_table = pinfo->vdd_dep_on_dppclk;
break;
default:
return -EINVAL;
}

View File

@ -758,7 +758,7 @@ int drm_mode_hsync(const struct drm_display_mode *mode)
if (mode->hsync)
return mode->hsync;
if (mode->htotal < 0)
if (mode->htotal <= 0)
return 0;
calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */

View File

@ -1086,7 +1086,7 @@ static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
return DDI_CLK_SEL_TBT_810;
default:
MISSING_CASE(clock);
break;
return DDI_CLK_SEL_NONE;
}
case DPLL_ID_ICL_MGPLL1:
case DPLL_ID_ICL_MGPLL2:

View File

@ -15415,16 +15415,45 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
}
}
static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
/*
* Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
the hardware when a high-res display is plugged in. DPLL P
* divider is zero, and the pipe timings are bonkers. We'll
* try to disable everything in that case.
*
* FIXME would be nice to be able to sanitize this state
* without several WARNs, but for now let's take the easy
* road.
*/
return IS_GEN6(dev_priv) &&
crtc_state->base.active &&
crtc_state->shared_dpll &&
crtc_state->port_clock == 0;
}
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_connector *connector;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct intel_crtc_state *crtc_state = crtc ?
to_intel_crtc_state(crtc->base.state) : NULL;
/* We need to check both for a crtc link (meaning that the
* encoder is active and trying to read from a pipe) and the
* pipe itself being active. */
bool has_active_crtc = encoder->base.crtc &&
to_intel_crtc(encoder->base.crtc)->active;
bool has_active_crtc = crtc_state &&
crtc_state->base.active;
if (crtc_state && has_bogus_dpll_config(crtc_state)) {
DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
pipe_name(crtc->pipe));
has_active_crtc = false;
}
connector = intel_encoder_find_connector(encoder);
if (connector && !has_active_crtc) {
@ -15435,16 +15464,25 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
/* Connector is active, but has no active pipe. This is
* fallout from our resume register restoring. Disable
* the encoder manually again. */
if (encoder->base.crtc) {
struct drm_crtc_state *crtc_state = encoder->base.crtc->state;
if (crtc_state) {
struct drm_encoder *best_encoder;
DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
encoder->base.base.id,
encoder->base.name);
/* avoid oopsing in case the hooks consult best_encoder */
best_encoder = connector->base.state->best_encoder;
connector->base.state->best_encoder = &encoder->base;
if (encoder->disable)
encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
encoder->disable(encoder, crtc_state,
connector->base.state);
if (encoder->post_disable)
encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
encoder->post_disable(encoder, crtc_state,
connector->base.state);
connector->base.state->best_encoder = best_encoder;
}
encoder->base.crtc = NULL;

View File

@ -494,7 +494,7 @@ skl_program_plane(struct intel_plane *plane,
keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha);
keymsk = key->channel_mask & 0x3ffffff;
keymsk = key->channel_mask & 0x7ffffff;
if (alpha < 0xff)
keymsk |= PLANE_KEYMSK_ALPHA_ENABLE;

View File

@ -1406,7 +1406,7 @@ static void dsi_pll_disable(struct dss_pll *pll)
static int dsi_dump_dsi_clocks(struct seq_file *s, void *p)
{
struct dsi_data *dsi = p;
struct dsi_data *dsi = s->private;
struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo;
enum dss_clk_source dispc_clk_src, dsi_clk_src;
int dsi_module = dsi->module_id;
@ -1467,7 +1467,7 @@ static int dsi_dump_dsi_clocks(struct seq_file *s, void *p)
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
{
struct dsi_data *dsi = p;
struct dsi_data *dsi = s->private;
unsigned long flags;
struct dsi_irq_stats stats;
@ -1558,7 +1558,7 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
static int dsi_dump_dsi_regs(struct seq_file *s, void *p)
{
struct dsi_data *dsi = p;
struct dsi_data *dsi = s->private;
if (dsi_runtime_get(dsi))
return 0;
@ -4751,6 +4751,17 @@ static int dsi_set_config(struct omap_dss_device *dssdev,
dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH;
dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW;
dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH;
/*
* HACK: These flags should be handled through the omap_dss_device bus
* flags, but this will only be possible when the DSI encoder will be
* converted to the omapdrm-managed encoder model.
*/
dsi->vm.flags &= ~DISPLAY_FLAGS_PIXDATA_NEGEDGE;
dsi->vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
dsi->vm.flags &= ~DISPLAY_FLAGS_DE_LOW;
dsi->vm.flags |= DISPLAY_FLAGS_DE_HIGH;
dsi->vm.flags &= ~DISPLAY_FLAGS_SYNC_POSEDGE;
dsi->vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;
dss_mgr_set_timings(&dsi->output, &dsi->vm);
@ -5083,15 +5094,15 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
snprintf(name, sizeof(name), "dsi%u_regs", dsi->module_id + 1);
dsi->debugfs.regs = dss_debugfs_create_file(dss, name,
dsi_dump_dsi_regs, &dsi);
dsi_dump_dsi_regs, dsi);
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
snprintf(name, sizeof(name), "dsi%u_irqs", dsi->module_id + 1);
dsi->debugfs.irqs = dss_debugfs_create_file(dss, name,
dsi_dump_dsi_irqs, &dsi);
dsi_dump_dsi_irqs, dsi);
#endif
snprintf(name, sizeof(name), "dsi%u_clks", dsi->module_id + 1);
dsi->debugfs.clks = dss_debugfs_create_file(dss, name,
dsi_dump_dsi_clocks, &dsi);
dsi_dump_dsi_clocks, dsi);
return 0;
}
@ -5104,8 +5115,6 @@ static void dsi_unbind(struct device *dev, struct device *master, void *data)
dss_debugfs_remove_file(dsi->debugfs.irqs);
dss_debugfs_remove_file(dsi->debugfs.regs);
of_platform_depopulate(dev);
WARN_ON(dsi->scp_clk_refcount > 0);
dss_pll_unregister(&dsi->pll);
@ -5457,6 +5466,8 @@ static int dsi_remove(struct platform_device *pdev)
dsi_uninit_output(dsi);
of_platform_depopulate(&pdev->dev);
pm_runtime_disable(&pdev->dev);
if (dsi->vdds_dsi_reg != NULL && dsi->vdds_dsi_enabled) {

View File

@ -5676,7 +5676,7 @@ int ci_dpm_init(struct radeon_device *rdev)
u16 data_offset, size;
u8 frev, crev;
struct ci_power_info *pi;
enum pci_bus_speed speed_cap;
enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
struct pci_dev *root = rdev->pdev->bus->self;
int ret;
@ -5685,7 +5685,8 @@ int ci_dpm_init(struct radeon_device *rdev)
return -ENOMEM;
rdev->pm.dpm.priv = pi;
speed_cap = pcie_get_speed_cap(root);
if (!pci_is_root_bus(rdev->pdev->bus))
speed_cap = pcie_get_speed_cap(root);
if (speed_cap == PCI_SPEED_UNKNOWN) {
pi->sys_pcie_mask = 0;
} else {

View File

@ -6899,7 +6899,7 @@ int si_dpm_init(struct radeon_device *rdev)
struct ni_power_info *ni_pi;
struct si_power_info *si_pi;
struct atom_clock_dividers dividers;
enum pci_bus_speed speed_cap;
enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
struct pci_dev *root = rdev->pdev->bus->self;
int ret;
@ -6911,7 +6911,8 @@ int si_dpm_init(struct radeon_device *rdev)
eg_pi = &ni_pi->eg;
pi = &eg_pi->rv7xx;
speed_cap = pcie_get_speed_cap(root);
if (!pci_is_root_bus(rdev->pdev->bus))
speed_cap = pcie_get_speed_cap(root);
if (speed_cap == PCI_SPEED_UNKNOWN) {
si_pi->sys_pcie_mask = 0;
} else {

Some files were not shown because too many files have changed in this diff.