Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Overlapping header include additions in macsec.c

A bug fix in 'net' overlapping with the removal of 'version' string in ena_netdev.c

Overlapping test additions in selftests Makefile

Overlapping PCI ID table adjustments in iwlwifi driver.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 9fb16955fb
.mailmap
@@ -225,6 +225,7 @@ Pratyush Anand <pratyush.anand@gmail.com> <pratyush.anand@st.com>
 Praveen BP <praveenbp@ti.com>
 Punit Agrawal <punitagrawal@gmail.com> <punit.agrawal@arm.com>
 Qais Yousef <qsyousef@gmail.com> <qais.yousef@imgtec.com>
+Quentin Monnet <quentin@isovalent.com> <quentin.monnet@netronome.com>
 Quentin Perret <qperret@qperret.net> <quentin.perret@arm.com>
 Rafael J. Wysocki <rjw@rjwysocki.net> <rjw@sisk.pl>
 Rajesh Shah <rajesh.shah@intel.com>
@@ -110,6 +110,8 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | Cavium         | ThunderX GICv3  | #23154          | CAVIUM_ERRATUM_23154        |
 +----------------+-----------------+-----------------+-----------------------------+
+| Cavium         | ThunderX GICv3  | #38539          | N/A                         |
++----------------+-----------------+-----------------+-----------------------------+
 | Cavium         | ThunderX Core   | #27456          | CAVIUM_ERRATUM_27456        |
 +----------------+-----------------+-----------------+-----------------------------+
 | Cavium         | ThunderX Core   | #30115          | CAVIUM_ERRATUM_30115        |
@@ -266,11 +266,15 @@ to use.
 attached (via the dmaengine_desc_attach_metadata() helper to the descriptor.

 From the DMA driver the following is expected for this mode:

 - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM

   The data from the provided metadata buffer should be prepared for the DMA
   controller to be sent alongside of the payload data. Either by copying to a
   hardware descriptor, or highly coupled packet.

 - DMA_DEV_TO_MEM

   On transfer completion the DMA driver must copy the metadata to the client
   provided metadata buffer before notifying the client about the completion.
   After the transfer completion, DMA drivers must not touch the metadata

@@ -284,10 +288,14 @@ to use.
 and dmaengine_desc_set_metadata_len() is provided as helper functions.

 From the DMA driver the following is expected for this mode:

-- get_metadata_ptr
+- get_metadata_ptr()

   Should return a pointer for the metadata buffer, the maximum size of the
   metadata buffer and the currently used / valid (if any) bytes in the buffer.

-- set_metadata_len
+- set_metadata_len()

   It is called by the clients after it have placed the metadata to the buffer
   to let the DMA driver know the number of valid bytes provided.
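As an aside, the client-side flow that the documentation hunks above describe maps onto a short sequence of dmaengine calls. The following is a minimal, illustrative sketch only, not part of this patch: queue_tx_with_metadata() is an invented helper name, and the channel/descriptor setup plus full error handling are assumed to exist elsewhere.

```c
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/string.h>

/* Hypothetical client helper: attach metadata to a prepared tx
 * descriptor using the engine ("pointer") metadata mode, where the
 * DMA driver owns the metadata buffer.
 */
static int queue_tx_with_metadata(struct dma_async_tx_descriptor *desc,
				  const void *meta, size_t meta_len)
{
	size_t payload_len, max_len;
	void *ptr;

	/* Ask the DMA driver for its metadata buffer. */
	ptr = dmaengine_desc_get_metadata_ptr(desc, &payload_len, &max_len);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	if (meta_len > max_len)
		return -EINVAL;

	memcpy(ptr, meta, meta_len);

	/* Tell the driver how many metadata bytes are valid. */
	return dmaengine_desc_set_metadata_len(desc, meta_len);
}
```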
@@ -258,11 +258,11 @@ conditions.
 | option       | condition | size  read  write  read  write |
 +--------------+-----------+-----------------------------------------+
 |              | good      | fixed yes   no     yes   yes   |
-| remount-ro   | read-only | fixed yes   no     yes   no    |
+| remount-ro   | read-only | as is yes   no     yes   no    |
 | (default)    | offline   |     0 no    no     no    no    |
 +--------------+-----------+-----------------------------------------+
 |              | good      | fixed yes   no     yes   yes   |
-| zone-ro      | read-only | fixed yes   no     yes   no    |
+| zone-ro      | read-only | as is yes   no     yes   no    |
 |              | offline   |     0 no    no     no    no    |
 +--------------+-----------+-----------------------------------------+
 |              | good      |     0 no    no     yes   yes   |

@@ -270,7 +270,7 @@ conditions.
 |              | offline   |     0 no    no     no    no    |
 +--------------+-----------+-----------------------------------------+
 |              | good      | fixed yes   yes    yes   yes   |
-| repair       | read-only | fixed yes   no     yes   no    |
+| repair       | read-only | as is yes   no     yes   no    |
 |              | offline   |     0 no    no     no    no    |
 +--------------+-----------+-----------------------------------------+

@@ -307,8 +307,16 @@ condition changes. The defined behaviors are as follow:
 * zone-offline
 * repair

-The I/O error actions defined for each behavior are detailed in the previous
-section.
+The run-time I/O error actions defined for each behavior are detailed in the
+previous section. Mount time I/O errors will cause the mount operation to fail.
+The handling of read-only zones also differs between mount-time and run-time.
+If a read-only zone is found at mount time, the zone is always treated in the
+same manner as offline zones, that is, all accesses are disabled and the zone
+file size set to 0. This is necessary as the write pointer of read-only zones
+is defined as invalid by the ZBC and ZAC standards, making it impossible to
+discover the amount of data that has been written to the zone. In the case of a
+read-only zone discovered at run-time, as indicated in the previous section,
+the size of the zone file is left unchanged from its last updated value.

 Zonefs User Space Tools
 =======================
@@ -237,7 +237,7 @@ This is solely useful to speed up test compiles.
 KBUILD_EXTRA_SYMBOLS
 --------------------
 For modules that use symbols from other modules.
-See more details in modules.txt.
+See more details in modules.rst.

 ALLSOURCE_ARCHS
 ---------------
@@ -44,7 +44,7 @@ intermediate::
         def_bool y

 Then, Kconfig moves onto the evaluation stage to resolve inter-symbol
-dependency as explained in kconfig-language.txt.
+dependency as explained in kconfig-language.rst.

 Variables
@@ -924,7 +924,7 @@ When kbuild executes, the following steps are followed (roughly):
     $(KBUILD_AFLAGS_MODULE) is used to add arch-specific options that
     are used for assembler.

-    From commandline AFLAGS_MODULE shall be used (see kbuild.txt).
+    From commandline AFLAGS_MODULE shall be used (see kbuild.rst).

 KBUILD_CFLAGS_KERNEL
     $(CC) options specific for built-in

@@ -937,7 +937,7 @@ When kbuild executes, the following steps are followed (roughly):

     $(KBUILD_CFLAGS_MODULE) is used to add arch-specific options that
     are used for $(CC).
-    From commandline CFLAGS_MODULE shall be used (see kbuild.txt).
+    From commandline CFLAGS_MODULE shall be used (see kbuild.rst).

 KBUILD_LDFLAGS_MODULE
     Options for $(LD) when linking modules

@@ -945,7 +945,7 @@ When kbuild executes, the following steps are followed (roughly):
     $(KBUILD_LDFLAGS_MODULE) is used to add arch-specific options
     used when linking modules. This is often a linker script.

-    From commandline LDFLAGS_MODULE shall be used (see kbuild.txt).
+    From commandline LDFLAGS_MODULE shall be used (see kbuild.rst).

 KBUILD_LDS
@@ -470,9 +470,9 @@ build.

 The syntax of the Module.symvers file is::

-<CRC>       <Symbol>          <Namespace>  <Module>                         <Export Type>
+<CRC>       <Symbol>          <Module>                         <Export Type>      <Namespace>

-0xe1cc2a05  usb_stor_suspend  USB_STORAGE  drivers/usb/storage/usb-storage  EXPORT_SYMBOL_GPL
+0xe1cc2a05  usb_stor_suspend  drivers/usb/storage/usb-storage  EXPORT_SYMBOL_GPL  USB_STORAGE

 The fields are separated by tabs and values may be empty (e.g.
 if no namespace is defined for an exported symbol).
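For out-of-tree tooling that consumes Module.symvers positionally, this reorder means the namespace must now be read as the last, possibly empty, tab-separated field. A hypothetical parsing sketch follows; parse_symvers_line() is an invented name and not part of the kernel's build tooling:

```c
#include <stdio.h>
#include <string.h>

/*
 * Split one Module.symvers line in the new field order:
 *   <CRC> <Symbol> <Module> <Export Type> <Namespace>
 * The namespace comes last and may be empty or absent entirely.
 */
static int parse_symvers_line(char *line, char *field[5])
{
	int i;

	for (i = 0; i < 5; i++) {
		field[i] = strsep(&line, "\t\n");
		if (!field[i]) {
			if (i < 4)
				return -1;	/* mandatory field missing */
			field[4] = "";		/* no namespace given */
		}
	}
	return 0;
}

int main(void)
{
	char buf[] = "0xe1cc2a05\tusb_stor_suspend\t"
		     "drivers/usb/storage/usb-storage\tEXPORT_SYMBOL_GPL\tUSB_STORAGE";
	char *f[5];

	if (!parse_symvers_line(buf, f))
		printf("symbol %s exported as %s, namespace '%s'\n",
		       f[1], f[3], f[4]);
	return 0;
}
```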
@@ -7527,6 +7527,12 @@ F: include/uapi/linux/if_hippi.h
 F:	net/802/hippi.c
 F:	drivers/net/hippi/

+HISILICON DMA DRIVER
+M:	Zhou Wang <wangzhou1@hisilicon.com>
+L:	dmaengine@vger.kernel.org
+S:	Maintained
+F:	drivers/dma/hisi_dma.c
+
 HISILICON SECURITY ENGINE V2 DRIVER (SEC2)
 M:	Zaibo Xu <xuzaibo@huawei.com>
 L:	linux-crypto@vger.kernel.org

@@ -8487,7 +8493,6 @@ L: dmaengine@vger.kernel.org
 S:	Supported
 F:	drivers/dma/idxd/*
 F:	include/uapi/linux/idxd.h
-F:	include/linux/idxd.h

 INTEL IDLE DRIVER
 M:	Jacob Pan <jacob.jun.pan@linux.intel.com>

@@ -8694,7 +8699,7 @@ M: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
 M:	Luca Coelho <luciano.coelho@intel.com>
 M:	Intel Linux Wireless <linuxwifi@intel.com>
 L:	linux-wireless@vger.kernel.org
-W:	http://intellinuxwireless.org
+W:	https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi.git
 S:	Supported
 F:	drivers/net/wireless/intel/iwlwifi/
Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 6
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
 NAME = Kleptomaniac Octopus

 # *DOCUMENTATION*

@@ -1804,7 +1804,7 @@ existing-targets := $(wildcard $(sort $(targets)))

 -include $(foreach f,$(existing-targets),$(dir $(f)).$(notdir $(f)).cmd)

-endif # config-targets
+endif # config-build
 endif # mixed-build
 endif # need-sub-make
@@ -154,7 +154,7 @@ config ARC_CPU_HS
	help
	  Support for ARC HS38x Cores based on ARCv2 ISA
	  The notable features are:
-	  - SMP configurations of upto 4 core with coherency
+	  - SMP configurations of up to 4 cores with coherency
	  - Optional L2 Cache and IO-Coherency
	  - Revised Interrupt Architecture (multiple priorites, reg banks,
	    auto stack switch, auto regfile save/restore)

@@ -192,7 +192,7 @@ config ARC_SMP_HALT_ON_RESET
	help
	  In SMP configuration cores can be configured as Halt-on-reset
	  or they could all start at same time. For Halt-on-reset, non
-	  masters are parked until Master kicks them so they can start of
+	  masters are parked until Master kicks them so they can start off
	  at designated entry point. For other case, all jump to common
	  entry point and spin wait for Master's signal.
@@ -21,8 +21,6 @@ CONFIG_MODULES=y
 CONFIG_MODULE_FORCE_LOAD=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 CONFIG_ARC_PLAT_EZNPS=y
 CONFIG_SMP=y
 CONFIG_NR_CPUS=4096

@@ -20,8 +20,6 @@ CONFIG_ISA_ARCOMPACT=y
 CONFIG_KPROBES=y
 CONFIG_MODULES=y
 # CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci"
 # CONFIG_COMPACTION is not set
 CONFIG_NET=y

@@ -19,8 +19,6 @@ CONFIG_PERF_EVENTS=y
 CONFIG_KPROBES=y
 CONFIG_MODULES=y
 # CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 CONFIG_ISA_ARCV2=y
 CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci_hs"
 # CONFIG_COMPACTION is not set

@@ -14,8 +14,6 @@ CONFIG_PERF_EVENTS=y
 CONFIG_KPROBES=y
 CONFIG_MODULES=y
 # CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 CONFIG_ISA_ARCV2=y
 CONFIG_SMP=y
 # CONFIG_ARC_TIMERS_64BIT is not set
@@ -43,6 +43,8 @@ extern void fpu_init_task(struct pt_regs *regs);

 #endif	/* !CONFIG_ISA_ARCOMPACT */

+struct task_struct;
+
 extern void fpu_save_restore(struct task_struct *p, struct task_struct *n);

 #else	/* !CONFIG_ARC_FPU_SAVE_RESTORE */
@@ -29,6 +29,8 @@
 .endm

 #define ASM_NL		 `	/* use '`' to mark new line in macro */
+#define __ALIGN		.align 4
+#define __ALIGN_STR	__stringify(__ALIGN)

 /* annotation for data we want in DCCM - if enabled in .config */
 .macro ARCFP_DATA nm
@@ -8,11 +8,11 @@
 #include <linux/delay.h>
 #include <linux/root_dev.h>
 #include <linux/clk.h>
-#include <linux/clk-provider.h>
 #include <linux/clocksource.h>
 #include <linux/console.h>
 #include <linux/module.h>
 #include <linux/cpu.h>
+#include <linux/of_clk.h>
 #include <linux/of_fdt.h>
 #include <linux/of.h>
 #include <linux/cache.h>
@@ -104,8 +104,7 @@ static void show_faulting_vma(unsigned long address)
		if (IS_ERR(nm))
			nm = "?";
	}
-	pr_info("    @off 0x%lx in [%s]\n"
-		"    VMA: 0x%08lx to 0x%08lx\n",
+	pr_info("    @off 0x%lx in [%s]  VMA: 0x%08lx to 0x%08lx\n",
		vma->vm_start < TASK_UNMAPPED_BASE ?
			address : address - vma->vm_start,
		nm, vma->vm_start, vma->vm_end);

@@ -120,8 +119,6 @@ static void show_ecr_verbose(struct pt_regs *regs)
	unsigned int vec, cause_code;
	unsigned long address;

-	pr_info("\n[ECR   ]: 0x%08lx => ", regs->event);
-
	/* For Data fault, this is data address not instruction addr */
	address = current->thread.fault_address;

@@ -130,10 +127,10 @@ static void show_ecr_verbose(struct pt_regs *regs)

	/* For DTLB Miss or ProtV, display the memory involved too */
	if (vec == ECR_V_DTLB_MISS) {
-		pr_cont("Invalid %s @ 0x%08lx by insn @ 0x%08lx\n",
+		pr_cont("Invalid %s @ 0x%08lx by insn @ %pS\n",
			(cause_code == 0x01) ? "Read" :
			((cause_code == 0x02) ? "Write" : "EX"),
-			address, regs->ret);
+			address, (void *)regs->ret);
	} else if (vec == ECR_V_ITLB_MISS) {
		pr_cont("Insn could not be fetched\n");
	} else if (vec == ECR_V_MACH_CHK) {

@@ -191,31 +188,31 @@ void show_regs(struct pt_regs *regs)

	show_ecr_verbose(regs);

-	pr_info("[EFA   ]: 0x%08lx\n[BLINK ]: %pS\n[ERET  ]: %pS\n",
-		current->thread.fault_address,
-		(void *)regs->blink, (void *)regs->ret);
-
	if (user_mode(regs))
		show_faulting_vma(regs->ret); /* faulting code, not data */

-	pr_info("[STAT32]: 0x%08lx", regs->status32);
+	pr_info("ECR: 0x%08lx EFA: 0x%08lx ERET: 0x%08lx\n",
+		regs->event, current->thread.fault_address, regs->ret);
+
+	pr_info("STAT32: 0x%08lx", regs->status32);

 #define STS_BIT(r, bit)	r->status32 & STATUS_##bit##_MASK ? #bit" " : ""

 #ifdef CONFIG_ISA_ARCOMPACT
-	pr_cont(" : %2s%2s%2s%2s%2s%2s%2s\n",
+	pr_cont(" [%2s%2s%2s%2s%2s%2s%2s]",
		(regs->status32 & STATUS_U_MASK) ? "U " : "K ",
		STS_BIT(regs, DE), STS_BIT(regs, AE),
		STS_BIT(regs, A2), STS_BIT(regs, A1),
		STS_BIT(regs, E2), STS_BIT(regs, E1));
 #else
-	pr_cont(" : %2s%2s%2s%2s\n",
+	pr_cont(" [%2s%2s%2s%2s]",
		STS_BIT(regs, IE),
		(regs->status32 & STATUS_U_MASK) ? "U " : "K ",
		STS_BIT(regs, DE), STS_BIT(regs, AE));
 #endif
-	pr_info("BTA: 0x%08lx\t SP: 0x%08lx\t FP: 0x%08lx\n",
-		regs->bta, regs->sp, regs->fp);
+	pr_cont(" BTA: 0x%08lx\n", regs->bta);
+	pr_info("BLK: %pS\n SP: 0x%08lx  FP: 0x%08lx\n",
+		(void *)regs->blink, regs->sp, regs->fp);
	pr_info("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n",
		regs->lp_start, regs->lp_end, regs->lp_count);
@@ -307,13 +307,15 @@ endif
 ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y)
 prepare: stack_protector_prepare
 stack_protector_prepare: prepare0
-	$(eval KBUILD_CFLAGS += \
+	$(eval SSP_PLUGIN_CFLAGS := \
	 -fplugin-arg-arm_ssp_per_task_plugin-tso=$(shell \
	  awk '{if ($$2 == "THREAD_SZ_ORDER") print $$3;}'\
	   include/generated/asm-offsets.h) \
	 -fplugin-arg-arm_ssp_per_task_plugin-offset=$(shell \
	  awk '{if ($$2 == "TI_STACK_CANARY") print $$3;}'\
	   include/generated/asm-offsets.h))
+	$(eval KBUILD_CFLAGS += $(SSP_PLUGIN_CFLAGS))
+	$(eval GCC_PLUGINS_CFLAGS += $(SSP_PLUGIN_CFLAGS))
 endif

 all:	$(notdir $(KBUILD_IMAGE))
@@ -101,7 +101,6 @@ clean-files += piggy_data lib1funcs.S ashldi3.S bswapsdi2.S \
		$(libfdt) $(libfdt_hdrs) hyp-stub.S

 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
-KBUILD_CFLAGS += $(DISABLE_ARM_SSP_PER_TASK_PLUGIN)

 ifeq ($(CONFIG_FUNCTION_TRACER),y)
 ORIG_CFLAGS := $(KBUILD_CFLAGS)

@@ -117,7 +116,8 @@ CFLAGS_fdt_ro.o := $(nossp-flags-y)
 CFLAGS_fdt_rw.o := $(nossp-flags-y)
 CFLAGS_fdt_wip.o := $(nossp-flags-y)

-ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin -I$(obj)
+ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin \
+	     -I$(obj) $(DISABLE_ARM_SSP_PER_TASK_PLUGIN)
 asflags-y := -DZIMAGE

 # Supply kernel BSS size to the decompressor via a linker symbol.
@@ -94,6 +94,8 @@ static bool __init cntvct_functional(void)
	 * this.
	 */
	np = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
+	if (!np)
+		np = of_find_compatible_node(NULL, NULL, "arm,armv8-timer");
	if (!np)
		goto out_put;
@@ -118,7 +118,7 @@ ENTRY(arm_copy_from_user)

 ENDPROC(arm_copy_from_user)

-	.pushsection .fixup,"ax"
+	.pushsection .text.fixup,"ax"
	.align 0
	copy_abort_preamble
	ldmfd	sp!, {r1, r2, r3}
@@ -119,12 +119,12 @@

	ethernet@e4000 {
		phy-handle = <&rgmii_phy1>;
-		phy-connection-type = "rgmii-txid";
+		phy-connection-type = "rgmii-id";
	};

	ethernet@e6000 {
		phy-handle = <&rgmii_phy2>;
-		phy-connection-type = "rgmii-txid";
+		phy-connection-type = "rgmii-id";
	};

	ethernet@e8000 {

@@ -131,12 +131,12 @@
 &fman0 {
	ethernet@e4000 {
		phy-handle = <&rgmii_phy1>;
-		phy-connection-type = "rgmii";
+		phy-connection-type = "rgmii-id";
	};

	ethernet@e6000 {
		phy-handle = <&rgmii_phy2>;
-		phy-connection-type = "rgmii";
+		phy-connection-type = "rgmii-id";
	};

	ethernet@e8000 {
@@ -55,10 +55,10 @@ static void chacha_doneon(u32 *state, u8 *dst, const u8 *src,
			break;
		}
		chacha_4block_xor_neon(state, dst, src, nrounds, l);
-		bytes -= CHACHA_BLOCK_SIZE * 5;
-		src += CHACHA_BLOCK_SIZE * 5;
-		dst += CHACHA_BLOCK_SIZE * 5;
-		state[12] += 5;
+		bytes -= l;
+		src += l;
+		dst += l;
+		state[12] += DIV_ROUND_UP(l, CHACHA_BLOCK_SIZE);
	}
 }
@@ -29,11 +29,9 @@ typedef struct {
  */
 #define ASID(mm)	((mm)->context.id.counter & 0xffff)

-extern bool arm64_use_ng_mappings;
-
 static inline bool arm64_kernel_unmapped_at_el0(void)
 {
-	return arm64_use_ng_mappings;
+	return cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
 }

 typedef void (*bp_hardening_cb_t)(void);
@@ -23,11 +23,13 @@

 #include <asm/pgtable-types.h>

+extern bool arm64_use_ng_mappings;
+
 #define _PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
 #define _PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)

-#define PTE_MAYBE_NG		(arm64_kernel_unmapped_at_el0() ? PTE_NG : 0)
-#define PMD_MAYBE_NG		(arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0)
+#define PTE_MAYBE_NG		(arm64_use_ng_mappings ? PTE_NG : 0)
+#define PMD_MAYBE_NG		(arm64_use_ng_mappings ? PMD_SECT_NG : 0)

 #define PROT_DEFAULT		(_PROT_DEFAULT | PTE_MAYBE_NG)
 #define PROT_SECT_DEFAULT	(_PROT_SECT_DEFAULT | PMD_MAYBE_NG)
@@ -25,8 +25,8 @@
 #define __NR_compat_gettimeofday	78
 #define __NR_compat_sigreturn		119
 #define __NR_compat_rt_sigreturn	173
-#define __NR_compat_clock_getres	247
 #define __NR_compat_clock_gettime	263
+#define __NR_compat_clock_getres	264
 #define __NR_compat_clock_gettime64	403
 #define __NR_compat_clock_getres_time64	406
@@ -958,11 +958,22 @@ void tick_broadcast(const struct cpumask *mask)
 }
 #endif

+/*
+ * The number of CPUs online, not counting this CPU (which may not be
+ * fully online and so not counted in num_online_cpus()).
+ */
+static inline unsigned int num_other_online_cpus(void)
+{
+	unsigned int this_cpu_online = cpu_online(smp_processor_id());
+
+	return num_online_cpus() - this_cpu_online;
+}
+
 void smp_send_stop(void)
 {
	unsigned long timeout;

-	if (num_online_cpus() > 1) {
+	if (num_other_online_cpus()) {
		cpumask_t mask;

		cpumask_copy(&mask, cpu_online_mask);

@@ -975,10 +986,10 @@ void smp_send_stop(void)

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
-	while (num_online_cpus() > 1 && timeout--)
+	while (num_other_online_cpus() && timeout--)
		udelay(1);

-	if (num_online_cpus() > 1)
+	if (num_other_online_cpus())
		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
			cpumask_pr_args(cpu_online_mask));

@@ -1001,7 +1012,11 @@ void crash_smp_send_stop(void)

	cpus_stopped = 1;

-	if (num_online_cpus() == 1) {
+	/*
+	 * If this cpu is the only one alive at this point in time, online or
+	 * not, there are no stop messages to be sent around, so just back out.
+	 */
+	if (num_other_online_cpus() == 0) {
		sdei_mask_local_cpu();
		return;
	}

@@ -1009,7 +1024,7 @@ void crash_smp_send_stop(void)
	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);

-	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
+	atomic_set(&waiting_for_crash_ipi, num_other_online_cpus());

	pr_crit("SMP: stopping secondary CPUs\n");
	smp_cross_call(&mask, IPI_CPU_CRASH_STOP);
@@ -1817,6 +1817,7 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
 {
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

+	kvmppc_mmu_destroy_pr(vcpu);
	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kfree(vcpu->arch.shadow_vcpu);
@@ -759,7 +759,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
	return 0;

 out_vcpu_uninit:
-	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
	return err;
 }

@@ -792,7 +791,6 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)

	kvmppc_core_vcpu_free(vcpu);

-	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
 }
@@ -120,12 +120,6 @@ static void __init kasan_unmap_early_shadow_vmalloc(void)
	unsigned long k_cur;
	phys_addr_t pa = __pa(kasan_early_shadow_page);

-	if (!early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
-		int ret = kasan_init_shadow_page_tables(k_start, k_end);
-
-		if (ret)
-			panic("kasan: kasan_init_shadow_page_tables() failed");
-	}
	for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
		pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
		pte_t *ptep = pte_offset_kernel(pmd, k_cur);

@@ -143,7 +137,8 @@ void __init kasan_mmu_init(void)
	int ret;
	struct memblock_region *reg;

-	if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
+	if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE) ||
+	    IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
		ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END);

		if (ret)
@@ -3268,7 +3268,10 @@ static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
	/* Initial reset is a superset of the normal reset */
	kvm_arch_vcpu_ioctl_normal_reset(vcpu);

-	/* this equals initial cpu reset in pop, but we don't switch to ESA */
+	/*
+	 * This equals initial cpu reset in pop, but we don't switch to ESA.
+	 * We do not only reset the internal data, but also ...
+	 */
	vcpu->arch.sie_block->gpsw.mask = 0;
	vcpu->arch.sie_block->gpsw.addr = 0;
	kvm_s390_set_prefix(vcpu, 0);

@@ -3278,6 +3281,19 @@ static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
	memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
	vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
	vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
+
+	/* ... the data in sync regs */
+	memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs));
+	vcpu->run->s.regs.ckc = 0;
+	vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK;
+	vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK;
+	vcpu->run->psw_addr = 0;
+	vcpu->run->psw_mask = 0;
+	vcpu->run->s.regs.todpr = 0;
+	vcpu->run->s.regs.cputm = 0;
+	vcpu->run->s.regs.ckc = 0;
+	vcpu->run->s.regs.pp = 0;
+	vcpu->run->s.regs.gbea = 1;
+	vcpu->run->s.regs.fpc = 0;
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
@@ -190,15 +190,12 @@ static int amd_uncore_event_init(struct perf_event *event)

	/*
	 * NB and Last level cache counters (MSRs) are shared across all cores
-	 * that share the same NB / Last level cache. Interrupts can be directed
-	 * to a single target core, however, event counts generated by processes
-	 * running on other cores cannot be masked out. So we do not support
-	 * sampling and per-thread events.
+	 * that share the same NB / Last level cache. On family 16h and below,
+	 * Interrupts can be directed to a single target core, however, event
+	 * counts generated by processes running on other cores cannot be masked
+	 * out. So we do not support sampling and per-thread events via
+	 * CAP_NO_INTERRUPT, and we do not enable counter overflow interrupts:
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

-	/* and we do not enable counter overflow interrupts */
	hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
	hwc->idx = -1;

@@ -306,7 +303,7 @@ static struct pmu amd_nb_pmu = {
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
-	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
+	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
 };

 static struct pmu amd_llc_pmu = {

@@ -317,7 +314,7 @@ static struct pmu amd_llc_pmu = {
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
-	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
+	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
 };

 static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
@@ -360,7 +360,6 @@
	u64 d;
	unsigned long _eip;
	struct operand memop;
-	/* Fields above regs are cleared together. */
	unsigned long _regs[NR_VCPU_REGS];
	struct operand *memopp;
	struct fetch_cache fetch;
@@ -838,13 +838,15 @@ static void free_moved_vector(struct apic_chip_data *apicd)
	bool managed = apicd->is_managed;

	/*
-	 * This should never happen. Managed interrupts are not
-	 * migrated except on CPU down, which does not involve the
-	 * cleanup vector. But try to keep the accounting correct
-	 * nevertheless.
+	 * Managed interrupts are usually not migrated away
+	 * from an online CPU, but CPU isolation 'managed_irq'
+	 * can make that happen.
+	 * 1) Activation does not take the isolation into account
+	 *    to keep the code simple
+	 * 2) Migration away from an isolated CPU can happen when
+	 *    a non-isolated CPU which is in the calculated
+	 *    affinity mask comes online.
	 */
-	WARN_ON_ONCE(managed);
-
	trace_vector_free_moved(apicd->irq, cpu, vector, managed);
	irq_matrix_free(vector_matrix, cpu, vector, managed);
	per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
@@ -493,17 +493,18 @@ static void intel_ppin_init(struct cpuinfo_x86 *c)
			return;

		if ((val & 3UL) == 1UL) {
-			/* PPIN available but disabled: */
+			/* PPIN locked in disabled mode */
			return;
		}

-		/* If PPIN is disabled, but not locked, try to enable: */
-		if (!(val & 3UL)) {
+		/* If PPIN is disabled, try to enable */
+		if (!(val & 2UL)) {
			wrmsrl_safe(MSR_PPIN_CTL, val | 2UL);
			rdmsrl_safe(MSR_PPIN_CTL, &val);
		}

-		if ((val & 3UL) == 2UL)
+		/* Is the enable bit set? */
+		if (val & 2UL)
			set_cpu_cap(c, X86_FEATURE_INTEL_PPIN);
	}
 }
@@ -486,9 +486,14 @@ static int thermal_throttle_offline(unsigned int cpu)
 {
	struct thermal_state *state = &per_cpu(thermal_state, cpu);
	struct device *dev = get_cpu_device(cpu);
+	u32 l;

-	cancel_delayed_work(&state->package_throttle.therm_work);
-	cancel_delayed_work(&state->core_throttle.therm_work);
+	/* Mask the thermal vector before draining evtl. pending work */
+	l = apic_read(APIC_LVTTHMR);
+	apic_write(APIC_LVTTHMR, l | APIC_LVT_MASKED);
+
+	cancel_delayed_work_sync(&state->package_throttle.therm_work);
+	cancel_delayed_work_sync(&state->core_throttle.therm_work);

	state->package_throttle.rate_control_active = false;
	state->core_throttle.rate_control_active = false;
@@ -68,7 +68,7 @@ config KVM_WERROR
	depends on (X86_64 && !KASAN) || !COMPILE_TEST
	depends on EXPERT
	help
-	  Add -Werror to the build flags for (and only for) i915.ko.
+	  Add -Werror to the build flags for KVM.

	  If in doubt, say "N".
@@ -5173,6 +5173,7 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
	ctxt->fetch.ptr = ctxt->fetch.data;
	ctxt->fetch.end = ctxt->fetch.data + insn_len;
	ctxt->opcode_len = 1;
+	ctxt->intercept = x86_intercept_none;
	if (insn_len > 0)
		memcpy(ctxt->fetch.data, insn, insn_len);
	else {
@@ -378,12 +378,15 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
		if (e->fields.delivery_mode == APIC_DM_FIXED) {
			struct kvm_lapic_irq irq;

-			irq.shorthand = APIC_DEST_NOSHORT;
			irq.vector = e->fields.vector;
			irq.delivery_mode = e->fields.delivery_mode << 8;
-			irq.dest_id = e->fields.dest_id;
			irq.dest_mode =
				kvm_lapic_irq_dest_mode(!!e->fields.dest_mode);
+			irq.level = false;
+			irq.trig_mode = e->fields.trig_mode;
+			irq.shorthand = APIC_DEST_NOSHORT;
+			irq.dest_id = e->fields.dest_id;
+			irq.msi_redir_hint = false;
			bitmap_zero(&vcpu_bitmap, 16);
			kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
						 &vcpu_bitmap);
@@ -6312,7 +6312,8 @@ static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu,
	enum exit_fastpath_completion *exit_fastpath)
 {
	if (!is_guest_mode(vcpu) &&
-	    to_svm(vcpu)->vmcb->control.exit_code == EXIT_REASON_MSR_WRITE)
+	    to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
+	    to_svm(vcpu)->vmcb->control.exit_info_1)
		*exit_fastpath = handle_fastpath_set_msr_irqoff(vcpu);
 }
@@ -224,7 +224,7 @@ static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
		return;

	kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
-	vmx->nested.hv_evmcs_vmptr = -1ull;
+	vmx->nested.hv_evmcs_vmptr = 0;
	vmx->nested.hv_evmcs = NULL;
 }

@@ -1923,7 +1923,8 @@ static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
	if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa))
		return 1;

-	if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
+	if (unlikely(!vmx->nested.hv_evmcs ||
+		     evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
		if (!vmx->nested.hv_evmcs)
			vmx->nested.current_vmptr = -1ull;
@@ -2338,6 +2338,17 @@ static void hardware_disable(void)
	kvm_cpu_vmxoff();
 }

+/*
+ * There is no X86_FEATURE for SGX yet, but anyway we need to query CPUID
+ * directly instead of going through cpu_has(), to ensure KVM is trapping
+ * ENCLS whenever it's supported in hardware. It does not matter whether
+ * the host OS supports or has enabled SGX.
+ */
+static bool cpu_has_sgx(void)
+{
+	return cpuid_eax(0) >= 0x12 && (cpuid_eax(0x12) & BIT(0));
+}
+
 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
				      u32 msr, u32 *result)
 {

@@ -2418,8 +2429,9 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
			SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |
			SECONDARY_EXEC_PT_USE_GPA |
			SECONDARY_EXEC_PT_CONCEAL_VMX |
-			SECONDARY_EXEC_ENABLE_VMFUNC |
-			SECONDARY_EXEC_ENCLS_EXITING;
+			SECONDARY_EXEC_ENABLE_VMFUNC;
+		if (cpu_has_sgx())
+			opt2 |= SECONDARY_EXEC_ENCLS_EXITING;
		if (adjust_vmx_controls(min2, opt2,
					MSR_IA32_VMX_PROCBASED_CTLS2,
					&_cpu_based_2nd_exec_control) < 0)
@@ -7195,10 +7195,12 @@ static void kvm_timer_init(void)

	cpu = get_cpu();
	policy = cpufreq_cpu_get(cpu);
-	if (policy && policy->cpuinfo.max_freq)
-		max_tsc_khz = policy->cpuinfo.max_freq;
+	if (policy) {
+		if (policy->cpuinfo.max_freq)
+			max_tsc_khz = policy->cpuinfo.max_freq;
+		cpufreq_cpu_put(policy);
+	}
	put_cpu();
-	cpufreq_cpu_put(policy);
 #endif
	cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
				  CPUFREQ_TRANSITION_NOTIFIER);
@@ -190,7 +190,7 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
	return pmd_k;
 }

-void vmalloc_sync_all(void)
+static void vmalloc_sync(void)
 {
	unsigned long address;

@@ -217,6 +217,16 @@ void vmalloc_sync_all(void)
	}
 }

+void vmalloc_sync_mappings(void)
+{
+	vmalloc_sync();
+}
+
+void vmalloc_sync_unmappings(void)
+{
+	vmalloc_sync();
+}
+
 /*
  * 32-bit:
  *

@@ -319,11 +329,23 @@ out:

 #else /* CONFIG_X86_64: */

-void vmalloc_sync_all(void)
+void vmalloc_sync_mappings(void)
 {
	/*
	 * 64-bit mappings might allocate new p4d/pud pages
	 * that need to be propagated to all tasks' PGDs.
	 */
	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
 }

+void vmalloc_sync_unmappings(void)
+{
+	/*
+	 * Unmappings never allocate or free p4d/pud pages.
+	 * No work is required here.
+	 */
+}
+
 /*
  * 64-bit:
  *
@@ -106,6 +106,22 @@ static unsigned int __ioremap_check_encrypted(struct resource *res)
	return 0;
 }

+/*
+ * The EFI runtime services data area is not covered by walk_mem_res(), but must
+ * be mapped encrypted when SEV is active.
+ */
+static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *desc)
+{
+	if (!sev_active())
+		return;
+
+	if (!IS_ENABLED(CONFIG_EFI))
+		return;
+
+	if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA)
+		desc->flags |= IORES_MAP_ENCRYPTED;
+}
+
 static int __ioremap_collect_map_flags(struct resource *res, void *arg)
 {
	struct ioremap_desc *desc = arg;

@@ -124,6 +140,9 @@ static int __ioremap_collect_map_flags(struct resource *res, void *arg)
 * To avoid multiple resource walks, this function walks resources marked as
 * IORESOURCE_MEM and IORESOURCE_BUSY and looking for system RAM and/or a
 * resource described not as IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
+*
+* After that, deal with misc other ranges in __ioremap_check_other() which do
+* not fall into the above category.
 */
 static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
				struct ioremap_desc *desc)

@@ -135,6 +154,8 @@ static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
	memset(desc, 0, sizeof(struct ioremap_desc));

	walk_mem_res(start, end, desc, __ioremap_collect_map_flags);
+
+	__ioremap_check_other(addr, desc);
 }

 /*
@@ -2039,10 +2039,12 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
			}
			/* and dreg_lo,sreg_lo */
			EMIT2(0x23, add_2reg(0xC0, sreg_lo, dreg_lo));
-			/* and dreg_hi,sreg_hi */
-			EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi));
-			/* or dreg_lo,dreg_hi */
-			EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi));
+			if (is_jmp64) {
+				/* and dreg_hi,sreg_hi */
+				EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi));
+				/* or dreg_lo,dreg_hi */
+				EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi));
+			}
			goto emit_cond_jmp;
		}
		case BPF_JMP | BPF_JSET | BPF_K:
@@ -1318,7 +1318,7 @@ static bool iocg_is_idle(struct ioc_gq *iocg)
		return false;

	/* is something in flight? */
-	if (atomic64_read(&iocg->done_vtime) < atomic64_read(&iocg->vtime))
+	if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime))
		return false;

	return true;
@@ -398,6 +398,28 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
	WARN_ON(e && (rq->tag != -1));

	if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) {
+		/*
+		 * Firstly normal IO request is inserted to scheduler queue or
+		 * sw queue, meantime we add flush request to dispatch queue(
+		 * hctx->dispatch) directly and there is at most one in-flight
+		 * flush request for each hw queue, so it doesn't matter to add
+		 * flush request to tail or front of the dispatch queue.
+		 *
+		 * Secondly in case of NCQ, flush request belongs to non-NCQ
+		 * command, and queueing it will fail when there is any
+		 * in-flight normal IO request(NCQ command). When adding flush
+		 * rq to the front of hctx->dispatch, it is easier to introduce
+		 * extra time to flush rq's latency because of S_SCHED_RESTART
+		 * compared with adding to the tail of dispatch queue, then
+		 * chance of flush merge is increased, and less flush requests
+		 * will be issued to controller. It is observed that ~10% time
+		 * is saved in blktests block/004 on disk attached to AHCI/NCQ
+		 * drive when adding flush rq to the front of hctx->dispatch.
+		 *
+		 * Simply queue flush rq to the front of hctx->dispatch so that
+		 * intensive flush workloads can benefit in case of NCQ HW.
+		 */
+		at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head;
		blk_mq_request_bypass_insert(rq, at_head, false);
		goto run;
	}
@@ -301,6 +301,42 @@ struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector)
 }
 EXPORT_SYMBOL_GPL(disk_map_sector_rcu);

+/**
+ * disk_has_partitions
+ * @disk: gendisk of interest
+ *
+ * Walk through the partition table and check if valid partition exists.
+ *
+ * CONTEXT:
+ * Don't care.
+ *
+ * RETURNS:
+ * True if the gendisk has at least one valid non-zero size partition.
+ * Otherwise false.
+ */
+bool disk_has_partitions(struct gendisk *disk)
+{
+	struct disk_part_tbl *ptbl;
+	int i;
+	bool ret = false;
+
+	rcu_read_lock();
+	ptbl = rcu_dereference(disk->part_tbl);
+
+	/* Iterate partitions skipping the whole device at index 0 */
+	for (i = 1; i < ptbl->len; i++) {
+		if (rcu_dereference(ptbl->part[i])) {
+			ret = true;
+			break;
+		}
+	}
+
+	rcu_read_unlock();
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(disk_has_partitions);
+
 /*
  * Can be deleted altogether. Later.
  *
@@ -171,7 +171,7 @@ int ghes_estatus_pool_init(int num_ghes)
	 * New allocation must be visible in all pgd before it can be found by
	 * an NMI allocating from the pool.
	 */
-	vmalloc_sync_all();
+	vmalloc_sync_mappings();

	rc = gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
	if (rc)
@@ -448,6 +448,7 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
	inode->i_uid = info->root_uid;
	inode->i_gid = info->root_gid;

+	refcount_set(&device->ref, 1);
	device->binderfs_inode = inode;
	device->miscdev.minor = minor;
@@ -4713,7 +4713,7 @@ EXPORT_SYMBOL(of_clk_get_by_name);
 *
 * Returns: The number of clocks that are possible parents of this node
 */
-unsigned int of_clk_get_parent_count(struct device_node *np)
+unsigned int of_clk_get_parent_count(const struct device_node *np)
 {
	int count;

@@ -4725,7 +4725,7 @@ unsigned int of_clk_get_parent_count(struct device_node *np)
 }
 EXPORT_SYMBOL_GPL(of_clk_get_parent_count);

-const char *of_clk_get_parent_name(struct device_node *np, int index)
+const char *of_clk_get_parent_name(const struct device_node *np, int index)
 {
	struct of_phandle_args clkspec;
	struct property *prop;
@@ -592,24 +592,6 @@ static struct clk_branch disp_cc_mdss_rot_clk = {
	},
 };

-static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
-	.halt_reg = 0x400c,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0x400c,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "disp_cc_mdss_rscc_ahb_clk",
-			.parent_data = &(const struct clk_parent_data){
-				.hw = &disp_cc_mdss_ahb_clk_src.clkr.hw,
-			},
-			.num_parents = 1,
-			.flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
 static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
	.halt_reg = 0x4008,
	.halt_check = BRANCH_HALT,

@@ -687,7 +669,6 @@ static struct clk_regmap *disp_cc_sc7180_clocks[] = {
	[DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
	[DISP_CC_MDSS_ROT_CLK] = &disp_cc_mdss_rot_clk.clkr,
	[DISP_CC_MDSS_ROT_CLK_SRC] = &disp_cc_mdss_rot_clk_src.clkr,
-	[DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
	[DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
	[DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
	[DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
@@ -97,7 +97,7 @@ static struct clk_branch video_cc_vcodec0_axi_clk = {

 static struct clk_branch video_cc_vcodec0_core_clk = {
	.halt_reg = 0x890,
-	.halt_check = BRANCH_HALT,
+	.halt_check = BRANCH_HALT_VOTED,
	.clkr = {
		.enable_reg = 0x890,
		.enable_mask = BIT(0),
@@ -1151,7 +1151,7 @@ int dma_async_device_register(struct dma_device *device)
	}

	if (!device->device_release)
-		dev_warn(device->dev,
+		dev_dbg(device->dev,
			 "WARN: Device release is not defined so it is not safe to unbind this driver while in use\n");

	kref_init(&device->ref);
@@ -81,9 +81,9 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
	dev = &idxd->pdev->dev;
	idxd_cdev = &wq->idxd_cdev;

-	dev_dbg(dev, "%s called\n", __func__);
+	dev_dbg(dev, "%s called: %d\n", __func__, idxd_wq_refcount(wq));

-	if (idxd_wq_refcount(wq) > 1 && wq_dedicated(wq))
+	if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq))
		return -EBUSY;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -564,12 +564,12 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
	if (IS_ERR(flow->udma_rflow)) {
		ret = PTR_ERR(flow->udma_rflow);
		dev_err(dev, "UDMAX rflow get err %d\n", ret);
-		goto err;
+		return ret;
	}

	if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) {
		xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
-		return -ENODEV;
+		ret = -ENODEV;
+		goto err_rflow_put;
	}

	/* request and cfg rings */

@@ -578,7 +578,7 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
	if (!flow->ringrx) {
		ret = -ENODEV;
		dev_err(dev, "Failed to get RX ring\n");
-		goto err;
+		goto err_rflow_put;
	}

	flow->ringrxfdq = k3_ringacc_request_ring(rx_chn->common.ringacc,

@@ -586,19 +586,19 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
	if (!flow->ringrxfdq) {
		ret = -ENODEV;
		dev_err(dev, "Failed to get RXFDQ ring\n");
-		goto err;
+		goto err_ringrx_free;
	}

	ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringrx %d\n", ret);
-		goto err;
+		goto err_ringrxfdq_free;
	}

	ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret);
-		goto err;
+		goto err_ringrxfdq_free;
	}

	if (rx_chn->remote) {

@@ -648,7 +648,7 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
	if (ret) {
		dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id,
			ret);
-		goto err;
+		goto err_ringrxfdq_free;
	}

	rx_chn->flows_ready++;

@@ -656,8 +656,17 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
		flow->udma_rflow_id, rx_chn->flows_ready);

	return 0;
-err:
-	k3_udma_glue_release_rx_flow(rx_chn, flow_idx);
+
+err_ringrxfdq_free:
+	k3_ringacc_ring_free(flow->ringrxfdq);
+
+err_ringrx_free:
+	k3_ringacc_ring_free(flow->ringrx);
+
+err_rflow_put:
+	xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
+	flow->udma_rflow = NULL;
+
	return ret;
 }
@@ -83,13 +83,16 @@ static ssize_t
 efivar_attr_read(struct efivar_entry *entry, char *buf)
 {
	struct efi_variable *var = &entry->var;
+	unsigned long size = sizeof(var->Data);
	char *str = buf;
+	int ret;

	if (!entry || !buf)
		return -EINVAL;

-	var->DataSize = 1024;
-	if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
+	ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
+	var->DataSize = size;
+	if (ret)
		return -EIO;

	if (var->Attributes & EFI_VARIABLE_NON_VOLATILE)

@@ -116,13 +119,16 @@ static ssize_t
 efivar_size_read(struct efivar_entry *entry, char *buf)
 {
	struct efi_variable *var = &entry->var;
+	unsigned long size = sizeof(var->Data);
	char *str = buf;
+	int ret;

	if (!entry || !buf)
		return -EINVAL;

-	var->DataSize = 1024;
-	if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
+	ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
+	var->DataSize = size;
+	if (ret)
		return -EIO;

	str += sprintf(str, "0x%lx\n", var->DataSize);

@@ -133,12 +139,15 @@ static ssize_t
 efivar_data_read(struct efivar_entry *entry, char *buf)
 {
	struct efi_variable *var = &entry->var;
+	unsigned long size = sizeof(var->Data);
+	int ret;

	if (!entry || !buf)
		return -EINVAL;

-	var->DataSize = 1024;
-	if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
+	ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
+	var->DataSize = size;
+	if (ret)
		return -EIO;

	memcpy(buf, var->Data, var->DataSize);

@@ -199,6 +208,9 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
	u8 *data;
	int err;

+	if (!entry || !buf)
+		return -EINVAL;
+
	if (in_compat_syscall()) {
		struct compat_efi_variable *compat;

@@ -250,14 +262,16 @@ efivar_show_raw(struct efivar_entry *entry, char *buf)
 {
	struct efi_variable *var = &entry->var;
	struct compat_efi_variable *compat;
+	unsigned long datasize = sizeof(var->Data);
	size_t size;
+	int ret;

	if (!entry || !buf)
		return 0;

-	var->DataSize = 1024;
-	if (efivar_entry_get(entry, &entry->var.Attributes,
-			     &entry->var.DataSize, entry->var.Data))
+	ret = efivar_entry_get(entry, &var->Attributes, &datasize, var->Data);
+	var->DataSize = datasize;
+	if (ret)
		return -EIO;

	if (in_compat_syscall()) {
@ -21,18 +21,21 @@
|
||||
#include "gpiolib.h"
|
||||
#include "gpiolib-acpi.h"
|
||||
|
||||
#define QUIRK_NO_EDGE_EVENTS_ON_BOOT 0x01l
|
||||
#define QUIRK_NO_WAKEUP 0x02l
|
||||
|
||||
static int run_edge_events_on_boot = -1;
|
||||
module_param(run_edge_events_on_boot, int, 0444);
|
||||
MODULE_PARM_DESC(run_edge_events_on_boot,
|
||||
"Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto");
|
||||
|
||||
static int honor_wakeup = -1;
|
||||
module_param(honor_wakeup, int, 0444);
|
||||
MODULE_PARM_DESC(honor_wakeup,
|
||||
"Honor the ACPI wake-capable flag: 0=no, 1=yes, -1=auto");
|
||||
static char *ignore_wake;
|
||||
module_param(ignore_wake, charp, 0444);
|
||||
MODULE_PARM_DESC(ignore_wake,
|
||||
"controller@pin combos on which to ignore the ACPI wake flag "
|
||||
"ignore_wake=controller@pin[,controller@pin[,...]]");
|
||||
|
||||
struct acpi_gpiolib_dmi_quirk {
|
||||
bool no_edge_events_on_boot;
|
||||
char *ignore_wake;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct acpi_gpio_event - ACPI GPIO event handler data
|
||||
@ -202,6 +205,57 @@ static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio)
|
||||
acpi_gpiochip_request_irq(acpi_gpio, event);
|
||||
}
|
||||
|
||||
static bool acpi_gpio_in_ignore_list(const char *controller_in, int pin_in)
|
||||
{
|
||||
const char *controller, *pin_str;
|
||||
int len, pin;
|
||||
char *endp;
|
||||
|
||||
controller = ignore_wake;
|
||||
while (controller) {
|
||||
pin_str = strchr(controller, '@');
|
||||
if (!pin_str)
|
||||
goto err;
|
||||
|
||||
len = pin_str - controller;
|
||||
if (len == strlen(controller_in) &&
|
||||
strncmp(controller, controller_in, len) == 0) {
|
||||
pin = simple_strtoul(pin_str + 1, &endp, 10);
|
||||
if (*endp != 0 && *endp != ',')
|
||||
goto err;
|
||||
|
||||
if (pin == pin_in)
|
||||
return true;
|
||||
}
|
||||
|
||||
controller = strchr(controller, ',');
|
||||
if (controller)
|
||||
controller++;
|
||||
}
|
||||
|
||||
return false;
|
||||
err:
|
||||
pr_err_once("Error invalid value for gpiolib_acpi.ignore_wake: %s\n",
|
||||
ignore_wake);
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool acpi_gpio_irq_is_wake(struct device *parent,
|
||||
struct acpi_resource_gpio *agpio)
|
||||
{
|
||||
int pin = agpio->pin_table[0];
|
||||
|
||||
if (agpio->wake_capable != ACPI_WAKE_CAPABLE)
|
||||
return false;
|
||||
|
||||
if (acpi_gpio_in_ignore_list(dev_name(parent), pin)) {
|
||||
dev_info(parent, "Ignoring wakeup on pin %d\n", pin);
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Always returns AE_OK so that we keep looping over the resources */
|
||||
static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
|
||||
void *context)
|
||||
@ -289,7 +343,7 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
|
||||
event->handle = evt_handle;
|
||||
event->handler = handler;
|
||||
event->irq = irq;
|
||||
event->irq_is_wake = honor_wakeup && agpio->wake_capable == ACPI_WAKE_CAPABLE;
|
||||
event->irq_is_wake = acpi_gpio_irq_is_wake(chip->parent, agpio);
|
||||
event->pin = pin;
|
||||
event->desc = desc;
|
||||
|
||||
@ -1328,7 +1382,9 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
|
||||
             DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
         },
-        .driver_data = (void *)QUIRK_NO_EDGE_EVENTS_ON_BOOT,
+        .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+            .no_edge_events_on_boot = true,
+        },
     },
     {
         /*
@@ -1341,16 +1397,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] = {
             DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"),
             DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"),
         },
-        .driver_data = (void *)QUIRK_NO_EDGE_EVENTS_ON_BOOT,
+        .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+            .no_edge_events_on_boot = true,
+        },
     },
     {
         /*
-         * Various HP X2 10 Cherry Trail models use an external
-         * embedded-controller connected via I2C + an ACPI GPIO
-         * event handler. The embedded controller generates various
-         * spurious wakeup events when suspended. So disable wakeup
-         * for its handler (it uses the only ACPI GPIO event handler).
-         * This breaks wakeup when opening the lid, the user needs
+         * HP X2 10 models with Cherry Trail SoC + TI PMIC use an
+         * external embedded-controller connected via I2C + an ACPI GPIO
+         * event handler on INT33FF:01 pin 0, causing spurious wakeups.
+         * When suspending by closing the LID, the power to the USB
+         * keyboard is turned off, causing INT0002 ACPI events to
+         * trigger once the XHCI controller notices the keyboard is
+         * gone. So INT0002 events cause spurious wakeups too. Ignoring
+         * EC wakes breaks wakeup when opening the lid, the user needs
          * to press the power-button to wakeup the system. The
         * alternative is suspend simply not working, which is worse.
          */
@@ -1358,33 +1418,61 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] = {
             DMI_MATCH(DMI_SYS_VENDOR, "HP"),
             DMI_MATCH(DMI_PRODUCT_NAME, "HP x2 Detachable 10-p0XX"),
         },
-        .driver_data = (void *)QUIRK_NO_WAKEUP,
+        .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+            .ignore_wake = "INT33FF:01@0,INT0002:00@2",
+        },
     },
+    {
+        /*
+         * HP X2 10 models with Bay Trail SoC + AXP288 PMIC use an
+         * external embedded-controller connected via I2C + an ACPI GPIO
+         * event handler on INT33FC:02 pin 28, causing spurious wakeups.
+         */
+        .matches = {
+            DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+            DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
+            DMI_MATCH(DMI_BOARD_NAME, "815D"),
+        },
+        .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+            .ignore_wake = "INT33FC:02@28",
+        },
+    },
+    {
+        /*
+         * HP X2 10 models with Cherry Trail SoC + AXP288 PMIC use an
+         * external embedded-controller connected via I2C + an ACPI GPIO
+         * event handler on INT33FF:01 pin 0, causing spurious wakeups.
+         */
+        .matches = {
+            DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+            DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
+            DMI_MATCH(DMI_BOARD_NAME, "813E"),
+        },
+        .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+            .ignore_wake = "INT33FF:01@0",
+        },
+    },
     {} /* Terminating entry */
 };
 
 static int acpi_gpio_setup_params(void)
 {
+    const struct acpi_gpiolib_dmi_quirk *quirk = NULL;
     const struct dmi_system_id *id;
-    long quirks = 0;
 
     id = dmi_first_match(gpiolib_acpi_quirks);
     if (id)
-        quirks = (long)id->driver_data;
+        quirk = id->driver_data;
 
     if (run_edge_events_on_boot < 0) {
-        if (quirks & QUIRK_NO_EDGE_EVENTS_ON_BOOT)
+        if (quirk && quirk->no_edge_events_on_boot)
             run_edge_events_on_boot = 0;
         else
            run_edge_events_on_boot = 1;
     }
 
-    if (honor_wakeup < 0) {
-        if (quirks & QUIRK_NO_WAKEUP)
-            honor_wakeup = 0;
-        else
-            honor_wakeup = 1;
-    }
+    if (ignore_wake == NULL && quirk && quirk->ignore_wake)
+        ignore_wake = quirk->ignore_wake;
 
     return 0;
 }
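A rough illustration of how ignore_wake strings like the ones above can be consumed: each entry is a "controller@pin" token in a comma-separated list. This is a hedged, plain-C sketch — in_ignore_list() and its buffer size are invented for the demo; it is not the kernel's parser:

#include <stdio.h>
#include <string.h>

static int in_ignore_list(const char *list, const char *ctrl, unsigned int pin)
{
    char entry[32];
    int n = snprintf(entry, sizeof(entry), "%s@%u", ctrl, pin);
    const char *p = list;

    if (n < 0 || (size_t)n >= sizeof(entry))
        return 0;
    while ((p = strstr(p, entry)) != NULL) {
        /* match only whole comma-separated entries, so "@0" can't hit "@01" */
        int at_start = (p == list) || (p[-1] == ',');
        char end = p[n];

        if (at_start && (end == '\0' || end == ','))
            return 1;
        p += n;
    }
    return 0;
}

int main(void)
{
    const char *list = "INT33FF:01@0,INT0002:00@2";

    printf("%d %d %d\n",
           in_ignore_list(list, "INT33FF:01", 0),   /* 1 */
           in_ignore_list(list, "INT0002:00", 2),   /* 1 */
           in_ignore_list(list, "INT33FF:01", 2));  /* 0 */
    return 0;
}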
@@ -2306,9 +2306,16 @@ static void gpiochip_irq_disable(struct irq_data *d)
 {
     struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
 
+    /*
+     * Since we override .irq_disable() we need to mimic the
+     * behaviour of __irq_disable() in irq/chip.c.
+     * First call .irq_disable() if it exists, else mimic the
+     * behaviour of mask_irq() which calls .irq_mask() if
+     * it exists.
+     */
     if (chip->irq.irq_disable)
         chip->irq.irq_disable(d);
-    else
+    else if (chip->irq.chip->irq_mask)
         chip->irq.chip->irq_mask(d);
     gpiochip_disable_irq(chip, d->hwirq);
 }
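The comment in this hunk is the whole story: the old code fell through to .irq_mask() unconditionally and could dereference a NULL callback. A minimal stand-alone sketch of the fallback chain, with hypothetical struct and callback names (plain C, not the kernel types):

#include <stdio.h>

struct irq_chip_ops {
    void (*irq_disable)(int irq);
    void (*irq_mask)(int irq);
};

static void my_mask(int irq) { printf("mask irq %d\n", irq); }

static void disable_irq_like(const struct irq_chip_ops *ops, int irq)
{
    if (ops->irq_disable)
        ops->irq_disable(irq);
    else if (ops->irq_mask)  /* the NULL check the fix adds */
        ops->irq_mask(irq);
    /* else: nothing to call; some chips provide neither callback */
}

int main(void)
{
    struct irq_chip_ops ops = { .irq_disable = NULL, .irq_mask = my_mask };

    disable_irq_like(&ops, 5);  /* falls back to mask */
    ops.irq_mask = NULL;
    disable_irq_like(&ops, 5);  /* now a no-op instead of a NULL deref */
    return 0;
}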
@@ -781,11 +781,11 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
     ssize_t result = 0;
     uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
 
-    if (size & 3 || *pos & 3)
+    if (size > 4096 || size & 3 || *pos & 3)
         return -EINVAL;
 
     /* decode offset */
-    offset = *pos & GENMASK_ULL(11, 0);
+    offset = (*pos & GENMASK_ULL(11, 0)) >> 2;
     se = (*pos & GENMASK_ULL(19, 12)) >> 12;
     sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
     cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
 
@@ -823,7 +823,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
     while (size) {
         uint32_t value;
 
-        value = data[offset++];
+        value = data[result >> 2];
         r = put_user(value, (uint32_t *)buf);
         if (r) {
             result = r;
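For readers unfamiliar with the GENMASK_ULL() decode used here, a self-contained sketch (userspace C; the macro is re-defined locally to mirror include/linux/bits.h). The added ">> 2" is what turns the byte offset field into a u32 index, matching the indexing fix in the second hunk:

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
    (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
    uint64_t pos = (3ULL << 12) | (1ULL << 20) | 0x10; /* se=3, sh=1, byte offset 0x10 */
    uint32_t offset = (pos & GENMASK_ULL(11, 0)) >> 2; /* u32 index, not bytes */
    uint32_t se = (pos & GENMASK_ULL(19, 12)) >> 12;
    uint32_t sh = (pos & GENMASK_ULL(27, 20)) >> 20;

    printf("offset=%u se=%u sh=%u\n",
           (unsigned)offset, (unsigned)se, (unsigned)sh); /* offset=4 se=3 sh=1 */
    return 0;
}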
@@ -3913,6 +3913,8 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
     if (r)
         goto out;
 
+    amdgpu_fbdev_set_suspend(tmp_adev, 0);
+
     /* must succeed. */
     amdgpu_ras_resume(tmp_adev);
 
@@ -4086,6 +4088,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
      */
     amdgpu_unregister_gpu_instance(tmp_adev);
 
+    amdgpu_fbdev_set_suspend(adev, 1);
+
     /* disable ras on ALL IPs */
     if (!(in_ras_intr && !use_baco) &&
           amdgpu_device_ip_need_full_reset(tmp_adev))
@@ -693,7 +693,7 @@ static int jpeg_v2_0_set_clockgating_state(void *handle,
     bool enable = (state == AMD_CG_STATE_GATE);
 
     if (enable) {
-        if (jpeg_v2_0_is_idle(handle))
+        if (!jpeg_v2_0_is_idle(handle))
             return -EBUSY;
         jpeg_v2_0_enable_clock_gating(adev);
     } else {
 
@@ -477,7 +477,7 @@ static int jpeg_v2_5_set_clockgating_state(void *handle,
             continue;
 
         if (enable) {
-            if (jpeg_v2_5_is_idle(handle))
+            if (!jpeg_v2_5_is_idle(handle))
                 return -EBUSY;
             jpeg_v2_5_enable_clock_gating(adev, i);
         } else {
 
@@ -1352,7 +1352,7 @@ static int vcn_v1_0_set_clockgating_state(void *handle,
 
     if (enable) {
         /* wait for STATUS to clear */
-        if (vcn_v1_0_is_idle(handle))
+        if (!vcn_v1_0_is_idle(handle))
             return -EBUSY;
         vcn_v1_0_enable_clock_gating(adev);
     } else {
 
@@ -1217,7 +1217,7 @@ static int vcn_v2_0_set_clockgating_state(void *handle,
 
     if (enable) {
         /* wait for STATUS to clear */
-        if (vcn_v2_0_is_idle(handle))
+        if (!vcn_v2_0_is_idle(handle))
             return -EBUSY;
         vcn_v2_0_enable_clock_gating(adev);
     } else {
 
@@ -1672,7 +1672,7 @@ static int vcn_v2_5_set_clockgating_state(void *handle,
         return 0;
 
     if (enable) {
-        if (vcn_v2_5_is_idle(handle))
+        if (!vcn_v2_5_is_idle(handle))
             return -EBUSY;
         vcn_v2_5_enable_clock_gating(adev);
     } else {
@@ -522,8 +522,9 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
 
     acrtc_state = to_dm_crtc_state(acrtc->base.state);
 
-    DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
-             amdgpu_dm_vrr_active(acrtc_state));
+    DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
+             amdgpu_dm_vrr_active(acrtc_state),
+             acrtc_state->active_planes);
 
     amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
     drm_crtc_handle_vblank(&acrtc->base);
 
@@ -543,7 +544,18 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
             &acrtc_state->vrr_params.adjust);
     }
 
-    if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED) {
+    /*
+     * If there aren't any active_planes then DCH HUBP may be clock-gated.
+     * In that case, pageflip completion interrupts won't fire and pageflip
+     * completion events won't get delivered. Prevent this by sending
+     * pending pageflip events from here if a flip is still pending.
+     *
+     * If any planes are enabled, use dm_pflip_high_irq() instead, to
+     * avoid race conditions between flip programming and completion,
+     * which could cause too early flip completion events.
+     */
+    if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
+        acrtc_state->active_planes == 0) {
         if (acrtc->event) {
             drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
             acrtc->event = NULL;
@@ -3401,6 +3401,17 @@ static bool retrieve_link_cap(struct dc_link *link)
             sink_id.ieee_device_id,
             sizeof(sink_id.ieee_device_id));
 
+    /* Quirk Apple MBP 2017 15" Retina panel: Wrong DP_MAX_LINK_RATE */
+    {
+        uint8_t str_mbp_2017[] = { 101, 68, 21, 101, 98, 97 };
+
+        if ((link->dpcd_caps.sink_dev_id == 0x0010fa) &&
+            !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2017,
+                sizeof(str_mbp_2017))) {
+            link->reported_link_cap.link_rate = 0x0c;
+        }
+    }
+
     core_link_read_dpcd(
         link,
         DP_SINK_HW_REVISION_START,
@@ -108,7 +108,6 @@ static const struct hwseq_private_funcs dcn20_private_funcs = {
     .enable_power_gating_plane = dcn20_enable_power_gating_plane,
     .dpp_pg_control = dcn20_dpp_pg_control,
     .hubp_pg_control = dcn20_hubp_pg_control,
-    .dsc_pg_control = NULL,
     .update_odm = dcn20_update_odm,
     .dsc_pg_control = dcn20_dsc_pg_control,
     .get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color,
 
@@ -116,7 +116,6 @@ static const struct hwseq_private_funcs dcn21_private_funcs = {
     .enable_power_gating_plane = dcn20_enable_power_gating_plane,
     .dpp_pg_control = dcn20_dpp_pg_control,
     .hubp_pg_control = dcn20_hubp_pg_control,
-    .dsc_pg_control = NULL,
     .update_odm = dcn20_update_odm,
     .dsc_pg_control = dcn20_dsc_pg_control,
     .get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color,
@@ -146,14 +146,14 @@ static const struct of_device_id komeda_of_match[] = {
 
 MODULE_DEVICE_TABLE(of, komeda_of_match);
 
-static int komeda_rt_pm_suspend(struct device *dev)
+static int __maybe_unused komeda_rt_pm_suspend(struct device *dev)
 {
     struct komeda_drv *mdrv = dev_get_drvdata(dev);
 
     return komeda_dev_suspend(mdrv->mdev);
 }
 
-static int komeda_rt_pm_resume(struct device *dev)
+static int __maybe_unused komeda_rt_pm_resume(struct device *dev)
 {
     struct komeda_drv *mdrv = dev_get_drvdata(dev);
 
@@ -156,10 +156,8 @@ int bochs_hw_init(struct drm_device *dev)
         size = min(size, mem);
     }
 
-    if (pci_request_region(pdev, 0, "bochs-drm") != 0) {
-        DRM_ERROR("Cannot request framebuffer\n");
-        return -EBUSY;
-    }
+    if (pci_request_region(pdev, 0, "bochs-drm") != 0)
+        DRM_WARN("Cannot request framebuffer, boot fb still active?\n");
 
     bochs->fb_map = ioremap(addr, size);
     if (bochs->fb_map == NULL) {
@@ -1624,28 +1624,34 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
         frame.colorspace = HDMI_COLORSPACE_RGB;
 
     /* Set up colorimetry */
-    switch (hdmi->hdmi_data.enc_out_encoding) {
-    case V4L2_YCBCR_ENC_601:
-        if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV601)
-            frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
-        else
-            frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
-        frame.extended_colorimetry =
-                HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
-        break;
-    case V4L2_YCBCR_ENC_709:
-        if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV709)
-            frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
-        else
-            frame.colorimetry = HDMI_COLORIMETRY_ITU_709;
-        frame.extended_colorimetry =
-                HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
-        break;
-    default: /* Carries no data */
-        frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
-        frame.extended_colorimetry =
-                HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
-        break;
+    if (!hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format)) {
+        switch (hdmi->hdmi_data.enc_out_encoding) {
+        case V4L2_YCBCR_ENC_601:
+            if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV601)
+                frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
+            else
+                frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
+            frame.extended_colorimetry =
+                    HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
+            break;
+        case V4L2_YCBCR_ENC_709:
+            if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV709)
+                frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
+            else
+                frame.colorimetry = HDMI_COLORIMETRY_ITU_709;
+            frame.extended_colorimetry =
+                    HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
+            break;
+        default: /* Carries no data */
+            frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
+            frame.extended_colorimetry =
+                    HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
+            break;
+        }
+    } else {
+        frame.colorimetry = HDMI_COLORIMETRY_NONE;
+        frame.extended_colorimetry =
+            HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
     }
 
     frame.scan_mode = HDMI_SCAN_MODE_NONE;
@@ -542,10 +542,12 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
     }
 
     DRM_DEBUG_LEASE("Creating lease\n");
+    /* lessee will take the ownership of leases */
     lessee = drm_lease_create(lessor, &leases);
 
     if (IS_ERR(lessee)) {
         ret = PTR_ERR(lessee);
+        idr_destroy(&leases);
         goto out_leases;
     }
 
@@ -580,7 +582,6 @@ out_lessee:
 
 out_leases:
     put_unused_fd(fd);
-    idr_destroy(&leases);
 
     DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl failed: %d\n", ret);
     return ret;
@@ -1600,17 +1600,6 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
     spin_unlock(&old->breadcrumbs.irq_lock);
 }
 
-static struct i915_request *
-last_active(const struct intel_engine_execlists *execlists)
-{
-    struct i915_request * const *last = READ_ONCE(execlists->active);
-
-    while (*last && i915_request_completed(*last))
-        last++;
-
-    return *last;
-}
-
 #define for_each_waiter(p__, rq__) \
     list_for_each_entry_lockless(p__, \
                      &(rq__)->sched.waiters_list, \
 
@@ -1740,11 +1729,9 @@ static void record_preemption(struct intel_engine_execlists *execlists)
     (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
 }
 
-static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
+static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
+                        const struct i915_request *rq)
 {
-    struct i915_request *rq;
-
-    rq = last_active(&engine->execlists);
     if (!rq)
         return 0;
 
@@ -1755,13 +1742,14 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
     return READ_ONCE(engine->props.preempt_timeout_ms);
 }
 
-static void set_preempt_timeout(struct intel_engine_cs *engine)
+static void set_preempt_timeout(struct intel_engine_cs *engine,
+                const struct i915_request *rq)
 {
     if (!intel_engine_has_preempt_reset(engine))
         return;
 
     set_timer_ms(&engine->execlists.preempt,
-             active_preempt_timeout(engine));
+             active_preempt_timeout(engine, rq));
 }
 
 static inline void clear_ports(struct i915_request **ports, int count)
 
@@ -1774,6 +1762,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
     struct intel_engine_execlists * const execlists = &engine->execlists;
     struct i915_request **port = execlists->pending;
     struct i915_request ** const last_port = port + execlists->port_mask;
+    struct i915_request * const *active;
     struct i915_request *last;
     struct rb_node *rb;
     bool submit = false;
 
@@ -1828,7 +1817,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
      * i.e. we will retrigger preemption following the ack in case
      * of trouble.
      */
-    last = last_active(execlists);
+    active = READ_ONCE(execlists->active);
+    while ((last = *active) && i915_request_completed(last))
+        active++;
+
     if (last) {
         if (need_preempt(engine, last, rb)) {
             ENGINE_TRACE(engine,
 
@@ -2110,7 +2102,7 @@ done:
      * Skip if we ended up with exactly the same set of requests,
      * e.g. trying to timeslice a pair of ordered contexts
      */
-    if (!memcmp(execlists->active, execlists->pending,
+    if (!memcmp(active, execlists->pending,
             (port - execlists->pending + 1) * sizeof(*port))) {
         do
             execlists_schedule_out(fetch_and_zero(port));
 
@@ -2121,7 +2113,7 @@ done:
         clear_ports(port + 1, last_port - port);
 
         execlists_submit_ports(engine);
-        set_preempt_timeout(engine);
+        set_preempt_timeout(engine, *active);
     } else {
 skip_submit:
         ring_set_paused(engine, 0);
 
@@ -4008,26 +4000,6 @@ static int gen12_emit_flush_render(struct i915_request *request,
 
         *cs++ = preparser_disable(false);
         intel_ring_advance(request, cs);
-
-        /*
-         * Wa_1604544889:tgl
-         */
-        if (IS_TGL_REVID(request->i915, TGL_REVID_A0, TGL_REVID_A0)) {
-            flags = 0;
-            flags |= PIPE_CONTROL_CS_STALL;
-            flags |= PIPE_CONTROL_HDC_PIPELINE_FLUSH;
-
-            flags |= PIPE_CONTROL_STORE_DATA_INDEX;
-            flags |= PIPE_CONTROL_QW_WRITE;
-
-            cs = intel_ring_begin(request, 6);
-            if (IS_ERR(cs))
-                return PTR_ERR(cs);
-
-            cs = gen8_emit_pipe_control(cs, flags,
-                            LRC_PPHWSP_SCRATCH_ADDR);
-            intel_ring_advance(request, cs);
-        }
     }
 
     return 0;
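The removed last_active() helper is open-coded at its call site: skip requests that have already completed and treat the first incomplete one as the active request. A tiny plain-C model of the same scan, with made-up request and ring types:

#include <stdbool.h>
#include <stdio.h>

struct request { int id; bool completed; };

int main(void)
{
    struct request r1 = { 1, true }, r2 = { 2, true }, r3 = { 3, false };
    struct request *ring[] = { &r1, &r2, &r3, NULL };
    struct request *const *active = ring;
    struct request *last;

    /* same shape as: while ((last = *active) && completed(last)) active++; */
    while ((last = *active) && last->completed)
        active++;

    printf("last active: %d\n", last ? last->id : -1); /* 3 */
    return 0;
}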
@@ -1529,15 +1529,34 @@ err_obj:
     return ERR_PTR(err);
 }
 
+static const struct {
+    u32 start;
+    u32 end;
+} mcr_ranges_gen8[] = {
+    { .start = 0x5500, .end = 0x55ff },
+    { .start = 0x7000, .end = 0x7fff },
+    { .start = 0x9400, .end = 0x97ff },
+    { .start = 0xb000, .end = 0xb3ff },
+    { .start = 0xe000, .end = 0xe7ff },
+    {},
+};
+
 static bool mcr_range(struct drm_i915_private *i915, u32 offset)
 {
+    int i;
+
+    if (INTEL_GEN(i915) < 8)
+        return false;
+
     /*
-     * Registers in this range are affected by the MCR selector
+     * Registers in these ranges are affected by the MCR selector
      * which only controls CPU initiated MMIO. Routing does not
      * work for CS access so we cannot verify them on this path.
      */
-    if (INTEL_GEN(i915) >= 8 && (offset >= 0xb000 && offset <= 0xb4ff))
-        return true;
+    for (i = 0; mcr_ranges_gen8[i].start; i++)
+        if (offset >= mcr_ranges_gen8[i].start &&
+            offset <= mcr_ranges_gen8[i].end)
+            return true;
 
     return false;
 }
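A hedged stand-alone model of the new table lookup (plain C, same constants as the hunk): a sentinel-terminated range table scanned linearly. Note the top of the 0xb000 range moved from 0xb4ff to 0xb3ff, so offsets such as 0xb400 no longer count as MCR-affected:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static const struct { uint32_t start, end; } mcr_ranges_gen8[] = {
    { 0x5500, 0x55ff },
    { 0x7000, 0x7fff },
    { 0x9400, 0x97ff },
    { 0xb000, 0xb3ff },
    { 0xe000, 0xe7ff },
    { 0, 0 },   /* sentinel: .start == 0 terminates the scan */
};

static bool mcr_range(uint32_t offset)
{
    for (int i = 0; mcr_ranges_gen8[i].start; i++)
        if (offset >= mcr_ranges_gen8[i].start &&
            offset <= mcr_ranges_gen8[i].end)
            return true;
    return false;
}

int main(void)
{
    printf("%d %d %d\n", mcr_range(0x5504), mcr_range(0xb400),
           mcr_range(0x9800)); /* 1 0 0 */
    return 0;
}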
@@ -532,6 +532,8 @@ static const struct hid_device_id hammer_devices[] = {
              USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MAGNEMITE) },
     { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
              USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MASTERBALL) },
+    { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+             USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MOONBALL) },
     { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
              USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_STAFF) },
     { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
 
@@ -478,6 +478,7 @@
 #define USB_DEVICE_ID_GOOGLE_WHISKERS 0x5030
 #define USB_DEVICE_ID_GOOGLE_MASTERBALL 0x503c
 #define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d
+#define USB_DEVICE_ID_GOOGLE_MOONBALL 0x5044
 
 #define USB_VENDOR_ID_GOTOP 0x08f2
 #define USB_DEVICE_ID_SUPER_Q2 0x007f
 
@@ -726,6 +727,7 @@
 #define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085
 #define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3
 #define USB_DEVICE_ID_LENOVO_X1_TAB3 0x60b5
+#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D 0x608d
 
 #define USB_VENDOR_ID_LG 0x1fd2
 #define USB_DEVICE_ID_LG_MULTITOUCH 0x0064
@@ -458,9 +458,9 @@ static ssize_t picolcd_fb_update_rate_show(struct device *dev,
         if (ret >= PAGE_SIZE)
             break;
         else if (i == fb_update_rate)
-            ret += snprintf(buf+ret, PAGE_SIZE-ret, "[%u] ", i);
+            ret += scnprintf(buf+ret, PAGE_SIZE-ret, "[%u] ", i);
         else
-            ret += snprintf(buf+ret, PAGE_SIZE-ret, "%u ", i);
+            ret += scnprintf(buf+ret, PAGE_SIZE-ret, "%u ", i);
     if (ret > 0)
         buf[min(ret, (size_t)PAGE_SIZE)-1] = '\n';
     return ret;
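Why these snprintf()-to-scnprintf() conversions matter: snprintf() returns the length that would have been written, so accumulating its return value can push the write offset past the buffer, while scnprintf() returns what was actually stored. Illustrative userspace sketch — scnprintf() is a kernel helper, so a minimal stand-in is defined locally for the demo:

#include <stdarg.h>
#include <stdio.h>

static int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
    va_list args;
    int i;

    va_start(args, fmt);
    i = vsnprintf(buf, size, fmt, args);
    va_end(args);

    if (size == 0)
        return 0;
    return (i >= (int)size) ? (int)(size - 1) : i;
}

int main(void)
{
    char buf[8];
    int ret = 0;

    ret += snprintf(buf + ret, sizeof(buf) - ret, "%s", "0123456789");
    printf("snprintf offset: %d (past the 8-byte buffer!)\n", ret); /* 10 */

    ret = 0;
    ret += scnprintf(buf + ret, sizeof(buf) - ret, "%s", "0123456789");
    printf("scnprintf offset: %d (clamped to what fits)\n", ret); /* 7 */
    return 0;
}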
@@ -103,6 +103,7 @@ static const struct hid_device_id hid_quirks[] = {
     { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912), HID_QUIRK_MULTI_INPUT },
     { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M406XE), HID_QUIRK_MULTI_INPUT },
     { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2), HID_QUIRK_ALWAYS_POLL },
+    { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D), HID_QUIRK_ALWAYS_POLL },
     { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C007), HID_QUIRK_ALWAYS_POLL },
     { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077), HID_QUIRK_ALWAYS_POLL },
     { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS), HID_QUIRK_NOGET },
@@ -313,7 +313,7 @@ static ssize_t show_value(struct device *dev, struct device_attribute *attr,
 
         while (i < ret) {
             if (i + attribute->size > ret) {
-                len += snprintf(&buf[len],
+                len += scnprintf(&buf[len],
                         PAGE_SIZE - len,
                         "%d ", values[i]);
                 break;
 
@@ -336,10 +336,10 @@ static ssize_t show_value(struct device *dev, struct device_attribute *attr,
                 ++i;
                 break;
             }
-            len += snprintf(&buf[len], PAGE_SIZE - len,
+            len += scnprintf(&buf[len], PAGE_SIZE - len,
                     "%lld ", value);
         }
-        len += snprintf(&buf[len], PAGE_SIZE - len, "\n");
+        len += scnprintf(&buf[len], PAGE_SIZE - len, "\n");
 
         return len;
     } else if (input)
@@ -718,9 +718,6 @@ static int msc_win_set_lockout(struct msc_window *win,
 
     if (old != expect) {
         ret = -EINVAL;
-        dev_warn_ratelimited(msc_dev(win->msc),
-                     "expected lockout state %d, got %d\n",
-                     expect, old);
         goto unlock;
     }
 
@@ -741,6 +738,10 @@ unlock:
         /* from intel_th_msc_window_unlock(), don't warn if not locked */
         if (expect == WIN_LOCKED && old == new)
             return 0;
+
+        dev_warn_ratelimited(msc_dev(win->msc),
+                     "expected lockout state %d, got %d\n",
+                     expect, old);
     }
 
     return ret;
 
@@ -760,7 +761,7 @@ static int msc_configure(struct msc *msc)
     lockdep_assert_held(&msc->buf_mutex);
 
     if (msc->mode > MSC_MODE_MULTI)
-        return -ENOTSUPP;
+        return -EINVAL;
 
     if (msc->mode == MSC_MODE_MULTI) {
         if (msc_win_set_lockout(msc->cur_win, WIN_READY, WIN_INUSE))
 
@@ -1294,7 +1295,7 @@ static int msc_buffer_alloc(struct msc *msc, unsigned long *nr_pages,
     } else if (msc->mode == MSC_MODE_MULTI) {
         ret = msc_buffer_multi_alloc(msc, nr_pages, nr_wins);
     } else {
-        ret = -ENOTSUPP;
+        ret = -EINVAL;
     }
 
     if (!ret) {
 
@@ -1530,7 +1531,7 @@ static ssize_t intel_th_msc_read(struct file *file, char __user *buf,
         if (ret >= 0)
             *ppos = iter->offset;
     } else {
-        ret = -ENOTSUPP;
+        ret = -EINVAL;
     }
 
 put_count:
@@ -234,6 +234,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
         PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4da6),
         .driver_data = (kernel_ulong_t)&intel_th_2x,
     },
+    {
+        /* Elkhart Lake CPU */
+        PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4529),
+        .driver_data = (kernel_ulong_t)&intel_th_2x,
+    },
     {
         /* Elkhart Lake */
         PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4b26),
@@ -238,7 +238,7 @@ static struct configfs_attribute *sys_t_policy_attrs[] = {
 static inline bool sys_t_need_ts(struct sys_t_output *op)
 {
     if (op->node.ts_interval &&
-        time_after(op->ts_jiffies + op->node.ts_interval, jiffies)) {
+        time_after(jiffies, op->ts_jiffies + op->node.ts_interval)) {
         op->ts_jiffies = jiffies;
 
         return true;
 
@@ -250,8 +250,8 @@ static inline bool sys_t_need_ts(struct sys_t_output *op)
 static bool sys_t_need_clock_sync(struct sys_t_output *op)
 {
     if (op->node.clocksync_interval &&
-        time_after(op->clocksync_jiffies + op->node.clocksync_interval,
-               jiffies)) {
+        time_after(jiffies,
+               op->clocksync_jiffies + op->node.clocksync_interval)) {
         op->clocksync_jiffies = jiffies;
 
         return true;
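The bug here was argument order: time_after(a, b) is true when a is later than b (a wraparound-safe signed compare), so the old code effectively asked "has the interval not yet elapsed?". A minimal userspace model of the macro and the fix:

#include <stdio.h>

/* same idea as the kernel macro in include/linux/jiffies.h */
#define time_after(a, b)    ((long)((b) - (a)) < 0)

int main(void)
{
    unsigned long jiffies = 1000;
    unsigned long ts_jiffies = 900;  /* last timestamp emission */
    unsigned long interval = 50;

    /* correct: interval elapsed, emit a new timestamp */
    printf("%d\n", time_after(jiffies, ts_jiffies + interval)); /* 1 */

    /* the buggy argument order answers the opposite question */
    printf("%d\n", time_after(ts_jiffies + interval, jiffies)); /* 0 */
    return 0;
}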
@@ -313,6 +313,7 @@ static void i2c_dw_pci_remove(struct pci_dev *pdev)
     pm_runtime_get_noresume(&pdev->dev);
 
     i2c_del_adapter(&dev->adapter);
+    devm_free_irq(&pdev->dev, dev->irq, dev);
     pci_free_irq_vectors(pdev);
 }
@@ -348,7 +348,7 @@ static struct gpio_desc *i2c_gpio_get_desc(struct device *dev,
     if (ret == -ENOENT)
         retdesc = ERR_PTR(-EPROBE_DEFER);
 
-    if (ret != -EPROBE_DEFER)
+    if (PTR_ERR(retdesc) != -EPROBE_DEFER)
         dev_err(dev, "error trying to get descriptor: %d\n", ret);
 
     return retdesc;
@@ -132,11 +132,6 @@
 #define TCOBASE 0x050
 #define TCOCTL 0x054
 
-#define ACPIBASE 0x040
-#define ACPIBASE_SMI_OFF 0x030
-#define ACPICTRL 0x044
-#define ACPICTRL_EN 0x080
-
 #define SBREG_BAR 0x10
 #define SBREG_SMBCTRL 0xc6000c
 #define SBREG_SMBCTRL_DNV 0xcf000c
 
@@ -1553,7 +1548,7 @@ i801_add_tco_spt(struct i801_priv *priv, struct pci_dev *pci_dev,
     pci_bus_write_config_byte(pci_dev->bus, devfn, 0xe1, hidden);
     spin_unlock(&p2sb_spinlock);
 
-    res = &tco_res[ICH_RES_MEM_OFF];
+    res = &tco_res[1];
     if (pci_dev->device == PCI_DEVICE_ID_INTEL_DNV_SMBUS)
         res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL_DNV;
     else
 
@@ -1563,7 +1558,7 @@ i801_add_tco_spt(struct i801_priv *priv, struct pci_dev *pci_dev,
     res->flags = IORESOURCE_MEM;
 
     return platform_device_register_resndata(&pci_dev->dev, "iTCO_wdt", -1,
-                    tco_res, 3, &spt_tco_platform_data,
+                    tco_res, 2, &spt_tco_platform_data,
                     sizeof(spt_tco_platform_data));
 }
 
@@ -1576,17 +1571,16 @@ static struct platform_device *
 i801_add_tco_cnl(struct i801_priv *priv, struct pci_dev *pci_dev,
          struct resource *tco_res)
 {
-    return platform_device_register_resndata(&pci_dev->dev, "iTCO_wdt", -1,
-                    tco_res, 2, &cnl_tco_platform_data,
-                    sizeof(cnl_tco_platform_data));
+    return platform_device_register_resndata(&pci_dev->dev,
+            "iTCO_wdt", -1, tco_res, 1, &cnl_tco_platform_data,
+            sizeof(cnl_tco_platform_data));
 }
 
 static void i801_add_tco(struct i801_priv *priv)
 {
-    u32 base_addr, tco_base, tco_ctl, ctrl_val;
     struct pci_dev *pci_dev = priv->pci_dev;
-    struct resource tco_res[3], *res;
-    unsigned int devfn;
+    struct resource tco_res[2], *res;
+    u32 tco_base, tco_ctl;
 
     /* If we have ACPI based watchdog use that instead */
     if (acpi_has_watchdog())
 
@@ -1601,30 +1595,15 @@ static void i801_add_tco(struct i801_priv *priv)
         return;
 
     memset(tco_res, 0, sizeof(tco_res));
-
-    res = &tco_res[ICH_RES_IO_TCO];
+    /*
+     * Always populate the main iTCO IO resource here. The second entry
+     * for NO_REBOOT MMIO is filled by the SPT specific function.
+     */
+    res = &tco_res[0];
     res->start = tco_base & ~1;
     res->end = res->start + 32 - 1;
     res->flags = IORESOURCE_IO;
 
-    /*
-     * Power Management registers.
-     */
-    devfn = PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 2);
-    pci_bus_read_config_dword(pci_dev->bus, devfn, ACPIBASE, &base_addr);
-
-    res = &tco_res[ICH_RES_IO_SMI];
-    res->start = (base_addr & ~1) + ACPIBASE_SMI_OFF;
-    res->end = res->start + 3;
-    res->flags = IORESOURCE_IO;
-
-    /*
-     * Enable the ACPI I/O space.
-     */
-    pci_bus_read_config_dword(pci_dev->bus, devfn, ACPICTRL, &ctrl_val);
-    ctrl_val |= ACPICTRL_EN;
-    pci_bus_write_config_dword(pci_dev->bus, devfn, ACPICTRL, ctrl_val);
-
     if (priv->features & FEATURE_TCO_CNL)
         priv->tco_pdev = i801_add_tco_cnl(priv, pci_dev, tco_res);
     else
@@ -394,9 +394,17 @@ EXPORT_SYMBOL_GPL(i2c_acpi_find_adapter_by_handle);
 static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev)
 {
     struct device *dev;
+    struct i2c_client *client;
 
     dev = bus_find_device_by_acpi_dev(&i2c_bus_type, adev);
-    return dev ? i2c_verify_client(dev) : NULL;
+    if (!dev)
+        return NULL;
+
+    client = i2c_verify_client(dev);
+    if (!client)
+        put_device(dev);
+
+    return client;
 }
 
 static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value,
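The fix follows the usual lookup-reference rule: bus_find_device_by_acpi_dev() returns a referenced device, so a failed i2c_verify_client() must be paired with put_device(). A generic plain-C sketch of the pattern — every name here is hypothetical, only the shape matches:

#include <stdio.h>

struct object { int refcount; int is_client; };

static struct object *find_object(struct object *o) /* lookup takes a reference */
{
    if (!o)
        return NULL;
    o->refcount++;
    return o;
}

static void put_object(struct object *o) { o->refcount--; }

static struct object *find_client(struct object *candidate)
{
    struct object *o = find_object(candidate);

    if (!o)
        return NULL;
    if (!o->is_client) {
        put_object(o);  /* the step the fix adds: no leak on failure */
        return NULL;
    }
    return o;           /* caller now owns one reference */
}

int main(void)
{
    struct object not_a_client = { .refcount = 1, .is_client = 0 };

    find_client(&not_a_client);
    printf("refcount after failed lookup: %d\n", not_a_client.refcount); /* 1 */
    return 0;
}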
@@ -237,6 +237,7 @@ static const struct adxl372_axis_lookup adxl372_axis_lookup_table[] = {
         .realbits = 12,                 \
         .storagebits = 16,              \
         .shift = 4,                     \
+        .endianness = IIO_BE,           \
     },                                  \
 }
@@ -110,7 +110,7 @@ MODULE_DEVICE_TABLE(of, st_accel_of_match);
 
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id st_accel_acpi_match[] = {
-    {"SMO8840", (kernel_ulong_t)LNG2DM_ACCEL_DEV_NAME},
+    {"SMO8840", (kernel_ulong_t)LIS2DH12_ACCEL_DEV_NAME},
     {"SMO8A90", (kernel_ulong_t)LNG2DM_ACCEL_DEV_NAME},
     { },
 };
@@ -723,6 +723,7 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
 
     for_each_set_bit(bit, indio->active_scan_mask, indio->num_channels) {
         struct iio_chan_spec const *chan = at91_adc_chan_get(indio, bit);
+        u32 cor;
 
         if (!chan)
             continue;
 
@@ -731,6 +732,20 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
             chan->type == IIO_PRESSURE)
             continue;
 
+        if (state) {
+            cor = at91_adc_readl(st, AT91_SAMA5D2_COR);
+
+            if (chan->differential)
+                cor |= (BIT(chan->channel) |
+                    BIT(chan->channel2)) <<
+                    AT91_SAMA5D2_COR_DIFF_OFFSET;
+            else
+                cor &= ~(BIT(chan->channel) <<
+                     AT91_SAMA5D2_COR_DIFF_OFFSET);
+
+            at91_adc_writel(st, AT91_SAMA5D2_COR, cor);
+        }
+
         if (state) {
             at91_adc_writel(st, AT91_SAMA5D2_CHER,
                     BIT(chan->channel));
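What the added COR programming does: a differential channel sets the DIFF bits for both inputs of its pair, while a single-ended channel clears only its own bit. A hedged plain-C model — the 16-bit field offset is assumed for the demo, not taken from the datasheet:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)          (1u << (n))
#define COR_DIFF_OFFSET 16  /* assumed field position for the demo */

static uint32_t cor_update(uint32_t cor, int ch, int ch2, int differential)
{
    if (differential)
        cor |= (BIT(ch) | BIT(ch2)) << COR_DIFF_OFFSET;
    else
        cor &= ~(BIT(ch) << COR_DIFF_OFFSET);
    return cor;
}

int main(void)
{
    uint32_t cor = 0;

    cor = cor_update(cor, 2, 3, 1); /* pair 2/3 differential */
    printf("0x%08x\n", cor);        /* 0x000c0000 */
    cor = cor_update(cor, 2, 3, 0); /* back to single-ended: clears only ch 2 */
    printf("0x%08x\n", cor);        /* 0x00080000 */
    return 0;
}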
@@ -842,31 +842,6 @@ static inline void stm32_dfsdm_process_data(struct stm32_dfsdm_adc *adc,
     }
 }
 
-static irqreturn_t stm32_dfsdm_adc_trigger_handler(int irq, void *p)
-{
-    struct iio_poll_func *pf = p;
-    struct iio_dev *indio_dev = pf->indio_dev;
-    struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
-    int available = stm32_dfsdm_adc_dma_residue(adc);
-
-    while (available >= indio_dev->scan_bytes) {
-        s32 *buffer = (s32 *)&adc->rx_buf[adc->bufi];
-
-        stm32_dfsdm_process_data(adc, buffer);
-
-        iio_push_to_buffers_with_timestamp(indio_dev, buffer,
-                           pf->timestamp);
-        available -= indio_dev->scan_bytes;
-        adc->bufi += indio_dev->scan_bytes;
-        if (adc->bufi >= adc->buf_sz)
-            adc->bufi = 0;
-    }
-
-    iio_trigger_notify_done(indio_dev->trig);
-
-    return IRQ_HANDLED;
-}
-
 static void stm32_dfsdm_dma_buffer_done(void *data)
 {
     struct iio_dev *indio_dev = data;
 
@@ -874,11 +849,6 @@ static void stm32_dfsdm_dma_buffer_done(void *data)
     int available = stm32_dfsdm_adc_dma_residue(adc);
     size_t old_pos;
 
-    if (indio_dev->currentmode & INDIO_BUFFER_TRIGGERED) {
-        iio_trigger_poll_chained(indio_dev->trig);
-        return;
-    }
-
     /*
      * FIXME: In Kernel interface does not support cyclic DMA buffer,and
      * offers only an interface to push data samples per samples.
 
@@ -906,7 +876,15 @@ static void stm32_dfsdm_dma_buffer_done(void *data)
             adc->bufi = 0;
             old_pos = 0;
         }
-        /* regular iio buffer without trigger */
+        /*
+         * In DMA mode the trigger services of IIO are not used
+         * (e.g. no call to iio_trigger_poll).
+         * Calling irq handler associated to the hardware trigger is not
+         * relevant as the conversions have already been done. Data
+         * transfers are performed directly in DMA callback instead.
+         * This implementation avoids to call trigger irq handler that
+         * may sleep, in an atomic context (DMA irq handler context).
+         */
         if (adc->dev_data->type == DFSDM_IIO)
             iio_push_to_buffers(indio_dev, buffer);
     }
 
@@ -1536,8 +1514,7 @@ static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev)
     }
 
     ret = iio_triggered_buffer_setup(indio_dev,
-                     &iio_pollfunc_store_time,
-                     &stm32_dfsdm_adc_trigger_handler,
+                     &iio_pollfunc_store_time, NULL,
                      &stm32_dfsdm_buffer_setup_ops);
     if (ret) {
         stm32_dfsdm_dma_release(indio_dev);
@@ -91,6 +91,8 @@ config SPS30
     tristate "SPS30 particulate matter sensor"
     depends on I2C
     select CRC8
+    select IIO_BUFFER
+    select IIO_TRIGGERED_BUFFER
     help
       Say Y here to build support for the Sensirion SPS30 particulate
       matter sensor.
@@ -167,16 +167,17 @@ static int vcnl4200_init(struct vcnl4000_data *data)
     data->vcnl4200_ps.reg = VCNL4200_PS_DATA;
     switch (id) {
     case VCNL4200_PROD_ID:
-        /* Integration time is 50ms, but the experiments */
-        /* show 54ms in total. */
-        data->vcnl4200_al.sampling_rate = ktime_set(0, 54000 * 1000);
-        data->vcnl4200_ps.sampling_rate = ktime_set(0, 4200 * 1000);
+        /* Default wait time is 50ms, add 20% tolerance. */
+        data->vcnl4200_al.sampling_rate = ktime_set(0, 60000 * 1000);
+        /* Default wait time is 4.8ms, add 20% tolerance. */
+        data->vcnl4200_ps.sampling_rate = ktime_set(0, 5760 * 1000);
         data->al_scale = 24000;
         break;
     case VCNL4040_PROD_ID:
-        /* Integration time is 80ms, add 10ms. */
-        data->vcnl4200_al.sampling_rate = ktime_set(0, 100000 * 1000);
-        data->vcnl4200_ps.sampling_rate = ktime_set(0, 100000 * 1000);
+        /* Default wait time is 80ms, add 20% tolerance. */
+        data->vcnl4200_al.sampling_rate = ktime_set(0, 96000 * 1000);
+        /* Default wait time is 5ms, add 20% tolerance. */
+        data->vcnl4200_ps.sampling_rate = ktime_set(0, 6000 * 1000);
         data->al_scale = 120000;
         break;
     }
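The replacement constants are just "default wait time plus 20%" converted to nanoseconds for ktime_set(0, ns). A quick plain-C check of the arithmetic:

#include <stdio.h>

int main(void)
{
    double wait_ms[] = { 50.0, 4.8, 80.0, 5.0 };

    for (int i = 0; i < 4; i++) {
        long ns = (long)(wait_ms[i] * 1.2 * 1000 * 1000 + 0.5);
        printf("%.1f ms -> %ld ns\n", wait_ms[i], ns);
    }
    /* prints 60000000, 5760000, 96000000, 6000000 —
     * matching 60000*1000, 5760*1000, 96000*1000 and 6000*1000 above */
    return 0;
}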
@@ -564,7 +564,7 @@ static int ak8974_read_raw(struct iio_dev *indio_dev,
          * We read all axes and discard all but one, for optimized
          * reading, use the triggered buffer.
          */
-        *val = le16_to_cpu(hw_values[chan->address]);
+        *val = (s16)le16_to_cpu(hw_values[chan->address]);
 
         ret = IIO_VAL_INT;
     }
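The (s16) cast is a sign-extension fix: the raw little-endian register value is unsigned, so assigning it straight into the wider signed *val turns negative axis readings into large positive ones. Stand-alone demo:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t raw = 0xfff6;        /* -10 as 16-bit two's complement */
    int val_wrong = raw;          /* 65526: no sign extension */
    int val_right = (int16_t)raw; /* -10: sign-extended */

    printf("without cast: %d, with cast: %d\n", val_wrong, val_right);
    return 0;
}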
@@ -269,7 +269,7 @@ static const struct iio_chan_spec ping_chan_spec[] = {
 
 static const struct of_device_id of_ping_match[] = {
     { .compatible = "parallax,ping", .data = &pa_ping_cfg},
-    { .compatible = "parallax,laserping", .data = &pa_ping_cfg},
+    { .compatible = "parallax,laserping", .data = &pa_laser_ping_cfg},
     {},
 };