commit 9334030c3b

Merge remote-tracking branch 'torvalds/master' into perf/urgent

To check if more kernel API sync is needed and also to see if the perf build
tests continue to pass.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

3	.mailmap
@@ -80,6 +80,9 @@ Chris Chiu <chris.chiu@canonical.com> <chiu@endlessos.org>
Christian Borntraeger <borntraeger@linux.ibm.com> <borntraeger@de.ibm.com>
Christian Borntraeger <borntraeger@linux.ibm.com> <cborntra@de.ibm.com>
Christian Borntraeger <borntraeger@linux.ibm.com> <borntrae@de.ibm.com>
Christian Brauner <brauner@kernel.org> <christian@brauner.io>
Christian Brauner <brauner@kernel.org> <christian.brauner@canonical.com>
Christian Brauner <brauner@kernel.org> <christian.brauner@ubuntu.com>
Christophe Ricard <christophe.ricard@gmail.com>
Christoph Hellwig <hch@lst.de>
Colin Ian King <colin.king@intel.com> <colin.king@canonical.com>
@@ -100,6 +100,8 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | Cortex-A510     | #2051678        | ARM64_ERRATUM_2051678       |
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | Cortex-A510     | #2077057        | ARM64_ERRATUM_2077057       |
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | Cortex-A710     | #2119858        | ARM64_ERRATUM_2119858       |
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | Cortex-A710     | #2054223        | ARM64_ERRATUM_2054223       |
@@ -7,6 +7,14 @@ directory. These are intended to be small tests to exercise individual code
paths in the kernel. Tests are intended to be run after building, installing
and booting a kernel.

Kselftest from mainline can be run on older stable kernels. Running tests
from mainline offers the best coverage. Several test rings run mainline
kselftest suite on stable releases. The reason is that when a new test
gets added to test existing code to regression test a bug, we should be
able to run that test on an older kernel. Hence, it is important to keep
code that can still test an older kernel and make sure it skips the test
gracefully on newer releases.

You can find additional information on Kselftest framework, how to
write new tests using the framework on Kselftest wiki:
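The paragraph above asks that tests added to mainline kselftest still run on older stable kernels and skip gracefully when the feature under test is absent. A minimal sketch of that pattern is below; the probed sysfs path and the messages are made up for illustration, and only the ksft_* TAP helpers from tools/testing/selftests/kselftest.h are assumed to be real:

	#include <unistd.h>
	#include "../kselftest.h"	/* ksft_* TAP helpers shipped with the selftests */

	int main(void)
	{
		ksft_print_header();
		ksft_set_plan(1);

		/* Hypothetical feature probe: on a kernel that predates the
		 * interface being exercised, skip rather than fail. */
		if (access("/sys/kernel/some_new_interface", F_OK) != 0)
			ksft_exit_skip("interface not present on this kernel\n");

		ksft_test_result_pass("interface present\n");
		ksft_exit_pass();
	}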
@@ -107,6 +107,10 @@ properties:
      - const: imem
      - const: config

  qcom,qmp:
    $ref: /schemas/types.yaml#/definitions/phandle
    description: phandle to the AOSS side-channel message RAM

  qcom,smem-states:
    $ref: /schemas/types.yaml#/definitions/phandle-array
    description: State bits used in by the AP to signal the modem.
@@ -222,6 +226,8 @@ examples:
                    "imem",
                    "config";

        qcom,qmp = <&aoss_qmp>;

        qcom,smem-states = <&ipa_smp2p_out 0>,
                           <&ipa_smp2p_out 1>;
        qcom,smem-state-names = "ipa-clock-enabled-valid",
@@ -23,8 +23,9 @@ properties:
    minItems: 1
    maxItems: 256
    items:
      minimum: 0
      maximum: 256
    items:
      - minimum: 0
        maximum: 256
    description:
      Chip select used by the device.
@@ -462,6 +462,10 @@ operation table looks like the following::

			       struct iov_iter *iter,
			       netfs_io_terminated_t term_func,
			       void *term_func_priv);

		int (*query_occupancy)(struct netfs_cache_resources *cres,
				       loff_t start, size_t len, size_t granularity,
				       loff_t *_data_start, size_t *_data_len);
	};

With a termination handler function pointer::
@@ -536,6 +540,18 @@ The methods defined in the table are:
   indicating whether the termination is definitely happening in the caller's
   context.

 * ``query_occupancy()``

   [Required] Called to find out where the next piece of data is within a
   particular region of the cache. The start and length of the region to be
   queried are passed in, along with the granularity to which the answer needs
   to be aligned. The function passes back the start and length of the data,
   if any, available within that region. Note that there may be a hole at the
   front.

   It returns 0 if some data was found, -ENODATA if there was no usable data
   within the region or -ENOBUFS if there is no caching on this file.

Note that these methods are passed a pointer to the cache resource structure,
not the read request structure as they could be used in other situations where
there isn't a read request structure as well, such as writing dirty data to the
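As an illustration of the ``query_occupancy()`` contract documented above, here is a rough sketch for a hypothetical cache backend that tracks a single contiguous extent per file; my_extent and my_cache_lookup_extent() are invented for the example and are not part of the netfs API, and rounding to the requested granularity is left as a comment:

	/* Sketch only: report the part of one cached extent that overlaps the
	 * queried window, following the semantics described above. */
	static int my_cache_query_occupancy(struct netfs_cache_resources *cres,
					    loff_t start, size_t len, size_t granularity,
					    loff_t *_data_start, size_t *_data_len)
	{
		struct my_extent ext;	/* hypothetical { loff_t start; size_t len; } */

		if (!my_cache_lookup_extent(cres, &ext))	/* assumed helper */
			return -ENOBUFS;	/* no caching on this file */

		if (ext.start + ext.len <= start || ext.start >= start + (loff_t)len)
			return -ENODATA;	/* no usable data in the region */

		*_data_start = max_t(loff_t, ext.start, start);
		*_data_len = min_t(loff_t, ext.start + ext.len, start + len) - *_data_start;

		/* A real implementation would also round the answer out to the
		 * 'granularity' the caller asked for. */
		return 0;
	}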
@@ -300,30 +300,6 @@ Contact: Daniel Vetter, Noralf Tronnes

Level: Advanced

Garbage collect fbdev scrolling acceleration
--------------------------------------------

Scroll acceleration has been disabled in fbcon. Now it works as the old
SCROLL_REDRAW mode. A ton of code was removed in fbcon.c and the hook bmove was
removed from fbcon_ops.
Remaining tasks:

- a bunch of the hooks in fbcon_ops could be removed or simplified by calling
  directly instead of the function table (with a switch on p->rotate)

- fb_copyarea is unused after this, and can be deleted from all drivers

- after that, fb_copyarea can be deleted from fb_ops in include/linux/fb.h as
  well as cfb_copyarea

Note that not all acceleration code can be deleted, since clearing and cursor
support is still accelerated, which might be good candidates for further
deletion projects.

Contact: Daniel Vetter

Level: Intermediate

idr_init_base()
---------------
@@ -115,6 +115,7 @@ Code  Seq#    Include File                              Comments
'B'   00-1F   linux/cciss_ioctl.h                       conflict!
'B'   00-0F   include/linux/pmu.h                       conflict!
'B'   C0-FF   advanced bbus                             <mailto:maassen@uni-freiburg.de>
'B'   00-0F   xen/xenbus_dev.h                          conflict!
'C'   all     linux/soundcard.h                         conflict!
'C'   01-2F   linux/capi.h                              conflict!
'C'   F0-FF   drivers/net/wan/cosa.h                    conflict!
@@ -134,6 +135,7 @@ Code  Seq#    Include File                              Comments
'F'   80-8F   linux/arcfb.h                             conflict!
'F'   DD      video/sstfb.h                             conflict!
'G'   00-3F   drivers/misc/sgi-gru/grulib.h             conflict!
'G'   00-0F   xen/gntalloc.h, xen/gntdev.h              conflict!
'H'   00-7F   linux/hiddev.h                            conflict!
'H'   00-0F   linux/hidraw.h                            conflict!
'H'   01      linux/mei.h                               conflict!
@@ -176,6 +178,7 @@ Code  Seq#    Include File                              Comments
'P'   60-6F   sound/sscape_ioctl.h                      conflict!
'P'   00-0F   drivers/usb/class/usblp.c                 conflict!
'P'   01-09   drivers/misc/pci_endpoint_test.c          conflict!
'P'   00-0F   xen/privcmd.h                             conflict!
'Q'   all     linux/soundcard.h
'R'   00-1F   linux/random.h                            conflict!
'R'   01      linux/rfkill.h                            conflict!
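For readers unfamiliar with this registry: a driver claims a magic character and a sequence-number range here, and then builds its ioctl command numbers from them with the _IO/_IOR/_IOW/_IOWR macros so that mismatched commands can be detected. A generic, hypothetical example (the macros are real; the names and struct are made up):

	#include <linux/ioctl.h>
	#include <linux/types.h>

	/* Hypothetical driver that registered magic 'X', sequence 00-01. */
	#define EXAMPLE_IOC_MAGIC	'X'

	struct example_config {
		__u32 flags;
		__u32 timeout_ms;
	};

	/* Direction, magic, sequence number and argument size are all encoded. */
	#define EXAMPLE_IOC_RESET	_IO(EXAMPLE_IOC_MAGIC, 0x00)
	#define EXAMPLE_IOC_SET_CONF	_IOW(EXAMPLE_IOC_MAGIC, 0x01, struct example_config)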
19	MAINTAINERS
@@ -4157,9 +4157,8 @@ N:	csky
K:	csky

CA8210 IEEE-802.15.4 RADIO DRIVER
M:	Harry Morris <h.morris@cascoda.com>
L:	linux-wpan@vger.kernel.org
S:	Maintained
S:	Orphan
W:	https://github.com/Cascoda/ca8210-linux.git
F:	Documentation/devicetree/bindings/net/ieee802154/ca8210.txt
F:	drivers/net/ieee802154/ca8210.c
@@ -10880,6 +10879,12 @@ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
F:	drivers/ata/pata_arasan_cf.c
F:	include/linux/pata_arasan_cf_data.h

LIBATA PATA DRIVERS
R:	Sergey Shtylyov <s.shtylyov@omp.ru>
L:	linux-ide@vger.kernel.org
F:	drivers/ata/ata_*.c
F:	drivers/ata/pata_*.c

LIBATA PATA FARADAY FTIDE010 AND GEMINI SATA BRIDGE DRIVERS
M:	Linus Walleij <linus.walleij@linaro.org>
L:	linux-ide@vger.kernel.org
@@ -12400,7 +12405,7 @@ F:	include/uapi/linux/membarrier.h
F:	kernel/sched/membarrier.c

MEMBLOCK
M:	Mike Rapoport <rppt@linux.ibm.com>
M:	Mike Rapoport <rppt@kernel.org>
L:	linux-mm@kvack.org
S:	Maintained
F:	Documentation/core-api/boot-time-mm.rst
@@ -16469,6 +16474,14 @@ F:	Documentation/devicetree/bindings/i2c/renesas,rmobile-iic.yaml
F:	drivers/i2c/busses/i2c-rcar.c
F:	drivers/i2c/busses/i2c-sh_mobile.c

RENESAS R-CAR SATA DRIVER
R:	Sergey Shtylyov <s.shtylyov@omp.ru>
S:	Supported
L:	linux-ide@vger.kernel.org
L:	linux-renesas-soc@vger.kernel.org
F:	Documentation/devicetree/bindings/ata/renesas,rcar-sata.yaml
F:	drivers/ata/sata_rcar.c

RENESAS R-CAR THERMAL DRIVERS
M:	Niklas Söderlund <niklas.soderlund@ragnatech.se>
L:	linux-renesas-soc@vger.kernel.org
@@ -13,12 +13,12 @@
static int crypto_blake2s_update_arm(struct shash_desc *desc,
				     const u8 *in, unsigned int inlen)
{
	return crypto_blake2s_update(desc, in, inlen, blake2s_compress);
	return crypto_blake2s_update(desc, in, inlen, false);
}

static int crypto_blake2s_final_arm(struct shash_desc *desc, u8 *out)
{
	return crypto_blake2s_final(desc, out, blake2s_compress);
	return crypto_blake2s_final(desc, out, false);
}

#define BLAKE2S_ALG(name, driver_name, digest_size) \
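In this hunk (and in the matching x86 and generic glue hunks later in this merge) the shash helpers stop receiving a compression-function pointer and take a boolean instead, with the generic glue passing true and the arch glue passing false. The apparent intent, shown here only as a standalone illustration of the pattern rather than the kernel's actual helper, is to select the compression routine with a direct call instead of an indirect one:

	#include <stdbool.h>

	struct state { unsigned int h[8]; };		/* stand-in for the hash state */

	static void compress_generic(struct state *s) { /* portable C path */ }
	static void compress_arch(struct state *s)     { /* optimized path  */ }

	/* Boolean-selected direct calls replace a call through a function
	 * pointer, which is cheaper and friendlier to control-flow integrity. */
	static void compress_one(struct state *s, bool force_generic)
	{
		if (force_generic)
			compress_generic(s);
		else
			compress_arch(s);
	}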
@@ -680,6 +680,22 @@ config ARM64_ERRATUM_2051678

	  If unsure, say Y.

config ARM64_ERRATUM_2077057
	bool "Cortex-A510: 2077057: workaround software-step corrupting SPSR_EL2"
	help
	  This option adds the workaround for ARM Cortex-A510 erratum 2077057.
	  Affected Cortex-A510 may corrupt SPSR_EL2 when the a step exception is
	  expected, but a Pointer Authentication trap is taken instead. The
	  erratum causes SPSR_EL1 to be copied to SPSR_EL2, which could allow
	  EL1 to cause a return to EL2 with a guest controlled ELR_EL2.

	  This can only happen when EL2 is stepping EL1.

	  When these conditions occur, the SPSR_EL2 value is unchanged from the
	  previous guest entry, and can be restored from the in-memory copy.

	  If unsure, say Y.

config ARM64_ERRATUM_2119858
	bool "Cortex-A710/X2: 2119858: workaround TRBE overwriting trace data in FILL mode"
	default y
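The recovery that this help text describes is implemented later in this same merge, in the KVM hyp exit path (the synchronize_vcpu_pstate() hunk further down). Condensed from that hunk, the workaround amounts to:

	/* If Cortex-A510 erratum 2077057 may have fired (EL2 was single-stepping
	 * the guest and a Pointer Authentication trap was taken instead),
	 * SPSR_EL2 cannot be trusted; it still holds the value from the previous
	 * guest entry, so rewrite it from the in-memory copy before use. */
	if (cpus_have_final_cap(ARM64_WORKAROUND_2077057) &&
	    vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
	    *vcpu_cpsr(vcpu) & DBG_SPSR_SS &&
	    ESR_ELx_EC(read_sysreg_el2(SYS_ESR)) == ESR_ELx_EC_PAC)
		write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);

	vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);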
@@ -600,6 +600,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
		CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2077057
	{
		.desc = "ARM erratum 2077057",
		.capability = ARM64_WORKAROUND_2077057,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2064142
	{
		.desc = "ARM erratum 2064142",
@ -797,6 +797,24 @@ static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret)
|
||||
xfer_to_guest_mode_work_pending();
|
||||
}
|
||||
|
||||
/*
|
||||
* Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
|
||||
* the vCPU is running.
|
||||
*
|
||||
* This must be noinstr as instrumentation may make use of RCU, and this is not
|
||||
* safe during the EQS.
|
||||
*/
|
||||
static int noinstr kvm_arm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
int ret;
|
||||
|
||||
guest_state_enter_irqoff();
|
||||
ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
|
||||
guest_state_exit_irqoff();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
|
||||
* @vcpu: The VCPU pointer
|
||||
@ -881,9 +899,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
|
||||
* Enter the guest
|
||||
*/
|
||||
trace_kvm_entry(*vcpu_pc(vcpu));
|
||||
guest_enter_irqoff();
|
||||
guest_timing_enter_irqoff();
|
||||
|
||||
ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
|
||||
ret = kvm_arm_vcpu_enter_exit(vcpu);
|
||||
|
||||
vcpu->mode = OUTSIDE_GUEST_MODE;
|
||||
vcpu->stat.exits++;
|
||||
@ -918,26 +936,23 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
|
||||
kvm_arch_vcpu_ctxsync_fp(vcpu);
|
||||
|
||||
/*
|
||||
* We may have taken a host interrupt in HYP mode (ie
|
||||
* while executing the guest). This interrupt is still
|
||||
* pending, as we haven't serviced it yet!
|
||||
* We must ensure that any pending interrupts are taken before
|
||||
* we exit guest timing so that timer ticks are accounted as
|
||||
* guest time. Transiently unmask interrupts so that any
|
||||
* pending interrupts are taken.
|
||||
*
|
||||
* We're now back in SVC mode, with interrupts
|
||||
* disabled. Enabling the interrupts now will have
|
||||
* the effect of taking the interrupt again, in SVC
|
||||
* mode this time.
|
||||
* Per ARM DDI 0487G.b section D1.13.4, an ISB (or other
|
||||
* context synchronization event) is necessary to ensure that
|
||||
* pending interrupts are taken.
|
||||
*/
|
||||
local_irq_enable();
|
||||
isb();
|
||||
local_irq_disable();
|
||||
|
||||
guest_timing_exit_irqoff();
|
||||
|
||||
local_irq_enable();
|
||||
|
||||
/*
|
||||
* We do local_irq_enable() before calling guest_exit() so
|
||||
* that if a timer interrupt hits while running the guest we
|
||||
* account that tick as being spent in the guest. We enable
|
||||
* preemption after calling guest_exit() so that if we get
|
||||
* preempted we make sure ticks after that is not counted as
|
||||
* guest time.
|
||||
*/
|
||||
guest_exit();
|
||||
trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
|
||||
|
||||
/* Exit types that need handling before we can be preempted */
|
||||
|
@ -228,6 +228,14 @@ int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
|
||||
{
|
||||
struct kvm_run *run = vcpu->run;
|
||||
|
||||
if (ARM_SERROR_PENDING(exception_index)) {
|
||||
/*
|
||||
* The SError is handled by handle_exit_early(). If the guest
|
||||
* survives it will re-execute the original instruction.
|
||||
*/
|
||||
return 1;
|
||||
}
|
||||
|
||||
exception_index = ARM_EXCEPTION_CODE(exception_index);
|
||||
|
||||
switch (exception_index) {
|
||||
|
@ -402,6 +402,24 @@ static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline void synchronize_vcpu_pstate(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
{
|
||||
/*
|
||||
* Check for the conditions of Cortex-A510's #2077057. When these occur
|
||||
* SPSR_EL2 can't be trusted, but isn't needed either as it is
|
||||
* unchanged from the value in vcpu_gp_regs(vcpu)->pstate.
|
||||
* Are we single-stepping the guest, and took a PAC exception from the
|
||||
* active-not-pending state?
|
||||
*/
|
||||
if (cpus_have_final_cap(ARM64_WORKAROUND_2077057) &&
|
||||
vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
|
||||
*vcpu_cpsr(vcpu) & DBG_SPSR_SS &&
|
||||
ESR_ELx_EC(read_sysreg_el2(SYS_ESR)) == ESR_ELx_EC_PAC)
|
||||
write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);
|
||||
|
||||
vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
|
||||
}
|
||||
|
||||
/*
|
||||
* Return true when we were able to fixup the guest exit and should return to
|
||||
* the guest, false when we should restore the host state and return to the
|
||||
@ -413,7 +431,7 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
* Save PSTATE early so that we can evaluate the vcpu mode
|
||||
* early on.
|
||||
*/
|
||||
vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
|
||||
synchronize_vcpu_pstate(vcpu, exit_code);
|
||||
|
||||
/*
|
||||
* Check whether we want to repaint the state one way or
|
||||
@ -424,7 +442,8 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
|
||||
vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
|
||||
|
||||
if (ARM_SERROR_PENDING(*exit_code)) {
|
||||
if (ARM_SERROR_PENDING(*exit_code) &&
|
||||
ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ) {
|
||||
u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
|
||||
|
||||
/*
|
||||
|
@@ -55,9 +55,10 @@ WORKAROUND_1418040
WORKAROUND_1463225
WORKAROUND_1508412
WORKAROUND_1542419
WORKAROUND_2064142
WORKAROUND_2038923
WORKAROUND_1902691
WORKAROUND_2038923
WORKAROUND_2064142
WORKAROUND_2077057
WORKAROUND_TRBE_OVERWRITE_FILL_MODE
WORKAROUND_TSB_FLUSH_FAILURE
WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
@ -74,7 +74,7 @@
|
||||
#define EXC(inst_reg,addr,handler) \
|
||||
9: inst_reg, addr; \
|
||||
.section __ex_table,"a"; \
|
||||
PTR 9b, handler; \
|
||||
PTR_WD 9b, handler; \
|
||||
.previous
|
||||
|
||||
/*
|
||||
|
@ -414,6 +414,24 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
|
||||
return -ENOIOCTLCMD;
|
||||
}
|
||||
|
||||
/*
|
||||
* Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
|
||||
* the vCPU is running.
|
||||
*
|
||||
* This must be noinstr as instrumentation may make use of RCU, and this is not
|
||||
* safe during the EQS.
|
||||
*/
|
||||
static int noinstr kvm_mips_vcpu_enter_exit(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
int ret;
|
||||
|
||||
guest_state_enter_irqoff();
|
||||
ret = kvm_mips_callbacks->vcpu_run(vcpu);
|
||||
guest_state_exit_irqoff();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
int r = -EINTR;
|
||||
@ -434,7 +452,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
|
||||
lose_fpu(1);
|
||||
|
||||
local_irq_disable();
|
||||
guest_enter_irqoff();
|
||||
guest_timing_enter_irqoff();
|
||||
trace_kvm_enter(vcpu);
|
||||
|
||||
/*
|
||||
@ -445,10 +463,23 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
|
||||
*/
|
||||
smp_store_mb(vcpu->mode, IN_GUEST_MODE);
|
||||
|
||||
r = kvm_mips_callbacks->vcpu_run(vcpu);
|
||||
r = kvm_mips_vcpu_enter_exit(vcpu);
|
||||
|
||||
/*
|
||||
* We must ensure that any pending interrupts are taken before
|
||||
* we exit guest timing so that timer ticks are accounted as
|
||||
* guest time. Transiently unmask interrupts so that any
|
||||
* pending interrupts are taken.
|
||||
*
|
||||
* TODO: is there a barrier which ensures that pending interrupts are
|
||||
* recognised? Currently this just hopes that the CPU takes any pending
|
||||
* interrupts between the enable and disable.
|
||||
*/
|
||||
local_irq_enable();
|
||||
local_irq_disable();
|
||||
|
||||
trace_kvm_out(vcpu);
|
||||
guest_exit_irqoff();
|
||||
guest_timing_exit_irqoff();
|
||||
local_irq_enable();
|
||||
|
||||
out:
|
||||
@ -1168,7 +1199,7 @@ static void kvm_mips_set_c0_status(void)
|
||||
/*
|
||||
* Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
|
||||
*/
|
||||
int kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
|
||||
static int __kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm_run *run = vcpu->run;
|
||||
u32 cause = vcpu->arch.host_cp0_cause;
|
||||
@ -1357,6 +1388,17 @@ int kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
|
||||
return ret;
|
||||
}
|
||||
|
||||
int noinstr kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
int ret;
|
||||
|
||||
guest_state_exit_irqoff();
|
||||
ret = __kvm_mips_handle_exit(vcpu);
|
||||
guest_state_enter_irqoff();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Enable FPU for guest and restore context */
|
||||
void kvm_own_fpu(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
|
@ -458,8 +458,8 @@ void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu)
|
||||
/**
|
||||
* _kvm_vz_save_htimer() - Switch to software emulation of guest timer.
|
||||
* @vcpu: Virtual CPU.
|
||||
* @compare: Pointer to write compare value to.
|
||||
* @cause: Pointer to write cause value to.
|
||||
* @out_compare: Pointer to write compare value to.
|
||||
* @out_cause: Pointer to write cause value to.
|
||||
*
|
||||
* Save VZ guest timer state and switch to software emulation of guest CP0
|
||||
* timer. The hard timer must already be in use, so preemption should be
|
||||
@ -1541,11 +1541,14 @@ static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_trap_vz_handle_cop_unusuable() - Guest used unusable coprocessor.
|
||||
* kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor.
|
||||
* @vcpu: Virtual CPU context.
|
||||
*
|
||||
* Handle when the guest attempts to use a coprocessor which hasn't been allowed
|
||||
* by the root context.
|
||||
*
|
||||
* Return: value indicating whether to resume the host or the guest
|
||||
* (RESUME_HOST or RESUME_GUEST)
|
||||
*/
|
||||
static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
@ -1592,6 +1595,9 @@ static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
|
||||
*
|
||||
* Handle when the guest attempts to use MSA when it is disabled in the root
|
||||
* context.
|
||||
*
|
||||
* Return: value indicating whether to resume the host or the guest
|
||||
* (RESUME_HOST or RESUME_GUEST)
|
||||
*/
|
||||
static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
|
@ -90,6 +90,7 @@ int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
|
||||
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm_cpu_context *cntx;
|
||||
struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
|
||||
|
||||
/* Mark this VCPU never ran */
|
||||
vcpu->arch.ran_atleast_once = false;
|
||||
@ -106,6 +107,9 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
|
||||
cntx->hstatus |= HSTATUS_SPVP;
|
||||
cntx->hstatus |= HSTATUS_SPV;
|
||||
|
||||
/* By default, make CY, TM, and IR counters accessible in VU mode */
|
||||
reset_csr->scounteren = 0x7;
|
||||
|
||||
/* Setup VCPU timer */
|
||||
kvm_riscv_vcpu_timer_init(vcpu);
|
||||
|
||||
@ -699,6 +703,20 @@ static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
|
||||
csr_write(CSR_HVIP, csr->hvip);
|
||||
}
|
||||
|
||||
/*
|
||||
* Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
|
||||
* the vCPU is running.
|
||||
*
|
||||
* This must be noinstr as instrumentation may make use of RCU, and this is not
|
||||
* safe during the EQS.
|
||||
*/
|
||||
static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
guest_state_enter_irqoff();
|
||||
__kvm_riscv_switch_to(&vcpu->arch);
|
||||
guest_state_exit_irqoff();
|
||||
}
|
||||
|
||||
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
int ret;
|
||||
@ -790,9 +808,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
|
||||
continue;
|
||||
}
|
||||
|
||||
guest_enter_irqoff();
|
||||
guest_timing_enter_irqoff();
|
||||
|
||||
__kvm_riscv_switch_to(&vcpu->arch);
|
||||
kvm_riscv_vcpu_enter_exit(vcpu);
|
||||
|
||||
vcpu->mode = OUTSIDE_GUEST_MODE;
|
||||
vcpu->stat.exits++;
|
||||
@ -812,25 +830,21 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
|
||||
kvm_riscv_vcpu_sync_interrupts(vcpu);
|
||||
|
||||
/*
|
||||
* We may have taken a host interrupt in VS/VU-mode (i.e.
|
||||
* while executing the guest). This interrupt is still
|
||||
* pending, as we haven't serviced it yet!
|
||||
* We must ensure that any pending interrupts are taken before
|
||||
* we exit guest timing so that timer ticks are accounted as
|
||||
* guest time. Transiently unmask interrupts so that any
|
||||
* pending interrupts are taken.
|
||||
*
|
||||
* We're now back in HS-mode with interrupts disabled
|
||||
* so enabling the interrupts now will have the effect
|
||||
* of taking the interrupt again, in HS-mode this time.
|
||||
* There's no barrier which ensures that pending interrupts are
|
||||
* recognised, so we just hope that the CPU takes any pending
|
||||
* interrupts between the enable and disable.
|
||||
*/
|
||||
local_irq_enable();
|
||||
local_irq_disable();
|
||||
|
||||
/*
|
||||
* We do local_irq_enable() before calling guest_exit() so
|
||||
* that if a timer interrupt hits while running the guest
|
||||
* we account that tick as being spent in the guest. We
|
||||
* enable preemption after calling guest_exit() so that if
|
||||
* we get preempted we make sure ticks after that is not
|
||||
* counted as guest time.
|
||||
*/
|
||||
guest_exit();
|
||||
guest_timing_exit_irqoff();
|
||||
|
||||
local_irq_enable();
|
||||
|
||||
preempt_enable();
|
||||
|
||||
|
@ -9,6 +9,7 @@
|
||||
#include <linux/errno.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/kvm_host.h>
|
||||
#include <linux/version.h>
|
||||
#include <asm/csr.h>
|
||||
#include <asm/sbi.h>
|
||||
#include <asm/kvm_vcpu_timer.h>
|
||||
@ -32,7 +33,7 @@ static int kvm_sbi_ext_base_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
|
||||
*out_val = KVM_SBI_IMPID;
|
||||
break;
|
||||
case SBI_EXT_BASE_GET_IMP_VERSION:
|
||||
*out_val = 0;
|
||||
*out_val = LINUX_VERSION_CODE;
|
||||
break;
|
||||
case SBI_EXT_BASE_PROBE_EXT:
|
||||
if ((cp->a0 >= SBI_EXT_EXPERIMENTAL_START &&
|
||||
|
@ -18,12 +18,12 @@
|
||||
static int crypto_blake2s_update_x86(struct shash_desc *desc,
|
||||
const u8 *in, unsigned int inlen)
|
||||
{
|
||||
return crypto_blake2s_update(desc, in, inlen, blake2s_compress);
|
||||
return crypto_blake2s_update(desc, in, inlen, false);
|
||||
}
|
||||
|
||||
static int crypto_blake2s_final_x86(struct shash_desc *desc, u8 *out)
|
||||
{
|
||||
return crypto_blake2s_final(desc, out, blake2s_compress);
|
||||
return crypto_blake2s_final(desc, out, false);
|
||||
}
|
||||
|
||||
#define BLAKE2S_ALG(name, driver_name, digest_size) \
|
||||
|
@ -82,7 +82,7 @@ KVM_X86_OP_NULL(guest_apic_has_interrupt)
|
||||
KVM_X86_OP(load_eoi_exitmap)
|
||||
KVM_X86_OP(set_virtual_apic_mode)
|
||||
KVM_X86_OP_NULL(set_apic_access_page_addr)
|
||||
KVM_X86_OP(deliver_posted_interrupt)
|
||||
KVM_X86_OP(deliver_interrupt)
|
||||
KVM_X86_OP_NULL(sync_pir_to_irr)
|
||||
KVM_X86_OP(set_tss_addr)
|
||||
KVM_X86_OP(set_identity_map_addr)
|
||||
|
@ -1410,7 +1410,8 @@ struct kvm_x86_ops {
|
||||
void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
|
||||
void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
|
||||
void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu);
|
||||
int (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
|
||||
void (*deliver_interrupt)(struct kvm_lapic *apic, int delivery_mode,
|
||||
int trig_mode, int vector);
|
||||
int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
|
||||
int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
|
||||
int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
|
||||
|
@ -43,20 +43,6 @@ static inline uint32_t xen_cpuid_base(void)
|
||||
return hypervisor_cpuid_base("XenVMMXenVMM", 2);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_XEN
|
||||
extern bool __init xen_hvm_need_lapic(void);
|
||||
|
||||
static inline bool __init xen_x2apic_para_available(void)
|
||||
{
|
||||
return xen_hvm_need_lapic();
|
||||
}
|
||||
#else
|
||||
static inline bool __init xen_x2apic_para_available(void)
|
||||
{
|
||||
return (xen_cpuid_base() != 0);
|
||||
}
|
||||
#endif
|
||||
|
||||
struct pci_dev;
|
||||
|
||||
#ifdef CONFIG_XEN_PV_DOM0
|
||||
|
@ -554,12 +554,13 @@ void kvm_set_cpu_caps(void)
|
||||
);
|
||||
|
||||
kvm_cpu_cap_mask(CPUID_7_0_EBX,
|
||||
F(FSGSBASE) | F(SGX) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
|
||||
F(BMI2) | F(ERMS) | F(INVPCID) | F(RTM) | 0 /*MPX*/ | F(RDSEED) |
|
||||
F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) |
|
||||
F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) |
|
||||
F(SHA_NI) | F(AVX512BW) | F(AVX512VL) | 0 /*INTEL_PT*/
|
||||
);
|
||||
F(FSGSBASE) | F(SGX) | F(BMI1) | F(HLE) | F(AVX2) |
|
||||
F(FDP_EXCPTN_ONLY) | F(SMEP) | F(BMI2) | F(ERMS) | F(INVPCID) |
|
||||
F(RTM) | F(ZERO_FCS_FDS) | 0 /*MPX*/ | F(AVX512F) |
|
||||
F(AVX512DQ) | F(RDSEED) | F(ADX) | F(SMAP) | F(AVX512IFMA) |
|
||||
F(CLFLUSHOPT) | F(CLWB) | 0 /*INTEL_PT*/ | F(AVX512PF) |
|
||||
F(AVX512ER) | F(AVX512CD) | F(SHA_NI) | F(AVX512BW) |
|
||||
F(AVX512VL));
|
||||
|
||||
kvm_cpu_cap_mask(CPUID_7_ECX,
|
||||
F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | F(RDPID) |
|
||||
|
@ -1096,14 +1096,8 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
|
||||
apic->regs + APIC_TMR);
|
||||
}
|
||||
|
||||
if (static_call(kvm_x86_deliver_posted_interrupt)(vcpu, vector)) {
|
||||
kvm_lapic_set_irr(vector, apic);
|
||||
kvm_make_request(KVM_REQ_EVENT, vcpu);
|
||||
kvm_vcpu_kick(vcpu);
|
||||
} else {
|
||||
trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode,
|
||||
trig_mode, vector);
|
||||
}
|
||||
static_call(kvm_x86_deliver_interrupt)(apic, delivery_mode,
|
||||
trig_mode, vector);
|
||||
break;
|
||||
|
||||
case APIC_DM_REMRD:
|
||||
|
@ -3291,6 +3291,21 @@ static void svm_set_irq(struct kvm_vcpu *vcpu)
|
||||
SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
|
||||
}
|
||||
|
||||
static void svm_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
|
||||
int trig_mode, int vector)
|
||||
{
|
||||
struct kvm_vcpu *vcpu = apic->vcpu;
|
||||
|
||||
if (svm_deliver_avic_intr(vcpu, vector)) {
|
||||
kvm_lapic_set_irr(vector, apic);
|
||||
kvm_make_request(KVM_REQ_EVENT, vcpu);
|
||||
kvm_vcpu_kick(vcpu);
|
||||
} else {
|
||||
trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode,
|
||||
trig_mode, vector);
|
||||
}
|
||||
}
|
||||
|
||||
static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
|
||||
{
|
||||
struct vcpu_svm *svm = to_svm(vcpu);
|
||||
@ -3615,7 +3630,7 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
|
||||
struct vcpu_svm *svm = to_svm(vcpu);
|
||||
unsigned long vmcb_pa = svm->current_vmcb->pa;
|
||||
|
||||
kvm_guest_enter_irqoff();
|
||||
guest_state_enter_irqoff();
|
||||
|
||||
if (sev_es_guest(vcpu->kvm)) {
|
||||
__svm_sev_es_vcpu_run(vmcb_pa);
|
||||
@ -3635,7 +3650,7 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
|
||||
vmload(__sme_page_pa(sd->save_area));
|
||||
}
|
||||
|
||||
kvm_guest_exit_irqoff();
|
||||
guest_state_exit_irqoff();
|
||||
}
|
||||
|
||||
static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
|
||||
@ -4545,7 +4560,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
|
||||
.pmu_ops = &amd_pmu_ops,
|
||||
.nested_ops = &svm_nested_ops,
|
||||
|
||||
.deliver_posted_interrupt = svm_deliver_avic_intr,
|
||||
.deliver_interrupt = svm_deliver_interrupt,
|
||||
.dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt,
|
||||
.update_pi_irte = svm_update_pi_irte,
|
||||
.setup_mce = svm_setup_mce,
|
||||
|
@ -4041,6 +4041,21 @@ static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
|
||||
int trig_mode, int vector)
|
||||
{
|
||||
struct kvm_vcpu *vcpu = apic->vcpu;
|
||||
|
||||
if (vmx_deliver_posted_interrupt(vcpu, vector)) {
|
||||
kvm_lapic_set_irr(vector, apic);
|
||||
kvm_make_request(KVM_REQ_EVENT, vcpu);
|
||||
kvm_vcpu_kick(vcpu);
|
||||
} else {
|
||||
trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode,
|
||||
trig_mode, vector);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Set up the vmcs's constant host-state fields, i.e., host-state fields that
|
||||
* will not change in the lifetime of the guest.
|
||||
@ -6754,7 +6769,7 @@ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
|
||||
static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
|
||||
struct vcpu_vmx *vmx)
|
||||
{
|
||||
kvm_guest_enter_irqoff();
|
||||
guest_state_enter_irqoff();
|
||||
|
||||
/* L1D Flush includes CPU buffer clear to mitigate MDS */
|
||||
if (static_branch_unlikely(&vmx_l1d_should_flush))
|
||||
@ -6770,7 +6785,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
|
||||
|
||||
vcpu->arch.cr2 = native_read_cr2();
|
||||
|
||||
kvm_guest_exit_irqoff();
|
||||
guest_state_exit_irqoff();
|
||||
}
|
||||
|
||||
static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
|
||||
@ -7768,7 +7783,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
|
||||
.hwapic_isr_update = vmx_hwapic_isr_update,
|
||||
.guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
|
||||
.sync_pir_to_irr = vmx_sync_pir_to_irr,
|
||||
.deliver_posted_interrupt = vmx_deliver_posted_interrupt,
|
||||
.deliver_interrupt = vmx_deliver_interrupt,
|
||||
.dy_apicv_has_pending_interrupt = pi_has_pending_interrupt,
|
||||
|
||||
.set_tss_addr = vmx_set_tss_addr,
|
||||
|
@ -90,6 +90,8 @@
|
||||
u64 __read_mostly kvm_mce_cap_supported = MCG_CTL_P | MCG_SER_P;
|
||||
EXPORT_SYMBOL_GPL(kvm_mce_cap_supported);
|
||||
|
||||
#define ERR_PTR_USR(e) ((void __user *)ERR_PTR(e))
|
||||
|
||||
#define emul_to_vcpu(ctxt) \
|
||||
((struct kvm_vcpu *)(ctxt)->vcpu)
|
||||
|
||||
@ -4340,7 +4342,7 @@ static inline void __user *kvm_get_attr_addr(struct kvm_device_attr *attr)
|
||||
void __user *uaddr = (void __user*)(unsigned long)attr->addr;
|
||||
|
||||
if ((u64)(unsigned long)uaddr != attr->addr)
|
||||
return ERR_PTR(-EFAULT);
|
||||
return ERR_PTR_USR(-EFAULT);
|
||||
return uaddr;
|
||||
}
|
||||
|
||||
@ -10041,6 +10043,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
|
||||
set_debugreg(0, 7);
|
||||
}
|
||||
|
||||
guest_timing_enter_irqoff();
|
||||
|
||||
for (;;) {
|
||||
/*
|
||||
* Assert that vCPU vs. VM APICv state is consistent. An APICv
|
||||
@ -10125,7 +10129,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
|
||||
* of accounting via context tracking, but the loss of accuracy is
|
||||
* acceptable for all known use cases.
|
||||
*/
|
||||
vtime_account_guest_exit();
|
||||
guest_timing_exit_irqoff();
|
||||
|
||||
if (lapic_in_kernel(vcpu)) {
|
||||
s64 delta = vcpu->arch.apic->lapic_timer.advance_expire_delta;
|
||||
@ -11639,8 +11643,6 @@ void kvm_arch_sync_events(struct kvm *kvm)
|
||||
kvm_free_pit(kvm);
|
||||
}
|
||||
|
||||
#define ERR_PTR_USR(e) ((void __user *)ERR_PTR(e))
|
||||
|
||||
/**
|
||||
* __x86_set_memory_region: Setup KVM internal memory slot
|
||||
*
|
||||
|
@ -10,51 +10,6 @@
|
||||
|
||||
void kvm_spurious_fault(void);
|
||||
|
||||
static __always_inline void kvm_guest_enter_irqoff(void)
|
||||
{
|
||||
/*
|
||||
* VMENTER enables interrupts (host state), but the kernel state is
|
||||
* interrupts disabled when this is invoked. Also tell RCU about
|
||||
* it. This is the same logic as for exit_to_user_mode().
|
||||
*
|
||||
* This ensures that e.g. latency analysis on the host observes
|
||||
* guest mode as interrupt enabled.
|
||||
*
|
||||
* guest_enter_irqoff() informs context tracking about the
|
||||
* transition to guest mode and if enabled adjusts RCU state
|
||||
* accordingly.
|
||||
*/
|
||||
instrumentation_begin();
|
||||
trace_hardirqs_on_prepare();
|
||||
lockdep_hardirqs_on_prepare(CALLER_ADDR0);
|
||||
instrumentation_end();
|
||||
|
||||
guest_enter_irqoff();
|
||||
lockdep_hardirqs_on(CALLER_ADDR0);
|
||||
}
|
||||
|
||||
static __always_inline void kvm_guest_exit_irqoff(void)
|
||||
{
|
||||
/*
|
||||
* VMEXIT disables interrupts (host state), but tracing and lockdep
|
||||
* have them in state 'on' as recorded before entering guest mode.
|
||||
* Same as enter_from_user_mode().
|
||||
*
|
||||
* context_tracking_guest_exit() restores host context and reinstates
|
||||
* RCU if enabled and required.
|
||||
*
|
||||
* This needs to be done immediately after VM-Exit, before any code
|
||||
* that might contain tracepoints or call out to the greater world,
|
||||
* e.g. before x86_spec_ctrl_restore_host().
|
||||
*/
|
||||
lockdep_hardirqs_off(CALLER_ADDR0);
|
||||
context_tracking_guest_exit();
|
||||
|
||||
instrumentation_begin();
|
||||
trace_hardirqs_off_finish();
|
||||
instrumentation_end();
|
||||
}
|
||||
|
||||
#define KVM_NESTED_VMENTER_CONSISTENCY_CHECK(consistency_check) \
|
||||
({ \
|
||||
bool failed = (consistency_check); \
|
||||
|
@ -9,6 +9,7 @@
|
||||
#include <xen/events.h>
|
||||
#include <xen/interface/memory.h>
|
||||
|
||||
#include <asm/apic.h>
|
||||
#include <asm/cpu.h>
|
||||
#include <asm/smp.h>
|
||||
#include <asm/io_apic.h>
|
||||
@ -242,15 +243,9 @@ static __init int xen_parse_no_vector_callback(char *arg)
|
||||
}
|
||||
early_param("xen_no_vector_callback", xen_parse_no_vector_callback);
|
||||
|
||||
bool __init xen_hvm_need_lapic(void)
|
||||
static __init bool xen_x2apic_available(void)
|
||||
{
|
||||
if (xen_pv_domain())
|
||||
return false;
|
||||
if (!xen_hvm_domain())
|
||||
return false;
|
||||
if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback)
|
||||
return false;
|
||||
return true;
|
||||
return x2apic_supported();
|
||||
}
|
||||
|
||||
static __init void xen_hvm_guest_late_init(void)
|
||||
@ -312,7 +307,7 @@ struct hypervisor_x86 x86_hyper_xen_hvm __initdata = {
|
||||
.detect = xen_platform_hvm,
|
||||
.type = X86_HYPER_XEN_HVM,
|
||||
.init.init_platform = xen_hvm_guest_init,
|
||||
.init.x2apic_available = xen_x2apic_para_available,
|
||||
.init.x2apic_available = xen_x2apic_available,
|
||||
.init.init_mem_mapping = xen_hvm_init_mem_mapping,
|
||||
.init.guest_late_init = xen_hvm_guest_late_init,
|
||||
.runtime.pin_vcpu = xen_pin_vcpu,
|
||||
|
@ -1341,10 +1341,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
|
||||
|
||||
xen_acpi_sleep_register();
|
||||
|
||||
/* Avoid searching for BIOS MP tables */
|
||||
x86_init.mpparse.find_smp_config = x86_init_noop;
|
||||
x86_init.mpparse.get_smp_config = x86_init_uint_noop;
|
||||
|
||||
xen_boot_params_init_edd();
|
||||
|
||||
#ifdef CONFIG_ACPI
|
||||
|
@ -148,28 +148,12 @@ int xen_smp_intr_init_pv(unsigned int cpu)
|
||||
return rc;
|
||||
}
|
||||
|
||||
static void __init xen_fill_possible_map(void)
|
||||
{
|
||||
int i, rc;
|
||||
|
||||
if (xen_initial_domain())
|
||||
return;
|
||||
|
||||
for (i = 0; i < nr_cpu_ids; i++) {
|
||||
rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
|
||||
if (rc >= 0) {
|
||||
num_processors++;
|
||||
set_cpu_possible(i, true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void __init xen_filter_cpu_maps(void)
|
||||
static void __init _get_smp_config(unsigned int early)
|
||||
{
|
||||
int i, rc;
|
||||
unsigned int subtract = 0;
|
||||
|
||||
if (!xen_initial_domain())
|
||||
if (early)
|
||||
return;
|
||||
|
||||
num_processors = 0;
|
||||
@ -210,7 +194,6 @@ static void __init xen_pv_smp_prepare_boot_cpu(void)
|
||||
* sure the old memory can be recycled. */
|
||||
make_lowmem_page_readwrite(xen_initial_gdt);
|
||||
|
||||
xen_filter_cpu_maps();
|
||||
xen_setup_vcpu_info_placement();
|
||||
|
||||
/*
|
||||
@ -476,5 +459,8 @@ static const struct smp_ops xen_smp_ops __initconst = {
|
||||
void __init xen_smp_init(void)
|
||||
{
|
||||
smp_ops = xen_smp_ops;
|
||||
xen_fill_possible_map();
|
||||
|
||||
/* Avoid searching for BIOS MP tables */
|
||||
x86_init.mpparse.find_smp_config = x86_init_noop;
|
||||
x86_init.mpparse.get_smp_config = _get_smp_config;
|
||||
}
|
||||
|
@ -373,7 +373,7 @@ void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
|
||||
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
|
||||
unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);
|
||||
|
||||
bip->bip_iter.bi_sector += bytes_done >> 9;
|
||||
bip->bip_iter.bi_sector += bio_integrity_intervals(bi, bytes_done >> 9);
|
||||
bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes);
|
||||
}
|
||||
|
||||
|
33	block/fops.c
@ -566,34 +566,37 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
|
||||
{
|
||||
struct block_device *bdev = iocb->ki_filp->private_data;
|
||||
loff_t size = bdev_nr_bytes(bdev);
|
||||
size_t count = iov_iter_count(to);
|
||||
loff_t pos = iocb->ki_pos;
|
||||
size_t shorted = 0;
|
||||
ssize_t ret = 0;
|
||||
size_t count;
|
||||
|
||||
if (unlikely(pos + count > size)) {
|
||||
if (unlikely(pos + iov_iter_count(to) > size)) {
|
||||
if (pos >= size)
|
||||
return 0;
|
||||
size -= pos;
|
||||
if (count > size) {
|
||||
shorted = count - size;
|
||||
iov_iter_truncate(to, size);
|
||||
}
|
||||
shorted = iov_iter_count(to) - size;
|
||||
iov_iter_truncate(to, size);
|
||||
}
|
||||
|
||||
count = iov_iter_count(to);
|
||||
if (!count)
|
||||
goto reexpand; /* skip atime */
|
||||
|
||||
if (iocb->ki_flags & IOCB_DIRECT) {
|
||||
struct address_space *mapping = iocb->ki_filp->f_mapping;
|
||||
|
||||
if (iocb->ki_flags & IOCB_NOWAIT) {
|
||||
if (filemap_range_needs_writeback(mapping, iocb->ki_pos,
|
||||
iocb->ki_pos + count - 1))
|
||||
return -EAGAIN;
|
||||
if (filemap_range_needs_writeback(mapping, pos,
|
||||
pos + count - 1)) {
|
||||
ret = -EAGAIN;
|
||||
goto reexpand;
|
||||
}
|
||||
} else {
|
||||
ret = filemap_write_and_wait_range(mapping,
|
||||
iocb->ki_pos,
|
||||
iocb->ki_pos + count - 1);
|
||||
ret = filemap_write_and_wait_range(mapping, pos,
|
||||
pos + count - 1);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
goto reexpand;
|
||||
}
|
||||
|
||||
file_accessed(iocb->ki_filp);
|
||||
@ -603,12 +606,14 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
|
||||
iocb->ki_pos += ret;
|
||||
count -= ret;
|
||||
}
|
||||
iov_iter_revert(to, count - iov_iter_count(to));
|
||||
if (ret < 0 || !count)
|
||||
return ret;
|
||||
goto reexpand;
|
||||
}
|
||||
|
||||
ret = filemap_read(iocb, to, ret);
|
||||
|
||||
reexpand:
|
||||
if (unlikely(shorted))
|
||||
iov_iter_reexpand(to, iov_iter_count(to) + shorted);
|
||||
return ret;
|
||||
|
@ -15,12 +15,12 @@
|
||||
static int crypto_blake2s_update_generic(struct shash_desc *desc,
|
||||
const u8 *in, unsigned int inlen)
|
||||
{
|
||||
return crypto_blake2s_update(desc, in, inlen, blake2s_compress_generic);
|
||||
return crypto_blake2s_update(desc, in, inlen, true);
|
||||
}
|
||||
|
||||
static int crypto_blake2s_final_generic(struct shash_desc *desc, u8 *out)
|
||||
{
|
||||
return crypto_blake2s_final(desc, out, blake2s_compress_generic);
|
||||
return crypto_blake2s_final(desc, out, true);
|
||||
}
|
||||
|
||||
#define BLAKE2S_ALG(name, driver_name, digest_size) \
|
||||
|
@ -11,6 +11,7 @@ menuconfig ACPI
|
||||
depends on ARCH_SUPPORTS_ACPI
|
||||
select PNP
|
||||
select NLS
|
||||
select CRC32
|
||||
default y if X86
|
||||
help
|
||||
Advanced Configuration and Power Interface (ACPI) support for
|
||||
|
@ -2007,6 +2007,9 @@ static bool ata_log_supported(struct ata_device *dev, u8 log)
|
||||
{
|
||||
struct ata_port *ap = dev->link->ap;
|
||||
|
||||
if (dev->horkage & ATA_HORKAGE_NO_LOG_DIR)
|
||||
return false;
|
||||
|
||||
if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
|
||||
return false;
|
||||
return get_unaligned_le16(&ap->sector_buf[log * 2]) ? true : false;
|
||||
@ -4073,6 +4076,13 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
|
||||
{ "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
|
||||
{ "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
|
||||
|
||||
/*
|
||||
* This sata dom device goes on a walkabout when the ATA_LOG_DIRECTORY
|
||||
* log page is accessed. Ensure we never ask for this log page with
|
||||
* these devices.
|
||||
*/
|
||||
{ "SATADOM-ML 3ME", NULL, ATA_HORKAGE_NO_LOG_DIR },
|
||||
|
||||
/* End Marker */
|
||||
{ }
|
||||
};
|
||||
|
@ -762,7 +762,7 @@ static bool crng_init_try_arch(struct crng_state *crng)
|
||||
return arch_init;
|
||||
}
|
||||
|
||||
static bool __init crng_init_try_arch_early(struct crng_state *crng)
|
||||
static bool __init crng_init_try_arch_early(void)
|
||||
{
|
||||
int i;
|
||||
bool arch_init = true;
|
||||
@ -774,7 +774,7 @@ static bool __init crng_init_try_arch_early(struct crng_state *crng)
|
||||
rv = random_get_entropy();
|
||||
arch_init = false;
|
||||
}
|
||||
crng->state[i] ^= rv;
|
||||
primary_crng.state[i] ^= rv;
|
||||
}
|
||||
|
||||
return arch_init;
|
||||
@ -788,22 +788,20 @@ static void crng_initialize_secondary(struct crng_state *crng)
|
||||
crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
|
||||
}
|
||||
|
||||
static void __init crng_initialize_primary(struct crng_state *crng)
|
||||
static void __init crng_initialize_primary(void)
|
||||
{
|
||||
_extract_entropy(&crng->state[4], sizeof(u32) * 12);
|
||||
if (crng_init_try_arch_early(crng) && trust_cpu && crng_init < 2) {
|
||||
_extract_entropy(&primary_crng.state[4], sizeof(u32) * 12);
|
||||
if (crng_init_try_arch_early() && trust_cpu && crng_init < 2) {
|
||||
invalidate_batched_entropy();
|
||||
numa_crng_init();
|
||||
crng_init = 2;
|
||||
pr_notice("crng init done (trusting CPU's manufacturer)\n");
|
||||
}
|
||||
crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
|
||||
primary_crng.init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
|
||||
}
|
||||
|
||||
static void crng_finalize_init(struct crng_state *crng)
|
||||
static void crng_finalize_init(void)
|
||||
{
|
||||
if (crng != &primary_crng || crng_init >= 2)
|
||||
return;
|
||||
if (!system_wq) {
|
||||
/* We can't call numa_crng_init until we have workqueues,
|
||||
* so mark this for processing later. */
|
||||
@ -814,6 +812,7 @@ static void crng_finalize_init(struct crng_state *crng)
|
||||
invalidate_batched_entropy();
|
||||
numa_crng_init();
|
||||
crng_init = 2;
|
||||
crng_need_final_init = false;
|
||||
process_random_ready_list();
|
||||
wake_up_interruptible(&crng_init_wait);
|
||||
kill_fasync(&fasync, SIGIO, POLL_IN);
|
||||
@ -980,7 +979,8 @@ static void crng_reseed(struct crng_state *crng, bool use_input_pool)
|
||||
memzero_explicit(&buf, sizeof(buf));
|
||||
WRITE_ONCE(crng->init_time, jiffies);
|
||||
spin_unlock_irqrestore(&crng->lock, flags);
|
||||
crng_finalize_init(crng);
|
||||
if (crng == &primary_crng && crng_init < 2)
|
||||
crng_finalize_init();
|
||||
}
|
||||
|
||||
static void _extract_crng(struct crng_state *crng, u8 out[CHACHA_BLOCK_SIZE])
|
||||
@ -1697,8 +1697,8 @@ int __init rand_initialize(void)
|
||||
{
|
||||
init_std_data();
|
||||
if (crng_need_final_init)
|
||||
crng_finalize_init(&primary_crng);
|
||||
crng_initialize_primary(&primary_crng);
|
||||
crng_finalize_init();
|
||||
crng_initialize_primary();
|
||||
crng_global_init_time = jiffies;
|
||||
if (ratelimit_disable) {
|
||||
urandom_warning.interval = 0;
|
||||
@ -1856,7 +1856,10 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
|
||||
*/
|
||||
if (!capable(CAP_SYS_ADMIN))
|
||||
return -EPERM;
|
||||
input_pool.entropy_count = 0;
|
||||
if (xchg(&input_pool.entropy_count, 0) && random_write_wakeup_bits) {
|
||||
wake_up_interruptible(&random_write_wait);
|
||||
kill_fasync(&fasync, SIGIO, POLL_OUT);
|
||||
}
|
||||
return 0;
|
||||
case RNDRESEEDCRNG:
|
||||
if (!capable(CAP_SYS_ADMIN))
|
||||
@ -2205,13 +2208,15 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
|
||||
return;
|
||||
}
|
||||
|
||||
/* Suspend writing if we're above the trickle threshold.
|
||||
/* Throttle writing if we're above the trickle threshold.
|
||||
* We'll be woken up again once below random_write_wakeup_thresh,
|
||||
* or when the calling thread is about to terminate.
|
||||
* when the calling thread is about to terminate, or once
|
||||
* CRNG_RESEED_INTERVAL has lapsed.
|
||||
*/
|
||||
wait_event_interruptible(random_write_wait,
|
||||
wait_event_interruptible_timeout(random_write_wait,
|
||||
!system_wq || kthread_should_stop() ||
|
||||
POOL_ENTROPY_BITS() <= random_write_wakeup_bits);
|
||||
POOL_ENTROPY_BITS() <= random_write_wakeup_bits,
|
||||
CRNG_RESEED_INTERVAL);
|
||||
mix_pool_bytes(buffer, count);
|
||||
credit_entropy_bits(entropy);
|
||||
}
|
||||
|
@ -14,6 +14,7 @@
|
||||
#include <linux/xarray.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/nospec.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/syscalls.h>
|
||||
#include <linux/dma-heap.h>
|
||||
@ -135,6 +136,7 @@ static long dma_heap_ioctl(struct file *file, unsigned int ucmd,
|
||||
if (nr >= ARRAY_SIZE(dma_heap_ioctl_cmds))
|
||||
return -EINVAL;
|
||||
|
||||
nr = array_index_nospec(nr, ARRAY_SIZE(dma_heap_ioctl_cmds));
|
||||
/* Get the kernel ioctl cmd that matches */
|
||||
kcmd = dma_heap_ioctl_cmds[nr];
|
||||
|
||||
|
@ -1408,12 +1408,10 @@ int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_sta
|
||||
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
|
||||
|
||||
void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
|
||||
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
|
||||
void amdgpu_acpi_detect(void);
|
||||
#else
|
||||
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
|
||||
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
|
||||
static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
|
||||
static inline void amdgpu_acpi_detect(void) { }
|
||||
static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
|
||||
static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
|
||||
@ -1422,6 +1420,14 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
|
||||
enum amdgpu_ss ss_state) { return 0; }
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
|
||||
bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
|
||||
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
|
||||
#else
|
||||
static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
|
||||
static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
|
||||
#endif
|
||||
|
||||
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
|
||||
uint64_t addr, struct amdgpu_bo **bo,
|
||||
struct amdgpu_bo_va_mapping **mapping);
|
||||
|
@ -1031,6 +1031,20 @@ void amdgpu_acpi_detect(void)
|
||||
}
|
||||
}
|
||||
|
||||
#if IS_ENABLED(CONFIG_SUSPEND)
|
||||
/**
|
||||
* amdgpu_acpi_is_s3_active
|
||||
*
|
||||
* @adev: amdgpu_device_pointer
|
||||
*
|
||||
* returns true if supported, false if not.
|
||||
*/
|
||||
bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev)
|
||||
{
|
||||
return !(adev->flags & AMD_IS_APU) ||
|
||||
(pm_suspend_target_state == PM_SUSPEND_MEM);
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_acpi_is_s0ix_active
|
||||
*
|
||||
@ -1040,11 +1054,24 @@ void amdgpu_acpi_detect(void)
|
||||
*/
|
||||
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
|
||||
{
|
||||
#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_SUSPEND)
|
||||
if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
|
||||
if (adev->flags & AMD_IS_APU)
|
||||
return pm_suspend_target_state == PM_SUSPEND_TO_IDLE;
|
||||
if (!(adev->flags & AMD_IS_APU) ||
|
||||
(pm_suspend_target_state != PM_SUSPEND_TO_IDLE))
|
||||
return false;
|
||||
|
||||
if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) {
|
||||
dev_warn_once(adev->dev,
|
||||
"Power consumption will be higher as BIOS has not been configured for suspend-to-idle.\n"
|
||||
"To use suspend-to-idle change the sleep mode in BIOS setup.\n");
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if !IS_ENABLED(CONFIG_AMD_PMC)
|
||||
dev_warn_once(adev->dev,
|
||||
"Power consumption will be higher as the kernel has not been compiled with CONFIG_AMD_PMC.\n");
|
||||
return false;
|
||||
#else
|
||||
return true;
|
||||
#endif /* CONFIG_AMD_PMC */
|
||||
}
|
||||
|
||||
#endif /* CONFIG_SUSPEND */
|
||||
|
@ -2246,13 +2246,20 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
|
||||
static int amdgpu_pmops_prepare(struct device *dev)
|
||||
{
|
||||
struct drm_device *drm_dev = dev_get_drvdata(dev);
|
||||
struct amdgpu_device *adev = drm_to_adev(drm_dev);
|
||||
|
||||
/* Return a positive number here so
|
||||
* DPM_FLAG_SMART_SUSPEND works properly
|
||||
*/
|
||||
if (amdgpu_device_supports_boco(drm_dev))
|
||||
return pm_runtime_suspended(dev) &&
|
||||
pm_suspend_via_firmware();
|
||||
return pm_runtime_suspended(dev);
|
||||
|
||||
/* if we will not support s3 or s2i for the device
|
||||
* then skip suspend
|
||||
*/
|
||||
if (!amdgpu_acpi_is_s0ix_active(adev) &&
|
||||
!amdgpu_acpi_is_s3_active(adev))
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1904,7 +1904,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
|
||||
unsigned i;
|
||||
int r;
|
||||
|
||||
if (direct_submit && !ring->sched.ready) {
|
||||
if (!direct_submit && !ring->sched.ready) {
|
||||
DRM_ERROR("Trying to move memory with ring turned off.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -1140,6 +1140,9 @@ static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 3))
|
||||
return;
|
||||
|
||||
adev->mmhub.funcs->get_clockgating(adev, flags);
|
||||
|
||||
if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0))
|
||||
|
@ -570,32 +570,32 @@ static struct wm_table lpddr5_wm_table = {
|
||||
.wm_inst = WM_A,
|
||||
.wm_type = WM_TYPE_PSTATE_CHG,
|
||||
.pstate_latency_us = 11.65333,
|
||||
.sr_exit_time_us = 7.95,
|
||||
.sr_enter_plus_exit_time_us = 9,
|
||||
.sr_exit_time_us = 13.5,
|
||||
.sr_enter_plus_exit_time_us = 16.5,
|
||||
.valid = true,
|
||||
},
|
||||
{
|
||||
.wm_inst = WM_B,
|
||||
.wm_type = WM_TYPE_PSTATE_CHG,
|
||||
.pstate_latency_us = 11.65333,
|
||||
.sr_exit_time_us = 9.82,
|
||||
.sr_enter_plus_exit_time_us = 11.196,
|
||||
.sr_exit_time_us = 13.5,
|
||||
.sr_enter_plus_exit_time_us = 16.5,
|
||||
.valid = true,
|
||||
},
|
||||
{
|
||||
.wm_inst = WM_C,
|
||||
.wm_type = WM_TYPE_PSTATE_CHG,
|
||||
.pstate_latency_us = 11.65333,
|
||||
.sr_exit_time_us = 9.89,
|
||||
.sr_enter_plus_exit_time_us = 11.24,
|
||||
.sr_exit_time_us = 13.5,
|
||||
.sr_enter_plus_exit_time_us = 16.5,
|
||||
.valid = true,
|
||||
},
|
||||
{
|
||||
.wm_inst = WM_D,
|
||||
.wm_type = WM_TYPE_PSTATE_CHG,
|
||||
.pstate_latency_us = 11.65333,
|
||||
.sr_exit_time_us = 9.748,
|
||||
.sr_enter_plus_exit_time_us = 11.102,
|
||||
.sr_exit_time_us = 13.5,
|
||||
.sr_enter_plus_exit_time_us = 16.5,
|
||||
.valid = true,
|
||||
},
|
||||
}
|
||||
|
@ -329,38 +329,38 @@ static struct clk_bw_params dcn31_bw_params = {
|
||||
|
||||
};
|
||||
|
||||
static struct wm_table ddr4_wm_table = {
|
||||
static struct wm_table ddr5_wm_table = {
|
||||
.entries = {
|
||||
{
|
||||
.wm_inst = WM_A,
|
||||
.wm_type = WM_TYPE_PSTATE_CHG,
|
||||
.pstate_latency_us = 11.72,
|
||||
.sr_exit_time_us = 6.09,
|
||||
.sr_enter_plus_exit_time_us = 7.14,
|
||||
.sr_exit_time_us = 9,
|
||||
.sr_enter_plus_exit_time_us = 11,
|
||||
.valid = true,
|
||||
},
|
||||
{
|
||||
.wm_inst = WM_B,
|
||||
.wm_type = WM_TYPE_PSTATE_CHG,
|
||||
.pstate_latency_us = 11.72,
|
||||
.sr_exit_time_us = 10.12,
|
||||
.sr_enter_plus_exit_time_us = 11.48,
|
||||
.sr_exit_time_us = 9,
|
||||
.sr_enter_plus_exit_time_us = 11,
|
||||
.valid = true,
|
||||
},
|
||||
{
|
||||
.wm_inst = WM_C,
|
||||
.wm_type = WM_TYPE_PSTATE_CHG,
|
||||
.pstate_latency_us = 11.72,
|
||||
.sr_exit_time_us = 10.12,
|
||||
.sr_enter_plus_exit_time_us = 11.48,
|
||||
.sr_exit_time_us = 9,
|
||||
.sr_enter_plus_exit_time_us = 11,
|
||||
.valid = true,
|
||||
},
|
||||
{
|
||||
.wm_inst = WM_D,
|
||||
.wm_type = WM_TYPE_PSTATE_CHG,
|
||||
.pstate_latency_us = 11.72,
|
||||
.sr_exit_time_us = 10.12,
|
||||
.sr_enter_plus_exit_time_us = 11.48,
|
||||
.sr_exit_time_us = 9,
|
||||
.sr_enter_plus_exit_time_us = 11,
|
||||
.valid = true,
|
||||
},
|
||||
}
|
||||
@ -687,7 +687,7 @@ void dcn31_clk_mgr_construct(
|
||||
if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) {
|
||||
dcn31_bw_params.wm_table = lpddr5_wm_table;
|
||||
} else {
|
||||
dcn31_bw_params.wm_table = ddr4_wm_table;
|
||||
dcn31_bw_params.wm_table = ddr5_wm_table;
|
||||
}
|
||||
/* Saved clocks configured at boot for debug purposes */
|
||||
dcn31_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info);
|
||||
|
@ -5597,6 +5597,26 @@ static bool retrieve_link_cap(struct dc_link *link)
dp_hw_fw_revision.ieee_fw_rev,
sizeof(dp_hw_fw_revision.ieee_fw_rev));
/* Quirk for Apple MBP 2018 15" Retina panels: wrong DP_MAX_LINK_RATE */
{
uint8_t str_mbp_2018[] = { 101, 68, 21, 103, 98, 97 };
uint8_t fwrev_mbp_2018[] = { 7, 4 };
uint8_t fwrev_mbp_2018_vega[] = { 8, 4 };
/* We also check for the firmware revision as 16,1 models have an
* identical device id and are incorrectly quirked otherwise.
*/
if ((link->dpcd_caps.sink_dev_id == 0x0010fa) &&
!memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2018,
sizeof(str_mbp_2018)) &&
(!memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018,
sizeof(fwrev_mbp_2018)) ||
!memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018_vega,
sizeof(fwrev_mbp_2018_vega)))) {
link->reported_link_cap.link_rate = LINK_RATE_RBR2;
}
}
memset(&link->dpcd_caps.dsc_caps, '\0',
sizeof(link->dpcd_caps.dsc_caps));
memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap));
@ -1608,11 +1608,6 @@ static enum dc_status apply_single_controller_ctx_to_hw(
pipe_ctx->stream_res.stream_enc,
pipe_ctx->stream_res.tg->inst);
if (dc_is_embedded_signal(pipe_ctx->stream->signal) &&
pipe_ctx->stream_res.stream_enc->funcs->reset_fifo)
pipe_ctx->stream_res.stream_enc->funcs->reset_fifo(
pipe_ctx->stream_res.stream_enc);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG);
@ -902,19 +902,6 @@ void enc1_stream_encoder_stop_dp_info_packets(
}
void enc1_stream_encoder_reset_fifo(
struct stream_encoder *enc)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
/* set DIG_START to 0x1 to reset FIFO */
REG_UPDATE(DIG_FE_CNTL, DIG_START, 1);
udelay(100);
/* write 0 to take the FIFO out of reset */
REG_UPDATE(DIG_FE_CNTL, DIG_START, 0);
}
void enc1_stream_encoder_dp_blank(
struct dc_link *link,
struct stream_encoder *enc)
@ -1600,8 +1587,6 @@ static const struct stream_encoder_funcs dcn10_str_enc_funcs = {
enc1_stream_encoder_send_immediate_sdp_message,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
.reset_fifo =
enc1_stream_encoder_reset_fifo,
.dp_blank =
enc1_stream_encoder_dp_blank,
.dp_unblank =
@ -626,9 +626,6 @@ void enc1_stream_encoder_send_immediate_sdp_message(
void enc1_stream_encoder_stop_dp_info_packets(
struct stream_encoder *enc);
void enc1_stream_encoder_reset_fifo(
struct stream_encoder *enc);
void enc1_stream_encoder_dp_blank(
struct dc_link *link,
struct stream_encoder *enc);
@ -593,8 +593,6 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
enc1_stream_encoder_send_immediate_sdp_message,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
.reset_fifo =
enc1_stream_encoder_reset_fifo,
.dp_blank =
enc1_stream_encoder_dp_blank,
.dp_unblank =
@ -789,8 +789,6 @@ static const struct stream_encoder_funcs dcn30_str_enc_funcs = {
enc3_stream_encoder_update_dp_info_packets,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
.reset_fifo =
enc1_stream_encoder_reset_fifo,
.dp_blank =
enc1_stream_encoder_dp_blank,
.dp_unblank =
@ -164,10 +164,6 @@ struct stream_encoder_funcs {
void (*stop_dp_info_packets)(
struct stream_encoder *enc);
void (*reset_fifo)(
struct stream_encoder *enc
);
void (*dp_blank)(
struct dc_link *link,
struct stream_encoder *enc);
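The hunks above drop the optional reset_fifo hook from several stream-encoder function tables together with its guarded caller. The pattern being modified is an ops table with an optional callback that call sites NULL-check before invoking; the standalone C sketch below (illustrative only, not the driver's code; all names here are invented for the example) shows that shape:

    #include <stdio.h>

    struct encoder;

    /* Ops table with an optional hook: callers must check for NULL before
     * invoking it, so an implementation may simply leave the slot empty. */
    struct encoder_funcs {
        void (*enable)(struct encoder *enc);
        void (*reset_fifo)(struct encoder *enc);   /* optional */
    };

    struct encoder {
        const struct encoder_funcs *funcs;
    };

    static void enc_enable(struct encoder *enc)
    {
        (void)enc;
        printf("enable\n");
    }

    static const struct encoder_funcs funcs_without_reset = {
        .enable = enc_enable,
        /* .reset_fifo intentionally left NULL */
    };

    int main(void)
    {
        struct encoder enc = { .funcs = &funcs_without_reset };

        enc.funcs->enable(&enc);
        if (enc.funcs->reset_fifo)          /* guarded optional callback */
            enc.funcs->reset_fifo(&enc);
        return 0;
    }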
@ -3696,14 +3696,14 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
static int sienna_cichlid_enable_mgpu_fan_boost(struct smu_context *smu)
{
struct smu_table_context *table_context = &smu->smu_table;
PPTable_t *smc_pptable = table_context->driver_pptable;
uint16_t *mgpu_fan_boost_limit_rpm;
GET_PPTABLE_MEMBER(MGpuFanBoostLimitRpm, &mgpu_fan_boost_limit_rpm);
/*
* Skip the MGpuFanBoost setting for those ASICs
* which do not support it
*/
if (!smc_pptable->MGpuFanBoostLimitRpm)
if (*mgpu_fan_boost_limit_rpm == 0)
return 0;
return smu_cmn_send_smc_msg_with_param(smu,
@ -959,6 +959,9 @@ static int check_overlay_dst(struct intel_overlay *overlay,
const struct intel_crtc_state *pipe_config =
overlay->crtc->config;
if (rec->dst_height == 0 || rec->dst_width == 0)
return -EINVAL;
if (rec->dst_x < pipe_config->pipe_src_w &&
rec->dst_x + rec->dst_width <= pipe_config->pipe_src_w &&
rec->dst_y < pipe_config->pipe_src_h &&
@ -345,10 +345,11 @@ static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
static bool adl_tc_phy_status_complete(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
struct intel_uncore *uncore = &i915->uncore;
u32 val;
val = intel_uncore_read(uncore, TCSS_DDI_STATUS(dig_port->tc_phy_fia_idx));
val = intel_uncore_read(uncore, TCSS_DDI_STATUS(tc_port));
if (val == 0xffffffff) {
drm_dbg_kms(&i915->drm,
"Port %s: PHY in TCCOLD, assuming not complete\n",
@ -2505,9 +2505,14 @@ static int eb_pin_timeline(struct i915_execbuffer *eb, struct intel_context *ce,
timeout) < 0) {
i915_request_put(rq);
tl = intel_context_timeline_lock(ce);
/*
* Error path, cannot use intel_context_timeline_lock as
* that is user interruptable and this clean up step
* must be done.
*/
mutex_lock(&ce->timeline->mutex);
intel_context_exit(ce);
intel_context_timeline_unlock(tl);
mutex_unlock(&ce->timeline->mutex);
if (nonblock)
return -EWOULDBLOCK;
@ -206,6 +206,11 @@ struct intel_guc {
* context usage for overflows.
*/
struct delayed_work work;
/**
* @shift: Right shift value for the gpm timestamp
*/
u32 shift;
} timestamp;
#ifdef CONFIG_DRM_I915_SELFTEST
@ -1113,6 +1113,19 @@ __extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start)
if (new_start == lower_32_bits(*prev_start))
return;
/*
* When gt is unparked, we update the gt timestamp and start the ping
* worker that updates the gt_stamp every POLL_TIME_CLKS. As long as gt
* is unparked, all switched in contexts will have a start time that is
* within +/- POLL_TIME_CLKS of the most recent gt_stamp.
*
* If neither gt_stamp nor new_start has rolled over, then the
* gt_stamp_hi does not need to be adjusted, however if one of them has
* rolled over, we need to adjust gt_stamp_hi accordingly.
*
* The below conditions address the cases of new_start rollover and
* gt_stamp_last rollover respectively.
*/
if (new_start < gt_stamp_last &&
(new_start - gt_stamp_last) <= POLL_TIME_CLKS)
gt_stamp_hi++;
@ -1124,17 +1137,45 @@ __extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start)
*prev_start = ((u64)gt_stamp_hi << 32) | new_start;
}
static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
/*
* GuC updates shared memory and KMD reads it. Since this is not synchronized,
* we run into a race where the value read is inconsistent. Sometimes the
* inconsistency is in reading the upper MSB bytes of the last_in value when
* this race occurs. 2 types of cases are seen - upper 8 bits are zero and upper
* 24 bits are zero. Since these are non-zero values, it is non-trivial to
* determine validity of these values. Instead we read the values multiple times
* until they are consistent. In test runs, 3 attempts results in consistent
* values. The upper bound is set to 6 attempts and may need to be tuned as per
* any new occurences.
*/
static void __get_engine_usage_record(struct intel_engine_cs *engine,
u32 *last_in, u32 *id, u32 *total)
{
struct guc_engine_usage_record *rec = intel_guc_engine_usage(engine);
int i = 0;
do {
*last_in = READ_ONCE(rec->last_switch_in_stamp);
*id = READ_ONCE(rec->current_context_index);
*total = READ_ONCE(rec->total_runtime);
if (READ_ONCE(rec->last_switch_in_stamp) == *last_in &&
READ_ONCE(rec->current_context_index) == *id &&
READ_ONCE(rec->total_runtime) == *total)
break;
} while (++i < 6);
}
static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
{
struct intel_engine_guc_stats *stats = &engine->stats.guc;
struct intel_guc *guc = &engine->gt->uc.guc;
u32 last_switch = rec->last_switch_in_stamp;
u32 ctx_id = rec->current_context_index;
u32 total = rec->total_runtime;
u32 last_switch, ctx_id, total;
lockdep_assert_held(&guc->timestamp.lock);
__get_engine_usage_record(engine, &last_switch, &ctx_id, &total);
stats->running = ctx_id != ~0U && last_switch;
if (stats->running)
__extend_last_switch(guc, &stats->start_gt_clk, last_switch);
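The comment in the hunk above describes a read-until-consistent workaround for unsynchronized shared memory updated by firmware. A minimal userspace C sketch of the same retry idea follows; the struct layout and the bound of six attempts are only illustrative, not the driver's actual record format:

    #include <stdint.h>
    #include <stdio.h>

    /* Shared record updated by another agent (firmware in the driver case). */
    struct usage_record {
        volatile uint32_t last_switch_in;
        volatile uint32_t context_id;
        volatile uint32_t total_runtime;
    };

    /* Re-read the three fields until a second read of each agrees with the
     * first, giving up after a bounded number of attempts. */
    static void read_consistent(const struct usage_record *rec,
                                uint32_t *last_in, uint32_t *id, uint32_t *total)
    {
        int i = 0;

        do {
            *last_in = rec->last_switch_in;
            *id      = rec->context_id;
            *total   = rec->total_runtime;

            if (rec->last_switch_in == *last_in &&
                rec->context_id == *id &&
                rec->total_runtime == *total)
                break;          /* stable snapshot */
        } while (++i < 6);
    }

    int main(void)
    {
        struct usage_record rec = { 1, 2, 3 };
        uint32_t a, b, c;

        read_consistent(&rec, &a, &b, &c);
        printf("%u %u %u\n", (unsigned)a, (unsigned)b, (unsigned)c);
        return 0;
    }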
@ -1149,23 +1190,51 @@ static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
}
}
static void guc_update_pm_timestamp(struct intel_guc *guc,
struct intel_engine_cs *engine,
ktime_t *now)
static u32 gpm_timestamp_shift(struct intel_gt *gt)
{
u32 gt_stamp_now, gt_stamp_hi;
intel_wakeref_t wakeref;
u32 reg, shift;
with_intel_runtime_pm(gt->uncore->rpm, wakeref)
reg = intel_uncore_read(gt->uncore, RPM_CONFIG0);
shift = (reg & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT;
return 3 - shift;
}
static u64 gpm_timestamp(struct intel_gt *gt)
{
u32 lo, hi, old_hi, loop = 0;
hi = intel_uncore_read(gt->uncore, MISC_STATUS1);
do {
lo = intel_uncore_read(gt->uncore, MISC_STATUS0);
old_hi = hi;
hi = intel_uncore_read(gt->uncore, MISC_STATUS1);
} while (old_hi != hi && loop++ < 2);
return ((u64)hi << 32) | lo;
}
static void guc_update_pm_timestamp(struct intel_guc *guc, ktime_t *now)
{
struct intel_gt *gt = guc_to_gt(guc);
u32 gt_stamp_lo, gt_stamp_hi;
u64 gpm_ts;
lockdep_assert_held(&guc->timestamp.lock);
gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
gt_stamp_now = intel_uncore_read(engine->uncore,
RING_TIMESTAMP(engine->mmio_base));
gpm_ts = gpm_timestamp(gt) >> guc->timestamp.shift;
gt_stamp_lo = lower_32_bits(gpm_ts);
*now = ktime_get();
if (gt_stamp_now < lower_32_bits(guc->timestamp.gt_stamp))
if (gt_stamp_lo < lower_32_bits(guc->timestamp.gt_stamp))
gt_stamp_hi++;
guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_now;
guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_lo;
}
/*
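The new gpm_timestamp() helper above reads a 64-bit counter exposed as two 32-bit registers, re-reading the high word to catch a carry from the low word. A self-contained C sketch of that technique follows (the fake counter variable stands in for the MMIO registers and is only an assumption for the example):

    #include <stdint.h>
    #include <stdio.h>

    static volatile uint64_t counter;   /* stands in for the hardware counter */

    static uint32_t read_lo(void) { return (uint32_t)counter; }
    static uint32_t read_hi(void) { return (uint32_t)(counter >> 32); }

    /* Read hi, then lo, then hi again: if the high word changed, the low word
     * wrapped in between and the pair must be re-read. */
    static uint64_t read_counter64(void)
    {
        uint32_t lo, hi, old_hi;
        int loop = 0;

        hi = read_hi();
        do {
            lo = read_lo();
            old_hi = hi;
            hi = read_hi();
        } while (old_hi != hi && loop++ < 2);

        return ((uint64_t)hi << 32) | lo;
    }

    int main(void)
    {
        counter = 0x1ffffffffULL;
        printf("0x%llx\n", (unsigned long long)read_counter64());
        return 0;
    }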
@ -1208,8 +1277,12 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
if (!in_reset && intel_gt_pm_get_if_awake(gt)) {
stats_saved = *stats;
gt_stamp_saved = guc->timestamp.gt_stamp;
/*
* Update gt_clks, then gt timestamp to simplify the 'gt_stamp -
* start_gt_clk' calculation below for active engines.
*/
guc_update_engine_gt_clks(engine);
guc_update_pm_timestamp(guc, engine, now);
guc_update_pm_timestamp(guc, now);
intel_gt_pm_put_async(gt);
if (i915_reset_count(gpu_error) != reset_count) {
*stats = stats_saved;
@ -1241,8 +1314,8 @@ static void __reset_guc_busyness_stats(struct intel_guc *guc)
spin_lock_irqsave(&guc->timestamp.lock, flags);
guc_update_pm_timestamp(guc, &unused);
for_each_engine(engine, gt, id) {
guc_update_pm_timestamp(guc, engine, &unused);
guc_update_engine_gt_clks(engine);
engine->stats.guc.prev_total = 0;
}
@ -1259,10 +1332,11 @@ static void __update_guc_busyness_stats(struct intel_guc *guc)
ktime_t unused;
spin_lock_irqsave(&guc->timestamp.lock, flags);
for_each_engine(engine, gt, id) {
guc_update_pm_timestamp(guc, engine, &unused);
guc_update_pm_timestamp(guc, &unused);
for_each_engine(engine, gt, id)
guc_update_engine_gt_clks(engine);
}
spin_unlock_irqrestore(&guc->timestamp.lock, flags);
}
@ -1335,10 +1409,15 @@ void intel_guc_busyness_park(struct intel_gt *gt)
void intel_guc_busyness_unpark(struct intel_gt *gt)
{
struct intel_guc *guc = &gt->uc.guc;
unsigned long flags;
ktime_t unused;
if (!guc_submission_initialized(guc))
return;
spin_lock_irqsave(&guc->timestamp.lock, flags);
guc_update_pm_timestamp(guc, &unused);
spin_unlock_irqrestore(&guc->timestamp.lock, flags);
mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
guc->timestamp.ping_delay);
}
@ -1783,6 +1862,7 @@ int intel_guc_submission_init(struct intel_guc *guc)
spin_lock_init(&guc->timestamp.lock);
INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
guc->timestamp.shift = gpm_timestamp_shift(gt);
return 0;
}
@ -1522,7 +1522,7 @@ capture_engine(struct intel_engine_cs *engine,
struct i915_request *rq = NULL;
unsigned long flags;
ee = intel_engine_coredump_alloc(engine, GFP_KERNEL);
ee = intel_engine_coredump_alloc(engine, ALLOW_FAIL);
if (!ee)
return NULL;
@ -2684,7 +2684,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define RING_WAIT (1 << 11) /* gen3+, PRBx_CTL */
#define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */
#define GUCPMTIMESTAMP _MMIO(0xC3E8)
#define MISC_STATUS0 _MMIO(0xA500)
#define MISC_STATUS1 _MMIO(0xA504)
/* There are 16 64-bit CS General Purpose Registers per-engine on Gen8+ */
#define GEN8_RING_CS_GPR(base, n) _MMIO((base) + 0x600 + (n) * 8)
@ -158,12 +158,6 @@ static void kmb_plane_atomic_disable(struct drm_plane *plane,
case LAYER_1:
kmb->plane_status[plane_id].ctrl = LCD_CTRL_VL2_ENABLE;
break;
case LAYER_2:
kmb->plane_status[plane_id].ctrl = LCD_CTRL_GL1_ENABLE;
break;
case LAYER_3:
kmb->plane_status[plane_id].ctrl = LCD_CTRL_GL2_ENABLE;
break;
}
kmb->plane_status[plane_id].disable = true;
@ -361,7 +361,11 @@ static void mxsfb_crtc_atomic_enable(struct drm_crtc *crtc,
bridge_state =
drm_atomic_get_new_bridge_state(state,
mxsfb->bridge);
bus_format = bridge_state->input_bus_cfg.format;
if (!bridge_state)
bus_format = MEDIA_BUS_FMT_FIXED;
else
bus_format = bridge_state->input_bus_cfg.format;
if (bus_format == MEDIA_BUS_FMT_FIXED) {
dev_warn_once(drm->dev,
"Bridge does not provide bus format, assuming MEDIA_BUS_FMT_RGB888_1X24.\n"
@ -38,7 +38,7 @@ nvbios_addr(struct nvkm_bios *bios, u32 *addr, u8 size)
*addr += bios->imaged_addr;
}
if (unlikely(*addr + size >= bios->size)) {
if (unlikely(*addr + size > bios->size)) {
nvkm_error(&bios->subdev, "OOB %d %08x %08x\n", size, p, *addr);
return false;
}
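The hunk above relaxes the bounds check from >= to >: reading size bytes starting at *addr touches bytes up to *addr + size - 1, so the access is in range exactly when *addr + size <= bios->size. A tiny C check illustrating the arithmetic (integer overflow is ignored here, which is an assumption of the sketch):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* In-range test for reading `size` bytes at offset `addr` of an image of
     * `image_size` bytes: the last byte touched is addr + size - 1. */
    static bool in_range(uint32_t addr, uint8_t size, uint32_t image_size)
    {
        return (uint32_t)(addr + size) <= image_size;
    }

    int main(void)
    {
        printf("%d\n", in_range(15, 1, 16));   /* last byte of a 16-byte image: ok */
        printf("%d\n", in_range(16, 1, 16));   /* one past the end: rejected */
        return 0;
    }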
@ -3322,7 +3322,7 @@ static int cm_lap_handler(struct cm_work *work)
ret = cm_init_av_by_path(param->alternate_path, NULL, &alt_av);
if (ret) {
rdma_destroy_ah_attr(&ah_attr);
return -EINVAL;
goto deref;
}
spin_lock_irq(&cm_id_priv->lock);
@ -67,8 +67,8 @@ static const char * const cma_events[] = {
[RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit",
};
static void cma_set_mgid(struct rdma_id_private *id_priv, struct sockaddr *addr,
union ib_gid *mgid);
static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
enum ib_gid_type gid_type);
const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
{
@ -1846,17 +1846,19 @@ static void destroy_mc(struct rdma_id_private *id_priv,
if (dev_addr->bound_dev_if)
ndev = dev_get_by_index(dev_addr->net,
dev_addr->bound_dev_if);
if (ndev) {
if (ndev && !send_only) {
enum ib_gid_type gid_type;
union ib_gid mgid;
cma_set_mgid(id_priv, (struct sockaddr *)&mc->addr,
&mgid);
if (!send_only)
cma_igmp_send(ndev, &mgid, false);
dev_put(ndev);
gid_type = id_priv->cma_dev->default_gid_type
[id_priv->id.port_num -
rdma_start_port(
id_priv->cma_dev->device)];
cma_iboe_set_mgid((struct sockaddr *)&mc->addr, &mgid,
gid_type);
cma_igmp_send(ndev, &mgid, false);
}
dev_put(ndev);
cancel_work_sync(&mc->iboe_join.work);
}
@ -95,6 +95,7 @@ struct ucma_context {
u64 uid;
struct list_head list;
struct list_head mc_list;
struct work_struct close_work;
};
@ -105,6 +106,7 @@ struct ucma_multicast {
u64 uid;
u8 join_state;
struct list_head list;
struct sockaddr_storage addr;
};
@ -198,6 +200,7 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
INIT_WORK(&ctx->close_work, ucma_close_id);
init_completion(&ctx->comp);
INIT_LIST_HEAD(&ctx->mc_list);
/* So list_del() will work if we don't do ucma_finish_ctx() */
INIT_LIST_HEAD(&ctx->list);
ctx->file = file;
@ -484,19 +487,19 @@ err1:
static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
struct ucma_multicast *mc;
unsigned long index;
struct ucma_multicast *mc, *tmp;
xa_for_each(&multicast_table, index, mc) {
if (mc->ctx != ctx)
continue;
xa_lock(&multicast_table);
list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
list_del(&mc->list);
/*
* At this point mc->ctx->ref is 0 so the mc cannot leave the
* lock on the reader and this is enough serialization
*/
xa_erase(&multicast_table, index);
__xa_erase(&multicast_table, mc->id);
kfree(mc);
}
xa_unlock(&multicast_table);
}
static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
@ -1469,12 +1472,16 @@ static ssize_t ucma_process_join(struct ucma_file *file,
mc->uid = cmd->uid;
memcpy(&mc->addr, addr, cmd->addr_size);
if (xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b,
xa_lock(&multicast_table);
if (__xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b,
GFP_KERNEL)) {
ret = -ENOMEM;
goto err_free_mc;
}
list_add_tail(&mc->list, &ctx->mc_list);
xa_unlock(&multicast_table);
mutex_lock(&ctx->mutex);
ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
join_state, mc);
@ -1500,8 +1507,11 @@ err_leave_multicast:
mutex_unlock(&ctx->mutex);
ucma_cleanup_mc_events(mc);
err_xa_erase:
xa_erase(&multicast_table, mc->id);
xa_lock(&multicast_table);
list_del(&mc->list);
__xa_erase(&multicast_table, mc->id);
err_free_mc:
xa_unlock(&multicast_table);
kfree(mc);
err_put_ctx:
ucma_put_ctx(ctx);
@ -1569,15 +1579,17 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file,
mc = ERR_PTR(-EINVAL);
else if (!refcount_inc_not_zero(&mc->ctx->ref))
mc = ERR_PTR(-ENXIO);
else
__xa_erase(&multicast_table, mc->id);
xa_unlock(&multicast_table);
if (IS_ERR(mc)) {
xa_unlock(&multicast_table);
ret = PTR_ERR(mc);
goto out;
}
list_del(&mc->list);
__xa_erase(&multicast_table, mc->id);
xa_unlock(&multicast_table);
mutex_lock(&mc->ctx->mutex);
rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
mutex_unlock(&mc->ctx->mutex);
@ -55,7 +55,7 @@ union hfi1_ipoib_flow {
*/
struct ipoib_txreq {
struct sdma_txreq txreq;
struct hfi1_sdma_header sdma_hdr;
struct hfi1_sdma_header *sdma_hdr;
int sdma_status;
int complete;
struct hfi1_ipoib_dev_priv *priv;
@ -22,26 +22,35 @@ static int hfi1_ipoib_dev_init(struct net_device *dev)
int ret;
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
ret = priv->netdev_ops->ndo_init(dev);
if (ret)
return ret;
goto out_ret;
ret = hfi1_netdev_add_data(priv->dd,
qpn_from_mac(priv->netdev->dev_addr),
dev);
if (ret < 0) {
priv->netdev_ops->ndo_uninit(dev);
return ret;
goto out_ret;
}
return 0;
out_ret:
free_percpu(dev->tstats);
dev->tstats = NULL;
return ret;
}
static void hfi1_ipoib_dev_uninit(struct net_device *dev)
{
struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
free_percpu(dev->tstats);
dev->tstats = NULL;
hfi1_netdev_remove_data(priv->dd, qpn_from_mac(priv->netdev->dev_addr));
priv->netdev_ops->ndo_uninit(dev);
@ -166,12 +175,7 @@ static void hfi1_ipoib_netdev_dtor(struct net_device *dev)
hfi1_ipoib_rxq_deinit(priv->netdev);
free_percpu(dev->tstats);
}
static void hfi1_ipoib_free_rdma_netdev(struct net_device *dev)
{
hfi1_ipoib_netdev_dtor(dev);
free_netdev(dev);
dev->tstats = NULL;
}
static void hfi1_ipoib_set_id(struct net_device *dev, int id)
@ -211,24 +215,23 @@ static int hfi1_ipoib_setup_rn(struct ib_device *device,
priv->port_num = port_num;
priv->netdev_ops = netdev->netdev_ops;
netdev->netdev_ops = &hfi1_ipoib_netdev_ops;
ib_query_pkey(device, port_num, priv->pkey_index, &priv->pkey);
rc = hfi1_ipoib_txreq_init(priv);
if (rc) {
dd_dev_err(dd, "IPoIB netdev TX init - failed(%d)\n", rc);
hfi1_ipoib_free_rdma_netdev(netdev);
return rc;
}
rc = hfi1_ipoib_rxq_init(netdev);
if (rc) {
dd_dev_err(dd, "IPoIB netdev RX init - failed(%d)\n", rc);
hfi1_ipoib_free_rdma_netdev(netdev);
hfi1_ipoib_txreq_deinit(priv);
return rc;
}
netdev->netdev_ops = &hfi1_ipoib_netdev_ops;
netdev->priv_destructor = hfi1_ipoib_netdev_dtor;
netdev->needs_free_netdev = true;
@ -122,7 +122,7 @@ static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget)
dd_dev_warn(priv->dd,
"%s: Status = 0x%x pbc 0x%llx txq = %d sde = %d\n",
__func__, tx->sdma_status,
le64_to_cpu(tx->sdma_hdr.pbc), tx->txq->q_idx,
le64_to_cpu(tx->sdma_hdr->pbc), tx->txq->q_idx,
tx->txq->sde->this_idx);
}
@ -231,7 +231,7 @@ static int hfi1_ipoib_build_tx_desc(struct ipoib_txreq *tx,
{
struct hfi1_devdata *dd = txp->dd;
struct sdma_txreq *txreq = &tx->txreq;
struct hfi1_sdma_header *sdma_hdr = &tx->sdma_hdr;
struct hfi1_sdma_header *sdma_hdr = tx->sdma_hdr;
u16 pkt_bytes =
sizeof(sdma_hdr->pbc) + (txp->hdr_dwords << 2) + tx->skb->len;
int ret;
@ -256,7 +256,7 @@ static void hfi1_ipoib_build_ib_tx_headers(struct ipoib_txreq *tx,
struct ipoib_txparms *txp)
{
struct hfi1_ipoib_dev_priv *priv = tx->txq->priv;
struct hfi1_sdma_header *sdma_hdr = &tx->sdma_hdr;
struct hfi1_sdma_header *sdma_hdr = tx->sdma_hdr;
struct sk_buff *skb = tx->skb;
struct hfi1_pportdata *ppd = ppd_from_ibp(txp->ibp);
struct rdma_ah_attr *ah_attr = txp->ah_attr;
@ -483,7 +483,7 @@ static int hfi1_ipoib_send_dma_single(struct net_device *dev,
if (likely(!ret)) {
tx_ok:
trace_sdma_output_ibhdr(txq->priv->dd,
&tx->sdma_hdr.hdr,
&tx->sdma_hdr->hdr,
ib_is_sc5(txp->flow.sc5));
hfi1_ipoib_check_queue_depth(txq);
return NETDEV_TX_OK;
@ -547,7 +547,7 @@ static int hfi1_ipoib_send_dma_list(struct net_device *dev,
hfi1_ipoib_check_queue_depth(txq);
trace_sdma_output_ibhdr(txq->priv->dd,
&tx->sdma_hdr.hdr,
&tx->sdma_hdr->hdr,
ib_is_sc5(txp->flow.sc5));
if (!netdev_xmit_more())
@ -683,7 +683,8 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
{
struct net_device *dev = priv->netdev;
u32 tx_ring_size, tx_item_size;
int i;
struct hfi1_ipoib_circ_buf *tx_ring;
int i, j;
/*
* Ring holds 1 less than tx_ring_size
@ -701,7 +702,9 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
for (i = 0; i < dev->num_tx_queues; i++) {
struct hfi1_ipoib_txq *txq = &priv->txqs[i];
struct ipoib_txreq *tx;
tx_ring = &txq->tx_ring;
iowait_init(&txq->wait,
0,
hfi1_ipoib_flush_txq,
@ -725,14 +728,19 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
priv->dd->node);
txq->tx_ring.items =
kcalloc_node(tx_ring_size, tx_item_size,
GFP_KERNEL, priv->dd->node);
kvzalloc_node(array_size(tx_ring_size, tx_item_size),
GFP_KERNEL, priv->dd->node);
if (!txq->tx_ring.items)
goto free_txqs;
txq->tx_ring.max_items = tx_ring_size;
txq->tx_ring.shift = ilog2(tx_ring_size);
txq->tx_ring.shift = ilog2(tx_item_size);
txq->tx_ring.avail = hfi1_ipoib_ring_hwat(txq);
tx_ring = &txq->tx_ring;
for (j = 0; j < tx_ring_size; j++)
hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr =
kzalloc_node(sizeof(*tx->sdma_hdr),
GFP_KERNEL, priv->dd->node);
netif_tx_napi_add(dev, &txq->napi,
hfi1_ipoib_poll_tx_ring,
@ -746,7 +754,10 @@ free_txqs:
struct hfi1_ipoib_txq *txq = &priv->txqs[i];
netif_napi_del(&txq->napi);
kfree(txq->tx_ring.items);
tx_ring = &txq->tx_ring;
for (j = 0; j < tx_ring_size; j++)
kfree(hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr);
kvfree(tx_ring->items);
}
kfree(priv->txqs);
@ -780,17 +791,20 @@ static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq)
void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv)
{
int i;
int i, j;
for (i = 0; i < priv->netdev->num_tx_queues; i++) {
struct hfi1_ipoib_txq *txq = &priv->txqs[i];
struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;
iowait_cancel_work(&txq->wait);
iowait_sdma_drain(&txq->wait);
hfi1_ipoib_drain_tx_list(txq);
netif_napi_del(&txq->napi);
hfi1_ipoib_drain_tx_ring(txq);
kfree(txq->tx_ring.items);
for (j = 0; j < tx_ring->max_items; j++)
kfree(hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr);
kvfree(tx_ring->items);
}
kfree(priv->txqs);
@ -3237,7 +3237,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
ew = kmalloc(sizeof *ew, GFP_ATOMIC);
if (!ew)
break;
return;
INIT_WORK(&ew->work, handle_port_mgmt_change_event);
memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
@ -3073,6 +3073,8 @@ do_write:
case IB_WR_ATOMIC_FETCH_AND_ADD:
if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
goto inv_err;
if (unlikely(wqe->atomic_wr.remote_addr & (sizeof(u64) - 1)))
goto inv_err;
if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
wqe->atomic_wr.remote_addr,
wqe->atomic_wr.rkey,
@ -644,14 +644,9 @@ static inline struct siw_sqe *orq_get_current(struct siw_qp *qp)
return &qp->orq[qp->orq_get % qp->attrs.orq_size];
}
static inline struct siw_sqe *orq_get_tail(struct siw_qp *qp)
{
return &qp->orq[qp->orq_put % qp->attrs.orq_size];
}
static inline struct siw_sqe *orq_get_free(struct siw_qp *qp)
{
struct siw_sqe *orq_e = orq_get_tail(qp);
struct siw_sqe *orq_e = &qp->orq[qp->orq_put % qp->attrs.orq_size];
if (READ_ONCE(orq_e->flags) == 0)
return orq_e;
@ -1153,11 +1153,12 @@ static int siw_check_tx_fence(struct siw_qp *qp)
spin_lock_irqsave(&qp->orq_lock, flags);
rreq = orq_get_current(qp);
/* free current orq entry */
rreq = orq_get_current(qp);
WRITE_ONCE(rreq->flags, 0);
qp->orq_get++;
if (qp->tx_ctx.orq_fence) {
if (unlikely(tx_waiting->wr_status != SIW_WR_QUEUED)) {
pr_warn("siw: [QP %u]: fence resume: bad status %d\n",
@ -1165,10 +1166,12 @@ static int siw_check_tx_fence(struct siw_qp *qp)
rv = -EPROTO;
goto out;
}
/* resume SQ processing */
/* resume SQ processing, if possible */
if (tx_waiting->sqe.opcode == SIW_OP_READ ||
tx_waiting->sqe.opcode == SIW_OP_READ_LOCAL_INV) {
rreq = orq_get_tail(qp);
/* SQ processing was stopped because of a full ORQ */
rreq = orq_get_free(qp);
if (unlikely(!rreq)) {
pr_warn("siw: [QP %u]: no ORQE\n", qp_id(qp));
rv = -EPROTO;
@ -1181,15 +1184,14 @@ static int siw_check_tx_fence(struct siw_qp *qp)
resume_tx = 1;
} else if (siw_orq_empty(qp)) {
/*
* SQ processing was stopped by fenced work request.
* Resume since all previous Read's are now completed.
*/
qp->tx_ctx.orq_fence = 0;
resume_tx = 1;
} else {
pr_warn("siw: [QP %u]: fence resume: orq idx: %d:%d\n",
qp_id(qp), qp->orq_get, qp->orq_put);
rv = -EPROTO;
}
}
qp->orq_get++;
out:
spin_unlock_irqrestore(&qp->orq_lock, flags);
@ -313,7 +313,8 @@ int siw_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
if (atomic_inc_return(&sdev->num_qp) > SIW_MAX_QP) {
siw_dbg(base_dev, "too many QP's\n");
return -ENOMEM;
rv = -ENOMEM;
goto err_atomic;
}
if (attrs->qp_type != IB_QPT_RC) {
siw_dbg(base_dev, "only RC QP's supported\n");
@ -615,10 +615,9 @@ static int wm97xx_register_touch(struct wm97xx *wm)
* extensions)
*/
wm->touch_dev = platform_device_alloc("wm97xx-touch", -1);
if (!wm->touch_dev) {
ret = -ENOMEM;
goto touch_err;
}
if (!wm->touch_dev)
return -ENOMEM;
platform_set_drvdata(wm->touch_dev, wm);
wm->touch_dev->dev.parent = wm->dev;
wm->touch_dev->dev.platform_data = pdata;
@ -629,9 +628,6 @@ static int wm97xx_register_touch(struct wm97xx *wm)
return 0;
touch_reg_err:
platform_device_put(wm->touch_dev);
touch_err:
input_unregister_device(wm->input_dev);
wm->input_dev = NULL;
return ret;
}
@ -639,8 +635,6 @@ touch_err:
static void wm97xx_unregister_touch(struct wm97xx *wm)
{
platform_device_unregister(wm->touch_dev);
input_unregister_device(wm->input_dev);
wm->input_dev = NULL;
}
static int _wm97xx_probe(struct wm97xx *wm)
@ -21,6 +21,7 @@
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/cc_platform.h>
#include <linux/iopoll.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/apic.h>
@ -834,6 +835,7 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu)
status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
if (status & (MMIO_STATUS_GALOG_RUN_MASK))
break;
udelay(10);
}
if (WARN_ON(i >= LOOP_TIMEOUT))
@ -569,9 +569,8 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
fn, &intel_ir_domain_ops,
iommu);
if (!iommu->ir_domain) {
irq_domain_free_fwnode(fn);
pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id);
goto out_free_bitmap;
goto out_free_fwnode;
}
iommu->ir_msi_domain =
arch_create_remap_msi_irq_domain(iommu->ir_domain,
@ -595,7 +594,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
if (dmar_enable_qi(iommu)) {
pr_err("Failed to enable queued invalidation\n");
goto out_free_bitmap;
goto out_free_ir_domain;
}
}
@ -619,6 +618,14 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
return 0;
out_free_ir_domain:
if (iommu->ir_msi_domain)
irq_domain_remove(iommu->ir_msi_domain);
iommu->ir_msi_domain = NULL;
irq_domain_remove(iommu->ir_domain);
iommu->ir_domain = NULL;
out_free_fwnode:
irq_domain_free_fwnode(fn);
out_free_bitmap:
bitmap_free(bitmap);
out_free_pages:
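The irq-remapping fix above extends a classic goto-unwind error path so each newly added label tears down exactly what was set up before the failure, in reverse order. A minimal standalone C sketch of that convention (the allocations are placeholders, not the IOMMU objects):

    #include <stdlib.h>

    /* Each failure jumps to the label that releases everything acquired so
     * far; labels are ordered so later failures fall through earlier cleanup. */
    static int setup(void)
    {
        void *a, *b, *c;

        a = malloc(64);
        if (!a)
            goto out;
        b = malloc(64);
        if (!b)
            goto out_free_a;
        c = malloc(64);
        if (!c)
            goto out_free_b;

        /* success: use a, b, c, then release them in reverse order */
        free(c);
        free(b);
        free(a);
        return 0;

    out_free_b:
        free(b);
    out_free_a:
        free(a);
    out:
        return -1;
    }

    int main(void)
    {
        return setup();
    }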
@ -349,6 +349,7 @@ EXPORT_SYMBOL_GPL(ioasid_alloc);
/**
* ioasid_get - obtain a reference to the IOASID
* @ioasid: the ID to get
*/
void ioasid_get(ioasid_t ioasid)
{
@ -207,9 +207,14 @@ static struct dev_iommu *dev_iommu_get(struct device *dev)
static void dev_iommu_free(struct device *dev)
{
iommu_fwspec_free(dev);
kfree(dev->iommu);
struct dev_iommu *param = dev->iommu;
dev->iommu = NULL;
if (param->fwspec) {
fwnode_handle_put(param->fwspec->iommu_fwnode);
kfree(param->fwspec);
}
kfree(param);
}
static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
@ -980,17 +985,6 @@ static int iommu_group_device_count(struct iommu_group *group)
return ret;
}
/**
* iommu_group_for_each_dev - iterate over each device in the group
* @group: the group
* @data: caller opaque data to be passed to callback function
* @fn: caller supplied callback function
*
* This function is called by group users to iterate over group devices.
* Callers should hold a reference count to the group during callback.
* The group->mutex is held across callbacks, which will block calls to
* iommu_group_add/remove_device.
*/
static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
int (*fn)(struct device *, void *))
{
@ -1005,7 +999,17 @@ static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
return ret;
}
/**
* iommu_group_for_each_dev - iterate over each device in the group
* @group: the group
* @data: caller opaque data to be passed to callback function
* @fn: caller supplied callback function
*
* This function is called by group users to iterate over group devices.
* Callers should hold a reference count to the group during callback.
* The group->mutex is held across callbacks, which will block calls to
* iommu_group_add/remove_device.
*/
int iommu_group_for_each_dev(struct iommu_group *group, void *data,
int (*fn)(struct device *, void *))
{
@ -3032,6 +3036,7 @@ EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);
* iommu_sva_bind_device() - Bind a process address space to a device
* @dev: the device
* @mm: the mm to bind, caller must hold a reference to it
* @drvdata: opaque data pointer to pass to bind callback
*
* Create a bond between device and address space, allowing the device to access
* the mm using the returned PASID. If a bond already exists between @device and
@ -1085,7 +1085,7 @@ static __maybe_unused int omap_iommu_runtime_resume(struct device *dev)
}
/**
* omap_iommu_suspend_prepare - prepare() dev_pm_ops implementation
* omap_iommu_prepare - prepare() dev_pm_ops implementation
* @dev: iommu device
*
* This function performs the necessary checks to determine if the IOMMU
@ -5869,10 +5869,6 @@ int md_run(struct mddev *mddev)
nowait = nowait && blk_queue_nowait(bdev_get_queue(rdev->bdev));
}
/* Set the NOWAIT flags if all underlying devices support it */
if (nowait)
blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue);
if (!bioset_initialized(&mddev->bio_set)) {
err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
if (err)
@ -6010,6 +6006,10 @@ int md_run(struct mddev *mddev)
else
blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
blk_queue_flag_set(QUEUE_FLAG_IO_STAT, mddev->queue);
/* Set the NOWAIT flags if all underlying devices support it */
if (nowait)
blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue);
}
if (pers->sync_request) {
if (mddev->kobj.sd &&
@ -36,6 +36,7 @@ config NET_DSA_LANTIQ_GSWIP
config NET_DSA_MT7530
tristate "MediaTek MT753x and MT7621 Ethernet switch support"
select NET_DSA_TAG_MTK
select MEDIATEK_GE_PHY
help
This enables support for the MediaTek MT7530, MT7531, and MT7621
Ethernet switch chips.
@ -721,7 +721,9 @@ static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
if (!channel->tx_ring)
break;
/* Deactivate the Tx timer */
del_timer_sync(&channel->tx_timer);
channel->tx_timer_active = 0;
}
}
@ -2550,6 +2552,14 @@ read_again:
buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
len += buf2_len;
if (buf2_len > rdata->rx.buf.dma_len) {
/* Hardware inconsistency within the descriptors
* that has resulted in a length underflow.
*/
error = 1;
goto skip_data;
}
if (!skb) {
skb = xgbe_create_skb(pdata, napi, rdata,
buf1_len);
@ -2579,8 +2589,10 @@ skip_data:
if (!last || context_next)
goto read_again;
if (!skb)
if (!skb || error) {
dev_kfree_skb(skb);
goto next_packet;
}
/* Be sure we don't exceed the configured MTU */
max_len = netdev->mtu + ETH_HLEN;
@ -301,7 +301,7 @@ static int gve_adminq_parse_err(struct gve_priv *priv, u32 status)
*/
static int gve_adminq_kick_and_wait(struct gve_priv *priv)
{
u32 tail, head;
int tail, head;
int i;
tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
@ -115,7 +115,8 @@ enum e1000_boards {
board_pch_lpt,
board_pch_spt,
board_pch_cnp,
board_pch_tgp
board_pch_tgp,
board_pch_adp
};
struct e1000_ps_page {
@ -502,6 +503,7 @@ extern const struct e1000_info e1000_pch_lpt_info;
extern const struct e1000_info e1000_pch_spt_info;
extern const struct e1000_info e1000_pch_cnp_info;
extern const struct e1000_info e1000_pch_tgp_info;
extern const struct e1000_info e1000_pch_adp_info;
extern const struct e1000_info e1000_es2_info;
void e1000e_ptp_init(struct e1000_adapter *adapter);
@ -6021,3 +6021,23 @@ const struct e1000_info e1000_pch_tgp_info = {
.phy_ops = &ich8_phy_ops,
.nvm_ops = &spt_nvm_ops,
};
const struct e1000_info e1000_pch_adp_info = {
.mac = e1000_pch_adp,
.flags = FLAG_IS_ICH
| FLAG_HAS_WOL
| FLAG_HAS_HW_TIMESTAMP
| FLAG_HAS_CTRLEXT_ON_LOAD
| FLAG_HAS_AMT
| FLAG_HAS_FLASH
| FLAG_HAS_JUMBO_FRAMES
| FLAG_APME_IN_WUC,
.flags2 = FLAG2_HAS_PHY_STATS
| FLAG2_HAS_EEE,
.pba = 26,
.max_hw_frame_size = 9022,
.get_variants = e1000_get_variants_ich8lan,
.mac_ops = &ich8_mac_ops,
.phy_ops = &ich8_phy_ops,
.nvm_ops = &spt_nvm_ops,
};
@ -52,6 +52,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
[board_pch_spt] = &e1000_pch_spt_info,
[board_pch_cnp] = &e1000_pch_cnp_info,
[board_pch_tgp] = &e1000_pch_tgp_info,
[board_pch_adp] = &e1000_pch_adp_info,
};
struct e1000_reg_info {
@ -6341,7 +6342,8 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
u32 mac_data;
u16 phy_data;
if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
hw->mac.type >= e1000_pch_adp) {
/* Request ME configure the device for S0ix */
mac_data = er32(H2ME);
mac_data |= E1000_H2ME_START_DPG;
@ -6490,7 +6492,8 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
u16 phy_data;
u32 i = 0;
if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
hw->mac.type >= e1000_pch_adp) {
/* Request ME unconfigure the device from S0ix */
mac_data = er32(H2ME);
mac_data &= ~E1000_H2ME_START_DPG;
@ -7898,22 +7901,22 @@ static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_adp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_adp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_adp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_adp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_adp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_adp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_adp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_adp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_adp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_adp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_adp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_adp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_adp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_adp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_adp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_adp },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
@ -144,6 +144,7 @@ enum i40e_state_t {
__I40E_VIRTCHNL_OP_PENDING,
__I40E_RECOVERY_MODE,
__I40E_VF_RESETS_DISABLED, /* disable resets during i40e_remove */
__I40E_IN_REMOVE,
__I40E_VFS_RELEASING,
/* This must be last as it determines the size of the BITMAP */
__I40E_STATE_SIZE__,
@ -5372,7 +5372,15 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
/* There is no need to reset BW when mqprio mode is on. */
if (pf->flags & I40E_FLAG_TC_MQPRIO)
return 0;
if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
if (!vsi->mqprio_qopt.qopt.hw) {
if (pf->flags & I40E_FLAG_DCB_ENABLED)
goto skip_reset;
if (IS_ENABLED(CONFIG_I40E_DCB) &&
i40e_dcb_hw_get_num_tc(&pf->hw) == 1)
goto skip_reset;
ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
if (ret)
dev_info(&pf->pdev->dev,
@ -5380,6 +5388,8 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
vsi->seid);
return ret;
}
skip_reset:
memset(&bw_data, 0, sizeof(bw_data));
bw_data.tc_valid_bits = enabled_tc;
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
@ -10853,6 +10863,9 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
bool lock_acquired)
{
int ret;
if (test_bit(__I40E_IN_REMOVE, pf->state))
return;
/* Now we wait for GRST to settle out.
* We don't have to delete the VEBs or VSIs from the hw switch
* because the reset will make them disappear.
@ -12212,6 +12225,8 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
vsi->req_queue_pairs = queue_count;
i40e_prep_for_reset(pf);
if (test_bit(__I40E_IN_REMOVE, pf->state))
return pf->alloc_rss_size;
pf->alloc_rss_size = new_rss_size;
@ -13038,6 +13053,10 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
if (need_reset)
i40e_prep_for_reset(pf);
/* VSI shall be deleted in a moment, just return EINVAL */
if (test_bit(__I40E_IN_REMOVE, pf->state))
return -EINVAL;
old_prog = xchg(&vsi->xdp_prog, prog);
if (need_reset) {
@ -15928,8 +15947,13 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
/* Grab __I40E_RESET_RECOVERY_PENDING and set __I40E_IN_REMOVE
* flags, once they are set, i40e_rebuild should not be called as
* i40e_prep_for_reset always returns early.
*/
while (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
usleep_range(1000, 2000);
set_bit(__I40E_IN_REMOVE, pf->state);
if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
set_bit(__I40E_VF_RESETS_DISABLED, pf->state);
@ -16128,6 +16152,9 @@ static void i40e_pci_error_reset_done(struct pci_dev *pdev)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
if (test_bit(__I40E_IN_REMOVE, pf->state))
return;
i40e_reset_and_rebuild(pf, false, false);
}
@ -224,7 +224,7 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
struct mlx5e_tx_wqe {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_eth_seg eth;
struct mlx5_wqe_data_seg data[0];
struct mlx5_wqe_data_seg data[];
};
struct mlx5e_rx_wqe_ll {
@ -241,8 +241,8 @@ struct mlx5e_umr_wqe {
struct mlx5_wqe_umr_ctrl_seg uctrl;
struct mlx5_mkey_seg mkc;
union {
struct mlx5_mtt inline_mtts[0];
struct mlx5_klm inline_klms[0];
DECLARE_FLEX_ARRAY(struct mlx5_mtt, inline_mtts);
DECLARE_FLEX_ARRAY(struct mlx5_klm, inline_klms);
};
};
@ -570,7 +570,8 @@ static int mlx5e_htb_convert_rate(struct mlx5e_priv *priv, u64 rate,
static void mlx5e_htb_convert_ceil(struct mlx5e_priv *priv, u64 ceil, u32 *max_average_bw)
{
*max_average_bw = div_u64(ceil, BYTES_IN_MBIT);
/* Hardware treats 0 as "unlimited", set at least 1. */
*max_average_bw = max_t(u32, div_u64(ceil, BYTES_IN_MBIT), 1);
qos_dbg(priv->mdev, "Convert: ceil %llu -> max_average_bw %u\n",
ceil, *max_average_bw);
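The QoS hunk above clamps the converted ceiling to at least 1 because the device interprets 0 as "unlimited", so a ceiling below the unit of conversion must not silently become no limit at all. A small C sketch of the same rounding rule (the BYTES_IN_MBIT value here is an assumption for the example, not necessarily the driver's constant):

    #include <stdint.h>
    #include <stdio.h>

    #define BYTES_IN_MBIT (1000000 / 8)

    /* Convert a ceiling in bytes/sec to whole Mbit/s, but never return 0,
     * since the hardware would treat 0 as "unlimited". */
    static uint32_t ceil_to_mbit(uint64_t ceil_bytes_per_sec)
    {
        uint64_t mbit = ceil_bytes_per_sec / BYTES_IN_MBIT;

        return mbit ? (uint32_t)mbit : 1;
    }

    int main(void)
    {
        printf("%u\n", ceil_to_mbit(50000));      /* below 1 Mbit/s still maps to 1 */
        printf("%u\n", ceil_to_mbit(12500000));   /* 100 Mbit/s */
        return 0;
    }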
@ -183,18 +183,7 @@ void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw,
static bool mlx5e_rep_is_lag_netdev(struct net_device *netdev)
{
struct mlx5e_rep_priv *rpriv;
struct mlx5e_priv *priv;
/* A given netdev is not a representor or not a slave of LAG configuration */
if (!mlx5e_eswitch_rep(netdev) || !netif_is_lag_port(netdev))
return false;
priv = netdev_priv(netdev);
rpriv = priv->ppriv;
/* Egress acl forward to vport is supported only non-uplink representor */
return rpriv->rep->vport != MLX5_VPORT_UPLINK;
return netif_is_lag_port(netdev) && mlx5e_eswitch_vf_rep(netdev);
}
static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *ptr)
@ -210,9 +199,6 @@ static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *ptr)
u16 fwd_vport_num;
int err;
if (!mlx5e_rep_is_lag_netdev(netdev))
return;
info = ptr;
lag_info = info->lower_state_info;
/* This is not an event of a representor becoming active slave */
@ -266,9 +252,6 @@ static void mlx5e_rep_changeupper_event(struct net_device *netdev, void *ptr)
struct net_device *lag_dev;
struct mlx5e_priv *priv;
if (!mlx5e_rep_is_lag_netdev(netdev))
return;
priv = netdev_priv(netdev);
rpriv = priv->ppriv;
lag_dev = info->upper_dev;
@ -293,6 +276,19 @@ static int mlx5e_rep_esw_bond_netevent(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
struct mlx5e_rep_priv *rpriv;
struct mlx5e_rep_bond *bond;
struct mlx5e_priv *priv;
if (!mlx5e_rep_is_lag_netdev(netdev))
return NOTIFY_DONE;
bond = container_of(nb, struct mlx5e_rep_bond, nb);
priv = netdev_priv(netdev);
rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch, REP_ETH);
/* Verify VF representor is on the same device of the bond handling the netevent. */
if (rpriv->uplink_priv.bond != bond)
return NOTIFY_DONE;
switch (event) {
case NETDEV_CHANGELOWERSTATE:
@ -491,7 +491,7 @@ void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
}
br_offloads->netdev_nb.notifier_call = mlx5_esw_bridge_switchdev_port_event;
err = register_netdevice_notifier(&br_offloads->netdev_nb);
err = register_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb);
if (err) {
esw_warn(mdev, "Failed to register bridge offloads netdevice notifier (err=%d)\n",
err);
@ -509,7 +509,9 @@ err_register_swdev_blk:
err_register_swdev:
destroy_workqueue(br_offloads->wq);
err_alloc_wq:
rtnl_lock();
mlx5_esw_bridge_cleanup(esw);
rtnl_unlock();
}
void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv)
@ -524,7 +526,7 @@ void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv)
return;
cancel_delayed_work_sync(&br_offloads->update_work);
unregister_netdevice_notifier(&br_offloads->netdev_nb);
unregister_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb);
unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
unregister_switchdev_notifier(&br_offloads->nb);
destroy_workqueue(br_offloads->wq);
@ -167,6 +167,11 @@ static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
return pi;
}
static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
return be16_to_cpu(cqe->shampo.header_entry_index) & (rq->mpwqe.shampo->hd_per_wq - 1);
}
struct mlx5e_shampo_umr {
u16 len;
};
@ -341,8 +341,10 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
/* copy the inline part if required */
if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
memcpy(eseg->inline_hdr.start, xdptxd->data, MLX5E_XDP_MIN_INLINE);
memcpy(eseg->inline_hdr.start, xdptxd->data, sizeof(eseg->inline_hdr.start));
eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
memcpy(dseg, xdptxd->data + sizeof(eseg->inline_hdr.start),
MLX5E_XDP_MIN_INLINE - sizeof(eseg->inline_hdr.start));
dma_len -= MLX5E_XDP_MIN_INLINE;
dma_addr += MLX5E_XDP_MIN_INLINE;
dseg++;
@ -157,11 +157,20 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
/* Tunnel mode */
if (mode == XFRM_MODE_TUNNEL) {
eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
if (xo->proto == IPPROTO_IPV6)
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP)
switch (xo->inner_ipproto) {
case IPPROTO_UDP:
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
fallthrough;
case IPPROTO_TCP:
/* IP | ESP | IP | [TCP | UDP] */
eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
break;
default:
break;
}
return;
}
@ -131,14 +131,17 @@ static inline bool
mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg)
{
struct xfrm_offload *xo = xfrm_offload(skb);
u8 inner_ipproto;
if (!mlx5e_ipsec_eseg_meta(eseg))
return false;
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
if (xo->inner_ipproto) {
eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM | MLX5_ETH_WQE_L3_INNER_CSUM;
inner_ipproto = xfrm_offload(skb)->inner_ipproto;
if (inner_ipproto) {
eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
if (inner_ipproto == IPPROTO_TCP || inner_ipproto == IPPROTO_UDP)
eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
} else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
sq->stats->csum_partial_inner++;
Some files were not shown because too many files have changed in this diff.