Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

include/linux/netdevice.h
net/core/dev.c
  6510ea973d ("net: Use this_cpu_inc() to increment net->core_stats")
  794c24e992 ("net-core: rx_otherhost_dropped to core_stats")
https://lore.kernel.org/all/20220428111903.5f4304e0@canb.auug.org.au/

drivers/net/wan/cosa.c
  d48fea8401 ("net: cosa: fix error check return value of register_chrdev()")
  89fbca3307 ("net: wan: remove support for COSA and SRP synchronous serial boards")
https://lore.kernel.org/all/20220428112130.1f689e5e@canb.auug.org.au/

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski 2022-04-28 13:01:50 -07:00
commit 0e55546b18
337 changed files with 2831 additions and 2125 deletions


@ -29,6 +29,7 @@ properties:
interrupts:
description:
Interrupt lines for each GPI instance
minItems: 1
maxItems: 13
"#dma-cells":


@ -58,7 +58,7 @@ properties:
type: object
$ref: regulator.yaml#
description: |
regulator description for buck1 and buck4.
regulator description for buck1 to buck4, and ldo.
properties:
regulator-allowed-modes:


@ -76,7 +76,7 @@ The beginning of an extended attribute block is in
- Checksum of the extended attribute block.
* - 0x14
- \_\_u32
- h\_reserved[2]
- h\_reserved[3]
- Zero.
The checksum is calculated against the FS UUID, the 64-bit block number


@ -235,12 +235,6 @@ offgrpjquota Turn off group journalled quota.
offprjjquota Turn off project journalled quota.
quota Enable plain user disk quota accounting.
noquota Disable all plain disk quota option.
whint_mode=%s Control which write hints are passed down to block
layer. This supports "off", "user-based", and
"fs-based". In "off" mode (default), f2fs does not pass
down hints. In "user-based" mode, f2fs tries to pass
down hints given by users. And in "fs-based" mode, f2fs
passes down hints with its policy.
alloc_mode=%s Adjust block allocation policy, which supports "reuse"
and "default".
fsync_mode=%s Control the policy of fsync. Currently supports "posix",
@ -751,70 +745,6 @@ In order to identify whether the data in the victim segment are valid or not,
F2FS manages a bitmap. Each bit represents the validity of a block, and the
bitmap is composed of a bit stream covering whole blocks in main area.
Write-hint Policy
-----------------
1) whint_mode=off. F2FS only passes down WRITE_LIFE_NOT_SET.
2) whint_mode=user-based. F2FS tries to pass down hints given by
users.
===================== ======================== ===================
User F2FS Block
===================== ======================== ===================
N/A META WRITE_LIFE_NOT_SET
N/A HOT_NODE "
N/A WARM_NODE "
N/A COLD_NODE "
ioctl(COLD) COLD_DATA WRITE_LIFE_EXTREME
extension list " "
-- buffered io
WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME
WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT
WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_NOT_SET
WRITE_LIFE_NONE " "
WRITE_LIFE_MEDIUM " "
WRITE_LIFE_LONG " "
-- direct io
WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME
WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT
WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_NOT_SET
WRITE_LIFE_NONE " WRITE_LIFE_NONE
WRITE_LIFE_MEDIUM " WRITE_LIFE_MEDIUM
WRITE_LIFE_LONG " WRITE_LIFE_LONG
===================== ======================== ===================
3) whint_mode=fs-based. F2FS passes down hints with its policy.
===================== ======================== ===================
User F2FS Block
===================== ======================== ===================
N/A META WRITE_LIFE_MEDIUM;
N/A HOT_NODE WRITE_LIFE_NOT_SET
N/A WARM_NODE "
N/A COLD_NODE WRITE_LIFE_NONE
ioctl(COLD) COLD_DATA WRITE_LIFE_EXTREME
extension list " "
-- buffered io
WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME
WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT
WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_LONG
WRITE_LIFE_NONE " "
WRITE_LIFE_MEDIUM " "
WRITE_LIFE_LONG " "
-- direct io
WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME
WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT
WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_NOT_SET
WRITE_LIFE_NONE " WRITE_LIFE_NONE
WRITE_LIFE_MEDIUM " WRITE_LIFE_MEDIUM
WRITE_LIFE_LONG " WRITE_LIFE_LONG
===================== ======================== ===================
Fallocate(2) Policy
-------------------


@ -110,7 +110,7 @@ Usage
If you want to sort by the page nums of buf, use the ``-m`` parameter.
The detailed parameters are:
fundamental function:
fundamental function::
Sort:
-a Sort by memory allocation time.
@ -122,7 +122,7 @@ Usage
-s Sort by stack trace.
-t Sort by times (default).
additional function:
additional function::
Cull:
--cull <rules>
@ -153,6 +153,7 @@ Usage
STANDARD FORMAT SPECIFIERS
==========================
::
KEY LONG DESCRIPTION
p pid process ID


@ -3913,7 +3913,9 @@ BROADCOM BNXT_EN 50 GIGABIT ETHERNET DRIVER
M: Michael Chan <michael.chan@broadcom.com>
L: netdev@vger.kernel.org
S: Supported
F: drivers/firmware/broadcom/tee_bnxt_fw.c
F: drivers/net/ethernet/broadcom/bnxt/
F: include/linux/firmware/broadcom/tee_bnxt_fw.h
BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER
M: Arend van Spriel <aspriel@gmail.com>
@ -10238,8 +10240,6 @@ F: drivers/net/ethernet/sgi/ioc3-eth.c
IOMAP FILESYSTEM LIBRARY
M: Christoph Hellwig <hch@infradead.org>
M: Darrick J. Wong <djwong@kernel.org>
M: linux-xfs@vger.kernel.org
M: linux-fsdevel@vger.kernel.org
L: linux-xfs@vger.kernel.org
L: linux-fsdevel@vger.kernel.org
S: Supported
@ -10548,6 +10548,7 @@ M: Andrey Ryabinin <ryabinin.a.a@gmail.com>
R: Alexander Potapenko <glider@google.com>
R: Andrey Konovalov <andreyknvl@gmail.com>
R: Dmitry Vyukov <dvyukov@google.com>
R: Vincenzo Frascino <vincenzo.frascino@arm.com>
L: kasan-dev@googlegroups.com
S: Maintained
F: Documentation/dev-tools/kasan.rst
@ -13639,6 +13640,7 @@ F: net/core/drop_monitor.c
NETWORKING DRIVERS
M: "David S. Miller" <davem@davemloft.net>
M: Eric Dumazet <edumazet@google.com>
M: Jakub Kicinski <kuba@kernel.org>
M: Paolo Abeni <pabeni@redhat.com>
L: netdev@vger.kernel.org
@ -13686,6 +13688,7 @@ F: tools/testing/selftests/drivers/net/dsa/
NETWORKING [GENERAL]
M: "David S. Miller" <davem@davemloft.net>
M: Eric Dumazet <edumazet@google.com>
M: Jakub Kicinski <kuba@kernel.org>
M: Paolo Abeni <pabeni@redhat.com>
L: netdev@vger.kernel.org
@ -21610,7 +21613,6 @@ F: drivers/xen/*swiotlb*
XFS FILESYSTEM
C: irc://irc.oftc.net/xfs
M: Darrick J. Wong <djwong@kernel.org>
M: linux-xfs@vger.kernel.org
L: linux-xfs@vger.kernel.org
S: Supported
W: http://xfs.org/


@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 18
SUBLEVEL = 0
EXTRAVERSION = -rc3
EXTRAVERSION = -rc4
NAME = Superb Owl
# *DOCUMENTATION*


@ -275,7 +275,7 @@
cs-gpios = <&creg_gpio 0 GPIO_ACTIVE_LOW>,
<&creg_gpio 1 GPIO_ACTIVE_LOW>;
spi-flash@0 {
flash@0 {
compatible = "sst26wf016b", "jedec,spi-nor";
reg = <0>;
#address-cells = <1>;


@ -5,7 +5,7 @@
#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#define ATOMIC_OP(op, c_op, asm_op) \
#define ATOMIC_OP(op, asm_op) \
static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned int val; \
@ -21,7 +21,7 @@ static inline void arch_atomic_##op(int i, atomic_t *v) \
: "cc"); \
} \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
#define ATOMIC_OP_RETURN(op, asm_op) \
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
unsigned int val; \
@ -42,7 +42,7 @@ static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
#define ATOMIC_FETCH_OP(op, asm_op) \
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
unsigned int val, orig; \
@ -69,23 +69,23 @@ static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
#define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \
ATOMIC_OP_RETURN(op, c_op, asm_op) \
ATOMIC_FETCH_OP(op, c_op, asm_op)
#define ATOMIC_OPS(op, asm_op) \
ATOMIC_OP(op, asm_op) \
ATOMIC_OP_RETURN(op, asm_op) \
ATOMIC_FETCH_OP(op, asm_op)
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, sub)
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \
ATOMIC_FETCH_OP(op, c_op, asm_op)
#define ATOMIC_OPS(op, asm_op) \
ATOMIC_OP(op, asm_op) \
ATOMIC_FETCH_OP(op, asm_op)
ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)
ATOMIC_OPS(and, and)
ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor)
#define arch_atomic_andnot arch_atomic_andnot


@ -98,9 +98,6 @@
/*
* 1st level paging: pgd
*/
#define pgd_index(addr) ((addr) >> PGDIR_SHIFT)
#define pgd_offset(mm, addr) (((mm)->pgd) + pgd_index(addr))
#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
#define pgd_ERROR(e) \
pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))


@ -366,7 +366,7 @@ void __kprobes disasm_instr(unsigned long addr, struct disasm_state *state,
case op_SP: /* LD_S|LDB_S b,[sp,u7], ST_S|STB_S b,[sp,u7] */
/* note: we are ignoring possibility of:
* ADD_S, SUB_S, PUSH_S, POP_S as these should not
* cause unaliged exception anyway */
* cause unaligned exception anyway */
state->write = BITS(state->words[0], 6, 6);
state->zz = BITS(state->words[0], 5, 5);
if (state->zz)
@ -503,7 +503,6 @@ int __kprobes disasm_next_pc(unsigned long pc, struct pt_regs *regs,
{
struct disasm_state instr;
memset(&instr, 0, sizeof(struct disasm_state));
disasm_instr(pc, &instr, 0, regs, cregs);
*next_pc = pc + instr.instr_len;


@ -196,6 +196,7 @@ tracesys_exit:
st r0, [sp, PT_r0] ; sys call return value in pt_regs
;POST Sys Call Ptrace Hook
mov r0, sp ; pt_regs needed
bl @syscall_trace_exit
b ret_from_exception ; NOT ret_from_system_call at is saves r0 which
; we'd done before calling post hook above


@ -319,7 +319,7 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
regs->ret = (unsigned long)ksig->ka.sa.sa_handler;
/*
* handler returns using sigreturn stub provided already by userpsace
* handler returns using sigreturn stub provided already by userspace
* If not, nuke the process right away
*/
if(!(ksig->ka.sa.sa_flags & SA_RESTORER))


@ -35,7 +35,7 @@ EXPORT_SYMBOL_GPL(smp_atomic_ops_lock);
struct plat_smp_ops __weak plat_smp_ops;
/* XXX: per cpu ? Only needed once in early seconday boot */
/* XXX: per cpu ? Only needed once in early secondary boot */
struct task_struct *secondary_idle_tsk;
/* Called from start_kernel */
@ -274,7 +274,7 @@ static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg)
* and read back old value
*/
do {
new = old = READ_ONCE(*ipi_data_ptr);
new = old = *ipi_data_ptr;
new |= 1U << msg;
} while (cmpxchg(ipi_data_ptr, old, new) != old);


@ -237,7 +237,7 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
if (state.fault)
goto fault;
/* clear any remanants of delay slot */
/* clear any remnants of delay slot */
if (delay_mode(regs)) {
regs->ret = regs->bta & ~1U;
regs->status32 &= ~STATUS_DE_MASK;


@ -401,7 +401,7 @@ static inline void __before_dc_op(const int op)
{
if (op == OP_FLUSH_N_INV) {
/* Dcache provides 2 cmd: FLUSH or INV
* INV inturn has sub-modes: DISCARD or FLUSH-BEFORE
* INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
* flush-n-inv is achieved by INV cmd but with IM=1
* So toggle INV sub-mode depending on op request and default
*/


@ -17,7 +17,6 @@ menuconfig ARCH_EXYNOS
select EXYNOS_PMU
select EXYNOS_SROM
select EXYNOS_PM_DOMAINS if PM_GENERIC_DOMAINS
select GPIOLIB
select HAVE_ARM_ARCH_TIMER if ARCH_EXYNOS5
select HAVE_ARM_SCU if SMP
select PINCTRL


@ -337,12 +337,15 @@ int __init arch_xen_unpopulated_init(struct resource **res)
if (!nr_reg) {
pr_err("No extended regions are found\n");
of_node_put(np);
return -EINVAL;
}
regs = kcalloc(nr_reg, sizeof(*regs), GFP_KERNEL);
if (!regs)
if (!regs) {
of_node_put(np);
return -ENOMEM;
}
/*
* Create resource from extended regions provided by the hypervisor to be
@ -403,8 +406,8 @@ int __init arch_xen_unpopulated_init(struct resource **res)
*res = &xen_resource;
err:
of_node_put(np);
kfree(regs);
return rc;
}
#endif
@ -424,8 +427,10 @@ static void __init xen_dt_guest_init(void)
if (of_address_to_resource(xen_node, GRANT_TABLE_INDEX, &res)) {
pr_err("Xen grant table region is not found\n");
of_node_put(xen_node);
return;
}
of_node_put(xen_node);
xen_grant_frames = res.start;
}


@ -175,8 +175,6 @@ config ARM64
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS \
if $(cc-option,-fpatchable-function-entry=2)
select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \
if DYNAMIC_FTRACE_WITH_REGS
select HAVE_EFFICIENT_UNALIGNED_ACCESS
@ -228,6 +226,17 @@ config ARM64
help
ARM 64-bit (AArch64) Linux support.
config CLANG_SUPPORTS_DYNAMIC_FTRACE_WITH_REGS
def_bool CC_IS_CLANG
# https://github.com/ClangBuiltLinux/linux/issues/1507
depends on AS_IS_GNU || (AS_IS_LLVM && (LD_IS_LLD || LD_VERSION >= 23600))
select HAVE_DYNAMIC_FTRACE_WITH_REGS
config GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_REGS
def_bool CC_IS_GCC
depends on $(cc-option,-fpatchable-function-entry=2)
select HAVE_DYNAMIC_FTRACE_WITH_REGS
config 64BIT
def_bool y
@ -678,7 +687,7 @@ config ARM64_ERRATUM_2051678
default y
help
This options adds the workaround for ARM Cortex-A510 erratum ARM64_ERRATUM_2051678.
Affected Coretex-A510 might not respect the ordering rules for
Affected Cortex-A510 might not respect the ordering rules for
hardware update of the page table's dirty bit. The workaround
is to not enable the feature on affected CPUs.


@ -535,7 +535,7 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
PMD_TYPE_TABLE)
#define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
PMD_TYPE_SECT)
#define pmd_leaf(pmd) pmd_sect(pmd)
#define pmd_leaf(pmd) (pmd_present(pmd) && !pmd_table(pmd))
#define pmd_bad(pmd) (!pmd_table(pmd))
#define pmd_leaf_size(pmd) (pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE)
@ -625,7 +625,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
#define pud_none(pud) (!pud_val(pud))
#define pud_bad(pud) (!pud_table(pud))
#define pud_present(pud) pte_present(pud_pte(pud))
#define pud_leaf(pud) pud_sect(pud)
#define pud_leaf(pud) (pud_present(pud) && !pud_table(pud))
#define pud_valid(pud) pte_valid(pud_pte(pud))
static inline void set_pud(pud_t *pudp, pud_t pud)


@ -615,23 +615,22 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
return;
}
/* Conditionally hard-enable interrupts. */
if (should_hard_irq_enable()) {
/*
* Ensure a positive value is written to the decrementer, or
* else some CPUs will continue to take decrementer exceptions.
* When the PPC_WATCHDOG (decrementer based) is configured,
* keep this at most 31 bits, which is about 4 seconds on most
* systems, which gives the watchdog a chance of catching timer
* interrupt hard lockups.
*/
if (IS_ENABLED(CONFIG_PPC_WATCHDOG))
set_dec(0x7fffffff);
else
set_dec(decrementer_max);
/*
* Ensure a positive value is written to the decrementer, or
* else some CPUs will continue to take decrementer exceptions.
* When the PPC_WATCHDOG (decrementer based) is configured,
* keep this at most 31 bits, which is about 4 seconds on most
* systems, which gives the watchdog a chance of catching timer
* interrupt hard lockups.
*/
if (IS_ENABLED(CONFIG_PPC_WATCHDOG))
set_dec(0x7fffffff);
else
set_dec(decrementer_max);
/* Conditionally hard-enable interrupts. */
if (should_hard_irq_enable())
do_hard_irq_enable();
}
#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
if (atomic_read(&ppc_n_lost_interrupts) != 0)


@ -168,9 +168,10 @@ int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
return -EINVAL;
/* Read the entry from guest memory */
addr = base + (index * sizeof(rpte));
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
kvm_vcpu_srcu_read_lock(vcpu);
ret = kvm_read_guest(kvm, addr, &rpte, sizeof(rpte));
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
kvm_vcpu_srcu_read_unlock(vcpu);
if (ret) {
if (pte_ret_p)
*pte_ret_p = addr;
@ -246,9 +247,9 @@ int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
/* Read the table to find the root of the radix tree */
ptbl = (table & PRTB_MASK) + (table_index * sizeof(entry));
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
kvm_vcpu_srcu_read_lock(vcpu);
ret = kvm_read_guest(kvm, ptbl, &entry, sizeof(entry));
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
kvm_vcpu_srcu_read_unlock(vcpu);
if (ret)
return ret;


@ -420,13 +420,19 @@ static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
tbl[idx % TCES_PER_PAGE] = tce;
}
static void kvmppc_clear_tce(struct mm_struct *mm, struct iommu_table *tbl,
unsigned long entry)
static void kvmppc_clear_tce(struct mm_struct *mm, struct kvmppc_spapr_tce_table *stt,
struct iommu_table *tbl, unsigned long entry)
{
unsigned long hpa = 0;
enum dma_data_direction dir = DMA_NONE;
unsigned long i;
unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);
iommu_tce_xchg_no_kill(mm, tbl, entry, &hpa, &dir);
for (i = 0; i < subpages; ++i) {
unsigned long hpa = 0;
enum dma_data_direction dir = DMA_NONE;
iommu_tce_xchg_no_kill(mm, tbl, io_entry + i, &hpa, &dir);
}
}
static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
@ -485,6 +491,8 @@ static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
break;
}
iommu_tce_kill(tbl, io_entry, subpages);
return ret;
}
@ -544,6 +552,8 @@ static long kvmppc_tce_iommu_map(struct kvm *kvm,
break;
}
iommu_tce_kill(tbl, io_entry, subpages);
return ret;
}
@ -590,10 +600,9 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
entry, ua, dir);
iommu_tce_kill(stit->tbl, entry, 1);
if (ret != H_SUCCESS) {
kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry);
goto unlock_exit;
}
}
@ -669,13 +678,13 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
*/
if (get_user(tce, tces + i)) {
ret = H_TOO_HARD;
goto invalidate_exit;
goto unlock_exit;
}
tce = be64_to_cpu(tce);
if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
ret = H_PARAMETER;
goto invalidate_exit;
goto unlock_exit;
}
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
@ -684,19 +693,15 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
iommu_tce_direction(tce));
if (ret != H_SUCCESS) {
kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl,
entry);
goto invalidate_exit;
kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl,
entry + i);
goto unlock_exit;
}
}
kvmppc_tce_put(stt, entry + i, tce);
}
invalidate_exit:
list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
iommu_tce_kill(stit->tbl, entry, npages);
unlock_exit:
srcu_read_unlock(&vcpu->kvm->srcu, idx);
@ -735,20 +740,16 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
continue;
if (ret == H_TOO_HARD)
goto invalidate_exit;
return ret;
WARN_ON_ONCE(1);
kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry + i);
}
}
for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);
invalidate_exit:
list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
iommu_tce_kill(stit->tbl, ioba >> stt->page_shift, npages);
return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);


@ -247,13 +247,19 @@ static void iommu_tce_kill_rm(struct iommu_table *tbl,
tbl->it_ops->tce_kill(tbl, entry, pages, true);
}
static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
unsigned long entry)
static void kvmppc_rm_clear_tce(struct kvm *kvm, struct kvmppc_spapr_tce_table *stt,
struct iommu_table *tbl, unsigned long entry)
{
unsigned long hpa = 0;
enum dma_data_direction dir = DMA_NONE;
unsigned long i;
unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);
iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
for (i = 0; i < subpages; ++i) {
unsigned long hpa = 0;
enum dma_data_direction dir = DMA_NONE;
iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, io_entry + i, &hpa, &dir);
}
}
static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
@ -316,6 +322,8 @@ static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
break;
}
iommu_tce_kill_rm(tbl, io_entry, subpages);
return ret;
}
@ -379,6 +387,8 @@ static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
break;
}
iommu_tce_kill_rm(tbl, io_entry, subpages);
return ret;
}
@ -420,10 +430,8 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
stit->tbl, entry, ua, dir);
iommu_tce_kill_rm(stit->tbl, entry, 1);
if (ret != H_SUCCESS) {
kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry);
return ret;
}
}
@ -561,7 +569,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
ua = 0;
if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua)) {
ret = H_PARAMETER;
goto invalidate_exit;
goto unlock_exit;
}
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
@ -570,19 +578,15 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
iommu_tce_direction(tce));
if (ret != H_SUCCESS) {
kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl,
entry);
goto invalidate_exit;
kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl,
entry + i);
goto unlock_exit;
}
}
kvmppc_rm_tce_put(stt, entry + i, tce);
}
invalidate_exit:
list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
iommu_tce_kill_rm(stit->tbl, entry, npages);
unlock_exit:
if (!prereg)
arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
@ -620,20 +624,16 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
continue;
if (ret == H_TOO_HARD)
goto invalidate_exit;
return ret;
WARN_ON_ONCE_RM(1);
kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry + i);
}
}
for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value);
invalidate_exit:
list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
iommu_tce_kill_rm(stit->tbl, ioba >> stt->page_shift, npages);
return ret;
}


@ -306,10 +306,10 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
/* copy parameters in */
hv_ptr = kvmppc_get_gpr(vcpu, 4);
regs_ptr = kvmppc_get_gpr(vcpu, 5);
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm_vcpu_srcu_read_lock(vcpu);
err = kvmhv_read_guest_state_and_regs(vcpu, &l2_hv, &l2_regs,
hv_ptr, regs_ptr);
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
kvm_vcpu_srcu_read_unlock(vcpu);
if (err)
return H_PARAMETER;
@ -410,10 +410,10 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
byteswap_hv_regs(&l2_hv);
byteswap_pt_regs(&l2_regs);
}
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm_vcpu_srcu_read_lock(vcpu);
err = kvmhv_write_guest_state_and_regs(vcpu, &l2_hv, &l2_regs,
hv_ptr, regs_ptr);
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
kvm_vcpu_srcu_read_unlock(vcpu);
if (err)
return H_AUTHORITY;
@ -600,16 +600,16 @@ long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu)
goto not_found;
/* Write what was loaded into our buffer back to the L1 guest */
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm_vcpu_srcu_read_lock(vcpu);
rc = kvm_vcpu_write_guest(vcpu, gp_to, buf, n);
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
kvm_vcpu_srcu_read_unlock(vcpu);
if (rc)
goto not_found;
} else {
/* Load the data to be stored from the L1 guest into our buf */
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm_vcpu_srcu_read_lock(vcpu);
rc = kvm_vcpu_read_guest(vcpu, gp_from, buf, n);
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
kvm_vcpu_srcu_read_unlock(vcpu);
if (rc)
goto not_found;


@ -229,9 +229,9 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
*/
args_phys = kvmppc_get_gpr(vcpu, 4) & KVM_PAM;
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm_vcpu_srcu_read_lock(vcpu);
rc = kvm_read_guest(vcpu->kvm, args_phys, &args, sizeof(args));
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
kvm_vcpu_srcu_read_unlock(vcpu);
if (rc)
goto fail;


@ -425,9 +425,9 @@ int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
return EMULATE_DONE;
}
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm_vcpu_srcu_read_lock(vcpu);
rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size);
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
kvm_vcpu_srcu_read_unlock(vcpu);
if (rc)
return EMULATE_DO_MMIO;


@ -3,11 +3,11 @@
obj-y += callchain.o callchain_$(BITS).o perf_regs.o
obj-$(CONFIG_COMPAT) += callchain_32.o
obj-$(CONFIG_PPC_PERF_CTRS) += core-book3s.o bhrb.o
obj-$(CONFIG_PPC_PERF_CTRS) += core-book3s.o
obj64-$(CONFIG_PPC_PERF_CTRS) += ppc970-pmu.o power5-pmu.o \
power5+-pmu.o power6-pmu.o power7-pmu.o \
isa207-common.o power8-pmu.o power9-pmu.o \
generic-compat-pmu.o power10-pmu.o
generic-compat-pmu.o power10-pmu.o bhrb.o
obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o
obj-$(CONFIG_PPC_POWERNV) += imc-pmu.o


@ -91,8 +91,8 @@ extern u64 PERF_REG_EXTENDED_MASK;
/* Table of alternatives, sorted by column 0 */
static const unsigned int power10_event_alternatives[][MAX_ALT] = {
{ PM_CYC_ALT, PM_CYC },
{ PM_INST_CMPL_ALT, PM_INST_CMPL },
{ PM_CYC_ALT, PM_CYC },
};
static int power10_get_alternatives(u64 event, unsigned int flags, u64 alt[])


@ -133,11 +133,11 @@ int p9_dd22_bl_ev[] = {
/* Table of alternatives, sorted by column 0 */
static const unsigned int power9_event_alternatives[][MAX_ALT] = {
{ PM_INST_DISP, PM_INST_DISP_ALT },
{ PM_RUN_CYC_ALT, PM_RUN_CYC },
{ PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL },
{ PM_LD_MISS_L1, PM_LD_MISS_L1_ALT },
{ PM_BR_2PATH, PM_BR_2PATH_ALT },
{ PM_INST_DISP, PM_INST_DISP_ALT },
{ PM_RUN_CYC_ALT, PM_RUN_CYC },
{ PM_LD_MISS_L1, PM_LD_MISS_L1_ALT },
{ PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL },
};
static int power9_get_alternatives(u64 event, unsigned int flags, u64 alt[])


@ -38,7 +38,7 @@ config SOC_VIRT
select SIFIVE_PLIC
select PM_GENERIC_DOMAINS if PM
select PM_GENERIC_DOMAINS_OF if PM && OF
select RISCV_SBI_CPUIDLE if CPU_IDLE
select RISCV_SBI_CPUIDLE if CPU_IDLE && RISCV_SBI
help
This enables support for QEMU Virt Machine.


@ -193,9 +193,6 @@ struct kvm_vcpu_arch {
/* Don't run the VCPU (blocked) */
bool pause;
/* SRCU lock index for in-kernel run loop */
int srcu_idx;
};
static inline void kvm_arch_hardware_unsetup(void) {}


@ -38,14 +38,16 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
sizeof(kvm_vcpu_stats_desc),
};
#define KVM_RISCV_ISA_ALLOWED (riscv_isa_extension_mask(a) | \
riscv_isa_extension_mask(c) | \
riscv_isa_extension_mask(d) | \
riscv_isa_extension_mask(f) | \
riscv_isa_extension_mask(i) | \
riscv_isa_extension_mask(m) | \
riscv_isa_extension_mask(s) | \
riscv_isa_extension_mask(u))
#define KVM_RISCV_ISA_DISABLE_ALLOWED (riscv_isa_extension_mask(d) | \
riscv_isa_extension_mask(f))
#define KVM_RISCV_ISA_DISABLE_NOT_ALLOWED (riscv_isa_extension_mask(a) | \
riscv_isa_extension_mask(c) | \
riscv_isa_extension_mask(i) | \
riscv_isa_extension_mask(m))
#define KVM_RISCV_ISA_ALLOWED (KVM_RISCV_ISA_DISABLE_ALLOWED | \
KVM_RISCV_ISA_DISABLE_NOT_ALLOWED)
static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
{
@ -219,7 +221,8 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
switch (reg_num) {
case KVM_REG_RISCV_CONFIG_REG(isa):
if (!vcpu->arch.ran_atleast_once) {
vcpu->arch.isa = reg_val;
/* Ignore the disable request for these extensions */
vcpu->arch.isa = reg_val | KVM_RISCV_ISA_DISABLE_NOT_ALLOWED;
vcpu->arch.isa &= riscv_isa_extension_base(NULL);
vcpu->arch.isa &= KVM_RISCV_ISA_ALLOWED;
kvm_riscv_vcpu_fp_reset(vcpu);
@ -724,13 +727,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
/* Mark this VCPU ran at least once */
vcpu->arch.ran_atleast_once = true;
vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm_vcpu_srcu_read_lock(vcpu);
/* Process MMIO value returned from user-space */
if (run->exit_reason == KVM_EXIT_MMIO) {
ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run);
if (ret) {
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
kvm_vcpu_srcu_read_unlock(vcpu);
return ret;
}
}
@ -739,13 +742,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
if (run->exit_reason == KVM_EXIT_RISCV_SBI) {
ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run);
if (ret) {
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
kvm_vcpu_srcu_read_unlock(vcpu);
return ret;
}
}
if (run->immediate_exit) {
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
kvm_vcpu_srcu_read_unlock(vcpu);
return -EINTR;
}
@ -784,7 +787,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
*/
vcpu->mode = IN_GUEST_MODE;
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
kvm_vcpu_srcu_read_unlock(vcpu);
smp_mb__after_srcu_read_unlock();
/*
@ -802,7 +805,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
vcpu->mode = OUTSIDE_GUEST_MODE;
local_irq_enable();
preempt_enable();
vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm_vcpu_srcu_read_lock(vcpu);
continue;
}
@ -846,7 +849,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
preempt_enable();
vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm_vcpu_srcu_read_lock(vcpu);
ret = kvm_riscv_vcpu_exit(vcpu, run, &trap);
}
@ -855,7 +858,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
vcpu_put(vcpu);
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
kvm_vcpu_srcu_read_unlock(vcpu);
return ret;
}


@ -456,9 +456,9 @@ static int stage2_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu)
{
if (!kvm_arch_vcpu_runnable(vcpu)) {
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
kvm_vcpu_srcu_read_unlock(vcpu);
kvm_vcpu_halt(vcpu);
vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm_vcpu_srcu_read_lock(vcpu);
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
}
}


@ -718,6 +718,7 @@ retry:
if (!check_l4) {
disable_pgtable_l5();
check_l4 = true;
memset(early_pg_dir, 0, PAGE_SIZE);
goto retry;
}
disable_pgtable_l4();


@ -1334,11 +1334,11 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
hrtimer_start(&vcpu->arch.ckc_timer, sltime, HRTIMER_MODE_REL);
VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
no_timer:
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
kvm_vcpu_srcu_read_unlock(vcpu);
kvm_vcpu_halt(vcpu);
vcpu->valid_wakeup = false;
__unset_cpu_idle(vcpu);
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm_vcpu_srcu_read_lock(vcpu);
hrtimer_cancel(&vcpu->arch.ckc_timer);
return 0;


@ -4237,14 +4237,14 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
* We try to hold kvm->srcu during most of vcpu_run (except when run-
* ning the guest), so that memslots (and other stuff) are protected
*/
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm_vcpu_srcu_read_lock(vcpu);
do {
rc = vcpu_pre_run(vcpu);
if (rc)
break;
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
kvm_vcpu_srcu_read_unlock(vcpu);
/*
* As PF_VCPU will be used in fault handler, between
* guest_enter and guest_exit should be no uaccess.
@ -4281,12 +4281,12 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
__enable_cpu_timer_accounting(vcpu);
guest_exit_irqoff();
local_irq_enable();
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm_vcpu_srcu_read_lock(vcpu);
rc = vcpu_post_run(vcpu, exit_reason);
} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
kvm_vcpu_srcu_read_unlock(vcpu);
return rc;
}


@ -1091,7 +1091,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
handle_last_fault(vcpu, vsie_page);
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
kvm_vcpu_srcu_read_unlock(vcpu);
/* save current guest state of bp isolation override */
guest_bp_isolation = test_thread_flag(TIF_ISOLATE_BP_GUEST);
@ -1133,7 +1133,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
if (!guest_bp_isolation)
clear_thread_flag(TIF_ISOLATE_BP_GUEST);
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm_vcpu_srcu_read_lock(vcpu);
if (rc == -EINTR) {
VCPU_EVENT(vcpu, 3, "%s", "machine check");


@ -35,6 +35,7 @@
#define flush_page_for_dma(addr) \
sparc32_cachetlb_ops->page_for_dma(addr)
struct page;
void sparc_flush_page_to_ram(struct page *page);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1


@ -51,7 +51,7 @@
* perf code: 0x02
* Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
* SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
* TGL,TNT,RKL,ADL,RPL
* TGL,TNT,RKL,ADL,RPL,SPR
* Scope: Core
* MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
* perf code: 0x03
@ -62,7 +62,7 @@
* perf code: 0x00
* Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
* KBL,CML,ICL,ICX,TGL,TNT,RKL,ADL,
* RPL
* RPL,SPR
* Scope: Package (physical package)
* MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
* perf code: 0x01
@ -74,7 +74,7 @@
* perf code: 0x02
* Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
* SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
* TGL,TNT,RKL,ADL,RPL
* TGL,TNT,RKL,ADL,RPL,SPR
* Scope: Package (physical package)
* MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
* perf code: 0x03
@ -675,6 +675,7 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, &icl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &icx_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &icx_cstates),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &icx_cstates),
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &icl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &icl_cstates),


@ -118,6 +118,7 @@ KVM_X86_OP_OPTIONAL(mem_enc_register_region)
KVM_X86_OP_OPTIONAL(mem_enc_unregister_region)
KVM_X86_OP_OPTIONAL(vm_copy_enc_context_from)
KVM_X86_OP_OPTIONAL(vm_move_enc_context_from)
KVM_X86_OP_OPTIONAL(guest_memory_reclaimed)
KVM_X86_OP(get_msr_feature)
KVM_X86_OP(can_emulate_instruction)
KVM_X86_OP(apic_init_signal_blocked)


@ -1484,6 +1484,7 @@ struct kvm_x86_ops {
int (*mem_enc_unregister_region)(struct kvm *kvm, struct kvm_enc_region *argp);
int (*vm_copy_enc_context_from)(struct kvm *kvm, unsigned int source_fd);
int (*vm_move_enc_context_from)(struct kvm *kvm, unsigned int source_fd);
void (*guest_memory_reclaimed)(struct kvm *kvm);
int (*get_msr_feature)(struct kvm_msr_entry *entry);


@ -69,6 +69,7 @@ static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __align
DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
static int has_steal_clock = 0;
static int has_guest_poll = 0;
/*
* No need for any "IO delay" on KVM
*/
@ -706,14 +707,26 @@ static int kvm_cpu_down_prepare(unsigned int cpu)
static int kvm_suspend(void)
{
u64 val = 0;
kvm_guest_cpu_offline(false);
#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
rdmsrl(MSR_KVM_POLL_CONTROL, val);
has_guest_poll = !(val & 1);
#endif
return 0;
}
static void kvm_resume(void)
{
kvm_cpu_online(raw_smp_processor_id());
#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL) && has_guest_poll)
wrmsrl(MSR_KVM_POLL_CONTROL, 0);
#endif
}
static struct syscore_ops kvm_syscore_ops = {


@ -138,6 +138,15 @@ static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
return sample_period;
}
static inline void pmc_update_sample_period(struct kvm_pmc *pmc)
{
if (!pmc->perf_event || pmc->is_paused)
return;
perf_event_period(pmc->perf_event,
get_sample_period(pmc, pmc->counter));
}
void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);


@ -257,6 +257,7 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
if (pmc) {
pmc->counter += data - pmc_read_counter(pmc);
pmc_update_sample_period(pmc);
return 0;
}
/* MSR_EVNTSELn */


@ -2226,51 +2226,47 @@ int sev_cpu_init(struct svm_cpu_data *sd)
* Pages used by hardware to hold guest encrypted state must be flushed before
* returning them to the system.
*/
static void sev_flush_guest_memory(struct vcpu_svm *svm, void *va,
unsigned long len)
static void sev_flush_encrypted_page(struct kvm_vcpu *vcpu, void *va)
{
int asid = to_kvm_svm(vcpu->kvm)->sev_info.asid;
/*
* If hardware enforced cache coherency for encrypted mappings of the
* same physical page is supported, nothing to do.
* Note! The address must be a kernel address, as regular page walk
* checks are performed by VM_PAGE_FLUSH, i.e. operating on a user
* address is non-deterministic and unsafe. This function deliberately
* takes a pointer to deter passing in a user address.
*/
if (boot_cpu_has(X86_FEATURE_SME_COHERENT))
unsigned long addr = (unsigned long)va;
/*
* If CPU enforced cache coherency for encrypted mappings of the
* same physical page is supported, use CLFLUSHOPT instead. NOTE: cache
* flush is still needed in order to work properly with DMA devices.
*/
if (boot_cpu_has(X86_FEATURE_SME_COHERENT)) {
clflush_cache_range(va, PAGE_SIZE);
return;
/*
* If the VM Page Flush MSR is supported, use it to flush the page
* (using the page virtual address and the guest ASID).
*/
if (boot_cpu_has(X86_FEATURE_VM_PAGE_FLUSH)) {
struct kvm_sev_info *sev;
unsigned long va_start;
u64 start, stop;
/* Align start and stop to page boundaries. */
va_start = (unsigned long)va;
start = (u64)va_start & PAGE_MASK;
stop = PAGE_ALIGN((u64)va_start + len);
if (start < stop) {
sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info;
while (start < stop) {
wrmsrl(MSR_AMD64_VM_PAGE_FLUSH,
start | sev->asid);
start += PAGE_SIZE;
}
return;
}
WARN(1, "Address overflow, using WBINVD\n");
}
/*
* Hardware should always have one of the above features,
* but if not, use WBINVD and issue a warning.
* VM Page Flush takes a host virtual address and a guest ASID. Fall
* back to WBINVD if this faults so as not to make any problems worse
* by leaving stale encrypted data in the cache.
*/
WARN_ONCE(1, "Using WBINVD to flush guest memory\n");
if (WARN_ON_ONCE(wrmsrl_safe(MSR_AMD64_VM_PAGE_FLUSH, addr | asid)))
goto do_wbinvd;
return;
do_wbinvd:
wbinvd_on_all_cpus();
}
void sev_guest_memory_reclaimed(struct kvm *kvm)
{
if (!sev_guest(kvm))
return;
wbinvd_on_all_cpus();
}
@ -2284,7 +2280,8 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu)
svm = to_svm(vcpu);
if (vcpu->arch.guest_state_protected)
sev_flush_guest_memory(svm, svm->sev_es.vmsa, PAGE_SIZE);
sev_flush_encrypted_page(vcpu, svm->sev_es.vmsa);
__free_page(virt_to_page(svm->sev_es.vmsa));
if (svm->sev_es.ghcb_sa_free)


@ -4620,6 +4620,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.mem_enc_ioctl = sev_mem_enc_ioctl,
.mem_enc_register_region = sev_mem_enc_register_region,
.mem_enc_unregister_region = sev_mem_enc_unregister_region,
.guest_memory_reclaimed = sev_guest_memory_reclaimed,
.vm_copy_enc_context_from = sev_vm_copy_enc_context_from,
.vm_move_enc_context_from = sev_vm_move_enc_context_from,


@ -609,6 +609,8 @@ int sev_mem_enc_unregister_region(struct kvm *kvm,
struct kvm_enc_region *range);
int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd);
int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd);
void sev_guest_memory_reclaimed(struct kvm *kvm);
void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);


@ -4618,6 +4618,11 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
}
if (vmx->nested.update_vmcs01_apicv_status) {
vmx->nested.update_vmcs01_apicv_status = false;
kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
}
if ((vm_exit_reason != -1) &&
(enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)))
vmx->nested.need_vmcs12_to_shadow_sync = true;


@ -431,15 +431,11 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
!(msr & MSR_PMC_FULL_WIDTH_BIT))
data = (s64)(s32)data;
pmc->counter += data - pmc_read_counter(pmc);
if (pmc->perf_event && !pmc->is_paused)
perf_event_period(pmc->perf_event,
get_sample_period(pmc, data));
pmc_update_sample_period(pmc);
return 0;
} else if ((pmc = get_fixed_pmc(pmu, msr))) {
pmc->counter += data - pmc_read_counter(pmc);
if (pmc->perf_event && !pmc->is_paused)
perf_event_period(pmc->perf_event,
get_sample_period(pmc, data));
pmc_update_sample_period(pmc);
return 0;
} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
if (data == pmc->eventsel)


@ -4174,6 +4174,11 @@ static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
if (is_guest_mode(vcpu)) {
vmx->nested.update_vmcs01_apicv_status = true;
return;
}
pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
if (cpu_has_secondary_exec_ctrls()) {
if (kvm_vcpu_apicv_active(vcpu))


@ -183,6 +183,7 @@ struct nested_vmx {
bool change_vmcs01_virtual_apic_mode;
bool reload_vmcs01_apic_access_page;
bool update_vmcs01_cpu_dirty_logging;
bool update_vmcs01_apicv_status;
/*
* Enlightened VMCS has been enabled. It does not mean that L1 has to


@ -9111,7 +9111,7 @@ static void kvm_apicv_init(struct kvm *kvm)
if (!enable_apicv)
set_or_clear_apicv_inhibit(inhibits,
APICV_INHIBIT_REASON_ABSENT, true);
APICV_INHIBIT_REASON_DISABLE, true);
}
static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id)
@ -9889,6 +9889,11 @@ void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
}
void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
{
static_call_cond(kvm_x86_guest_memory_reclaimed)(kvm);
}
static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
{
if (!lapic_in_kernel(vcpu))
@ -10097,7 +10102,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
/* Store vcpu->apicv_active before vcpu->mode. */
smp_store_release(&vcpu->mode, IN_GUEST_MODE);
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
kvm_vcpu_srcu_read_unlock(vcpu);
/*
* 1) We should set ->mode before checking ->requests. Please see
@ -10128,7 +10133,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
smp_wmb();
local_irq_enable();
preempt_enable();
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm_vcpu_srcu_read_lock(vcpu);
r = 1;
goto cancel_injection;
}
@ -10254,7 +10259,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
local_irq_enable();
preempt_enable();
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm_vcpu_srcu_read_lock(vcpu);
/*
* Profile KVM exit RIPs:
@ -10284,7 +10289,7 @@ out:
}
/* Called within kvm->srcu read side. */
static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
static inline int vcpu_block(struct kvm_vcpu *vcpu)
{
bool hv_timer;
@ -10300,12 +10305,12 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
if (hv_timer)
kvm_lapic_switch_to_sw_timer(vcpu);
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
kvm_vcpu_srcu_read_unlock(vcpu);
if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
kvm_vcpu_halt(vcpu);
else
kvm_vcpu_block(vcpu);
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
kvm_vcpu_srcu_read_lock(vcpu);
if (hv_timer)
kvm_lapic_switch_to_hv_timer(vcpu);
@ -10347,7 +10352,6 @@ static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
static int vcpu_run(struct kvm_vcpu *vcpu)
{
int r;
struct kvm *kvm = vcpu->kvm;
vcpu->arch.l1tf_flush_l1d = true;
@ -10355,7 +10359,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
if (kvm_vcpu_running(vcpu)) {
r = vcpu_enter_guest(vcpu);
} else {
r = vcpu_block(kvm, vcpu);
r = vcpu_block(vcpu);
}
if (r <= 0)
@ -10374,9 +10378,9 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
}
if (__xfer_to_guest_mode_work_pending()) {
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
kvm_vcpu_srcu_read_unlock(vcpu);
r = xfer_to_guest_mode_handle_work(vcpu);
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
kvm_vcpu_srcu_read_lock(vcpu);
if (r)
return r;
}
@ -10387,12 +10391,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
{
int r;
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
r = kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
return r;
return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
}
static int complete_emulated_pio(struct kvm_vcpu *vcpu)
@ -10484,7 +10483,6 @@ static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
struct kvm_run *kvm_run = vcpu->run;
struct kvm *kvm = vcpu->kvm;
int r;
vcpu_load(vcpu);
@ -10492,7 +10490,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
kvm_run->flags = 0;
kvm_load_guest_fpu(vcpu);
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm_vcpu_srcu_read_lock(vcpu);
if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
if (kvm_run->immediate_exit) {
r = -EINTR;
@ -10504,9 +10502,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
*/
WARN_ON_ONCE(kvm_lapic_hv_timer_in_use(vcpu));
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
kvm_vcpu_srcu_read_unlock(vcpu);
kvm_vcpu_block(vcpu);
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
kvm_vcpu_srcu_read_lock(vcpu);
if (kvm_apic_accept_events(vcpu) < 0) {
r = 0;
@ -10567,7 +10565,7 @@ out:
if (kvm_run->kvm_valid_regs)
store_regs(vcpu);
post_kvm_run_save(vcpu);
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
kvm_vcpu_srcu_read_unlock(vcpu);
kvm_sigset_deactivate(vcpu);
vcpu_put(vcpu);
@ -10985,6 +10983,9 @@ static void kvm_arch_vcpu_guestdbg_update_apicv_inhibit(struct kvm *kvm)
struct kvm_vcpu *vcpu;
unsigned long i;
if (!enable_apicv)
return;
down_write(&kvm->arch.apicv_update_lock);
kvm_for_each_vcpu(i, vcpu, kvm) {
@ -11196,8 +11197,21 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
r = kvm_create_lapic(vcpu, lapic_timer_advance_ns);
if (r < 0)
goto fail_mmu_destroy;
if (kvm_apicv_activated(vcpu->kvm))
/*
* Defer evaluating inhibits until the vCPU is first run, as
* this vCPU will not get notified of any changes until this
* vCPU is visible to other vCPUs (marked online and added to
* the set of vCPUs). Opportunistically mark APICv active as
* VMX in particularly is highly unlikely to have inhibits.
* Ignore the current per-VM APICv state so that vCPU creation
* is guaranteed to run with a deterministic value, the request
* will ensure the vCPU gets the correct state before VM-Entry.
*/
if (enable_apicv) {
vcpu->arch.apicv_active = true;
kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
}
} else
static_branch_inc(&kvm_has_noapic_vcpu);


@ -96,11 +96,6 @@ static const struct dmi_system_id processor_power_dmi_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
(void *)1},
/* T40 can not handle C3 idle state */
{ set_max_cstate, "IBM ThinkPad T40", {
DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
DMI_MATCH(DMI_PRODUCT_NAME, "23737CU")},
(void *)2},
{},
};
@ -795,7 +790,8 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr)
if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2 ||
cx->type == ACPI_STATE_C3) {
state->enter_dead = acpi_idle_play_dead;
drv->safe_state_index = count;
if (cx->type != ACPI_STATE_C3)
drv->safe_state_index = count;
}
/*
* Halt-induced C1 is not good for ->enter_s2idle, because it


@ -77,6 +77,8 @@ static int marvell_cable_detect(struct ata_port *ap)
switch(ap->port_no)
{
case 0:
if (!ap->ioaddr.bmdma_addr)
return ATA_CBL_PATA_UNK;
if (ioread8(ap->ioaddr.bmdma_addr + 1) & 1)
return ATA_CBL_PATA40;
return ATA_CBL_PATA80;


@ -33,6 +33,22 @@ config BLK_DEV_FD
To compile this driver as a module, choose M here: the
module will be called floppy.
config BLK_DEV_FD_RAWCMD
bool "Support for raw floppy disk commands (DEPRECATED)"
depends on BLK_DEV_FD
help
If you want to use actual physical floppies and expect to do
special low-level hardware accesses to them (access and use
non-standard formats, for example), then enable this.
Note that the code enabled by this option is rarely used and
might be unstable or insecure, and distros should not enable it.
Note: FDRAWCMD is deprecated and will be removed from the kernel
in the near future.
If unsure, say N.
config AMIGA_FLOPPY
tristate "Amiga floppy support"
depends on AMIGA


@ -2982,6 +2982,8 @@ static const char *drive_name(int type, int drive)
return "(null)";
}
#ifdef CONFIG_BLK_DEV_FD_RAWCMD
/* raw commands */
static void raw_cmd_done(int flag)
{
@ -3181,6 +3183,35 @@ static int raw_cmd_ioctl(int cmd, void __user *param)
return ret;
}
static int floppy_raw_cmd_ioctl(int type, int drive, int cmd,
void __user *param)
{
int ret;
pr_warn_once("Note: FDRAWCMD is deprecated and will be removed from the kernel in the near future.\n");
if (type)
return -EINVAL;
if (lock_fdc(drive))
return -EINTR;
set_floppy(drive);
ret = raw_cmd_ioctl(cmd, param);
if (ret == -EINTR)
return -EINTR;
process_fd_request();
return ret;
}
#else /* CONFIG_BLK_DEV_FD_RAWCMD */
static int floppy_raw_cmd_ioctl(int type, int drive, int cmd,
void __user *param)
{
return -EOPNOTSUPP;
}
#endif
static int invalidate_drive(struct block_device *bdev)
{
/* invalidate the buffer track to force a reread */
@ -3369,7 +3400,6 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
{
int drive = (long)bdev->bd_disk->private_data;
int type = ITYPE(drive_state[drive].fd_device);
int i;
int ret;
int size;
union inparam {
@ -3520,16 +3550,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
outparam = &write_errors[drive];
break;
case FDRAWCMD:
if (type)
return -EINVAL;
if (lock_fdc(drive))
return -EINTR;
set_floppy(drive);
i = raw_cmd_ioctl(cmd, (void __user *)param);
if (i == -EINTR)
return -EINTR;
process_fd_request();
return i;
return floppy_raw_cmd_ioctl(type, drive, cmd, (void __user *)param);
case FDTWADDLE:
if (lock_fdc(drive))
return -EINTR;


@ -24,13 +24,17 @@
#define CLK_HW_DIV 2
#define LUT_TURBO_IND 1
#define GT_IRQ_STATUS BIT(2)
#define HZ_PER_KHZ 1000
struct qcom_cpufreq_soc_data {
u32 reg_enable;
u32 reg_domain_state;
u32 reg_dcvs_ctrl;
u32 reg_freq_lut;
u32 reg_volt_lut;
u32 reg_intr_clr;
u32 reg_current_vote;
u32 reg_perf_state;
u8 lut_row_size;
@ -280,37 +284,46 @@ static void qcom_get_related_cpus(int index, struct cpumask *m)
}
}
static unsigned int qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
{
unsigned int val = readl_relaxed(data->base + data->soc_data->reg_current_vote);
unsigned int lval;
return (val & 0x3FF) * 19200;
if (data->soc_data->reg_current_vote)
lval = readl_relaxed(data->base + data->soc_data->reg_current_vote) & 0x3ff;
else
lval = readl_relaxed(data->base + data->soc_data->reg_domain_state) & 0xff;
return lval * xo_rate;
}
static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
{
struct cpufreq_policy *policy = data->policy;
int cpu = cpumask_first(policy->cpus);
int cpu = cpumask_first(policy->related_cpus);
struct device *dev = get_cpu_device(cpu);
unsigned long freq_hz, throttled_freq;
struct dev_pm_opp *opp;
unsigned int freq;
/*
* Get the h/w throttled frequency, normalize it using the
* registered opp table and use it to calculate thermal pressure.
*/
freq = qcom_lmh_get_throttle_freq(data);
freq_hz = freq * HZ_PER_KHZ;
freq_hz = qcom_lmh_get_throttle_freq(data);
opp = dev_pm_opp_find_freq_floor(dev, &freq_hz);
if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE)
dev_pm_opp_find_freq_ceil(dev, &freq_hz);
opp = dev_pm_opp_find_freq_ceil(dev, &freq_hz);
throttled_freq = freq_hz / HZ_PER_KHZ;
if (IS_ERR(opp)) {
dev_warn(dev, "Can't find the OPP for throttling: %pe!\n", opp);
} else {
throttled_freq = freq_hz / HZ_PER_KHZ;
/* Update thermal pressure (the boost frequencies are accepted) */
arch_update_thermal_pressure(policy->related_cpus, throttled_freq);
/* Update thermal pressure (the boost frequencies are accepted) */
arch_update_thermal_pressure(policy->related_cpus, throttled_freq);
dev_pm_opp_put(opp);
}
/*
* In the unlikely case policy is unregistered do not enable
@ -350,6 +363,10 @@ static irqreturn_t qcom_lmh_dcvs_handle_irq(int irq, void *data)
disable_irq_nosync(c_data->throttle_irq);
schedule_delayed_work(&c_data->throttle_work, 0);
if (c_data->soc_data->reg_intr_clr)
writel_relaxed(GT_IRQ_STATUS,
c_data->base + c_data->soc_data->reg_intr_clr);
return IRQ_HANDLED;
}
@ -365,9 +382,11 @@ static const struct qcom_cpufreq_soc_data qcom_soc_data = {
static const struct qcom_cpufreq_soc_data epss_soc_data = {
.reg_enable = 0x0,
.reg_domain_state = 0x20,
.reg_dcvs_ctrl = 0xb0,
.reg_freq_lut = 0x100,
.reg_volt_lut = 0x200,
.reg_intr_clr = 0x308,
.reg_perf_state = 0x320,
.lut_row_size = 4,
};
@ -417,16 +436,39 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
return 0;
}
static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
static int qcom_cpufreq_hw_cpu_online(struct cpufreq_policy *policy)
{
struct qcom_cpufreq_data *data = policy->driver_data;
struct platform_device *pdev = cpufreq_get_driver_data();
int ret;
ret = irq_set_affinity_hint(data->throttle_irq, policy->cpus);
if (ret)
dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
data->irq_name, data->throttle_irq);
return ret;
}
static int qcom_cpufreq_hw_cpu_offline(struct cpufreq_policy *policy)
{
struct qcom_cpufreq_data *data = policy->driver_data;
if (data->throttle_irq <= 0)
return;
return 0;
mutex_lock(&data->throttle_lock);
data->cancel_throttle = true;
mutex_unlock(&data->throttle_lock);
cancel_delayed_work_sync(&data->throttle_work);
irq_set_affinity_hint(data->throttle_irq, NULL);
return 0;
}
static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
{
free_irq(data->throttle_irq, data);
}
@ -583,6 +625,8 @@ static struct cpufreq_driver cpufreq_qcom_hw_driver = {
.get = qcom_cpufreq_hw_get,
.init = qcom_cpufreq_hw_cpu_init,
.exit = qcom_cpufreq_hw_cpu_exit,
.online = qcom_cpufreq_hw_cpu_online,
.offline = qcom_cpufreq_hw_cpu_offline,
.register_em = cpufreq_register_em_with_opp,
.fast_switch = qcom_cpufreq_hw_fast_switch,
.name = "qcom-cpufreq-hw",


@ -98,8 +98,10 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
return -ENOMEM;
ret = sun50i_cpufreq_get_efuse(&speed);
if (ret)
if (ret) {
kfree(opp_tables);
return ret;
}
snprintf(name, MAX_NAME_LEN, "speed%d", speed);


@ -22,6 +22,7 @@
#include <linux/pm_runtime.h>
#include <asm/cpuidle.h>
#include <asm/sbi.h>
#include <asm/smp.h>
#include <asm/suspend.h>
#include "dt_idle_states.h"


@ -1453,7 +1453,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
{
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
struct at_xdmac_desc *desc, *_desc;
struct at_xdmac_desc *desc, *_desc, *iter;
struct list_head *descs_list;
enum dma_status ret;
int residue, retry;
@ -1568,11 +1568,13 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
* microblock.
*/
descs_list = &desc->descs_list;
list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
list_for_each_entry_safe(iter, _desc, descs_list, desc_node) {
dwidth = at_xdmac_get_dwidth(iter->lld.mbr_cfg);
residue -= (iter->lld.mbr_ubc & 0xffffff) << dwidth;
if ((iter->lld.mbr_nda & 0xfffffffc) == cur_nda) {
desc = iter;
break;
}
}
residue += cur_ubc << dwidth;


@ -414,14 +414,18 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
SET_CH_32(dw, chan->dir, chan->id, ch_control1,
(DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
/* Linked list */
#ifdef CONFIG_64BIT
SET_CH_64(dw, chan->dir, chan->id, llp.reg,
chunk->ll_region.paddr);
/* llp is not aligned on 64bit -> keep 32bit accesses */
SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
lower_32_bits(chunk->ll_region.paddr));
SET_CH_32(dw, chan->dir, chan->id, llp.msb,
upper_32_bits(chunk->ll_region.paddr));
#else /* CONFIG_64BIT */
SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
lower_32_bits(chunk->ll_region.paddr));
SET_CH_32(dw, chan->dir, chan->id, llp.msb,
upper_32_bits(chunk->ll_region.paddr));
SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
lower_32_bits(chunk->ll_region.paddr));
SET_CH_32(dw, chan->dir, chan->id, llp.msb,
upper_32_bits(chunk->ll_region.paddr));
#endif /* CONFIG_64BIT */
}
/* Doorbell */
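For reference, the split above keeps the linked-list pointer programmed as two naturally aligned 32-bit writes even on 64-bit builds, because the llp register pair is not 64-bit aligned. A minimal standalone sketch of the lower/upper split, with a hypothetical reg_write32() standing in for the driver's SET_CH_32() helper and an example address:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical 32-bit register write used only for this sketch. */
static void reg_write32(const char *name, uint32_t val)
{
	printf("%s <- 0x%08x\n", name, val);
}

static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	uint64_t ll_paddr = 0x000000012345f000ULL;	/* example DMA address */

	/* Program a 64-bit pointer using two 32-bit accesses. */
	reg_write32("llp.lsb", lower_32_bits(ll_paddr));
	reg_write32("llp.msb", upper_32_bits(ll_paddr));
	return 0;
}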

View File

@ -373,7 +373,6 @@ static void idxd_wq_device_reset_cleanup(struct idxd_wq *wq)
{
lockdep_assert_held(&wq->wq_lock);
idxd_wq_disable_cleanup(wq);
wq->size = 0;
wq->group = NULL;
}
@ -701,14 +700,17 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
if (wq->state == IDXD_WQ_ENABLED) {
idxd_wq_disable_cleanup(wq);
idxd_wq_device_reset_cleanup(wq);
wq->state = IDXD_WQ_DISABLED;
}
idxd_wq_device_reset_cleanup(wq);
}
}
void idxd_device_clear_state(struct idxd_device *idxd)
{
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
return;
idxd_groups_clear_state(idxd);
idxd_engines_clear_state(idxd);
idxd_device_wqs_clear_state(idxd);

View File

@ -150,14 +150,15 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
*/
int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc)
{
int rc, retries = 0;
unsigned int retries = wq->enqcmds_retries;
int rc;
do {
rc = enqcmds(portal, desc);
if (rc == 0)
break;
cpu_relax();
} while (retries++ < wq->enqcmds_retries);
} while (retries--);
return rc;
}
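The rewritten loop makes one initial attempt plus wq->enqcmds_retries retries. A small userspace sketch of the same bounded do/while pattern, with a stub submit() standing in for enqcmds() (all names here are illustrative only):

#include <stdio.h>

/* Stub standing in for enqcmds(): fail the first two attempts. */
static int submit(void)
{
	static int calls;
	return ++calls < 3 ? -1 : 0;	/* succeeds on the third attempt */
}

static int submit_with_retries(unsigned int retries)
{
	int rc;

	do {
		rc = submit();
		if (rc == 0)
			break;
	} while (retries--);		/* 1 initial try + 'retries' retries */

	return rc;
}

int main(void)
{
	printf("rc = %d\n", submit_with_retries(5));
	return 0;
}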

View File

@ -905,6 +905,9 @@ static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attr
u64 xfer_size;
int rc;
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
return -EPERM;
if (wq->state != IDXD_WQ_DISABLED)
return -EPERM;
@ -939,6 +942,9 @@ static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribu
u64 batch_size;
int rc;
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
return -EPERM;
if (wq->state != IDXD_WQ_DISABLED)
return -EPERM;

View File

@ -198,12 +198,12 @@ struct sdma_script_start_addrs {
s32 per_2_firi_addr;
s32 mcu_2_firi_addr;
s32 uart_2_per_addr;
s32 uart_2_mcu_ram_addr;
s32 uart_2_mcu_addr;
s32 per_2_app_addr;
s32 mcu_2_app_addr;
s32 per_2_per_addr;
s32 uartsh_2_per_addr;
s32 uartsh_2_mcu_ram_addr;
s32 uartsh_2_mcu_addr;
s32 per_2_shp_addr;
s32 mcu_2_shp_addr;
s32 ata_2_mcu_addr;
@ -232,8 +232,8 @@ struct sdma_script_start_addrs {
s32 mcu_2_ecspi_addr;
s32 mcu_2_sai_addr;
s32 sai_2_mcu_addr;
s32 uart_2_mcu_addr;
s32 uartsh_2_mcu_addr;
s32 uart_2_mcu_rom_addr;
s32 uartsh_2_mcu_rom_addr;
/* End of v3 array */
s32 mcu_2_zqspi_addr;
/* End of v4 array */
@ -1796,17 +1796,17 @@ static void sdma_add_scripts(struct sdma_engine *sdma,
saddr_arr[i] = addr_arr[i];
/*
* get uart_2_mcu_addr/uartsh_2_mcu_addr rom script specially because
* they are now replaced by uart_2_mcu_ram_addr/uartsh_2_mcu_ram_addr
* to be compatible with legacy freescale/nxp sdma firmware, and they
* are located in the bottom part of sdma_script_start_addrs which are
* beyond the SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1.
* For compatibility with NXP internal legacy kernel before 4.19 which
* is based on uart ram script and mainline kernel based on uart rom
* script, both uart ram/rom scripts are present in newer sdma
* firmware. Use the rom versions if they are present (V3 or newer).
*/
if (addr->uart_2_mcu_addr)
sdma->script_addrs->uart_2_mcu_addr = addr->uart_2_mcu_addr;
if (addr->uartsh_2_mcu_addr)
sdma->script_addrs->uartsh_2_mcu_addr = addr->uartsh_2_mcu_addr;
if (sdma->script_number >= SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3) {
if (addr->uart_2_mcu_rom_addr)
sdma->script_addrs->uart_2_mcu_addr = addr->uart_2_mcu_rom_addr;
if (addr->uartsh_2_mcu_rom_addr)
sdma->script_addrs->uartsh_2_mcu_addr = addr->uartsh_2_mcu_rom_addr;
}
}
static void sdma_load_firmware(const struct firmware *fw, void *context)
@ -1885,7 +1885,7 @@ static int sdma_event_remap(struct sdma_engine *sdma)
u32 reg, val, shift, num_map, i;
int ret = 0;
if (IS_ERR(np) || IS_ERR(gpr_np))
if (IS_ERR(np) || !gpr_np)
goto out;
event_remap = of_find_property(np, propname, NULL);
@ -1933,7 +1933,7 @@ static int sdma_event_remap(struct sdma_engine *sdma)
}
out:
if (!IS_ERR(gpr_np))
if (gpr_np)
of_node_put(gpr_np);
return ret;

View File

@ -274,7 +274,7 @@ static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
unsigned int status;
int ret;
ret = pm_runtime_get_sync(mtkd->ddev.dev);
ret = pm_runtime_resume_and_get(mtkd->ddev.dev);
if (ret < 0) {
pm_runtime_put_noidle(chan->device->dev);
return ret;
@ -288,18 +288,21 @@ static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
ret = readx_poll_timeout(readl, c->base + VFF_EN,
status, !status, 10, 100);
if (ret)
return ret;
goto err_pm;
ret = request_irq(c->irq, mtk_uart_apdma_irq_handler,
IRQF_TRIGGER_NONE, KBUILD_MODNAME, chan);
if (ret < 0) {
dev_err(chan->device->dev, "Can't request dma IRQ\n");
return -EINVAL;
ret = -EINVAL;
goto err_pm;
}
if (mtkd->support_33bits)
mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B);
err_pm:
pm_runtime_put_noidle(mtkd->ddev.dev);
return ret;
}

View File

@ -164,6 +164,11 @@
#define ECC_STAT_CECNT_SHIFT 8
#define ECC_STAT_BITNUM_MASK 0x7F
/* ECC error count register definitions */
#define ECC_ERRCNT_UECNT_MASK 0xFFFF0000
#define ECC_ERRCNT_UECNT_SHIFT 16
#define ECC_ERRCNT_CECNT_MASK 0xFFFF
/* DDR QOS Interrupt register definitions */
#define DDR_QOS_IRQ_STAT_OFST 0x20200
#define DDR_QOSUE_MASK 0x4
@ -423,15 +428,16 @@ static int zynqmp_get_error_info(struct synps_edac_priv *priv)
base = priv->baseaddr;
p = &priv->stat;
regval = readl(base + ECC_ERRCNT_OFST);
p->ce_cnt = regval & ECC_ERRCNT_CECNT_MASK;
p->ue_cnt = (regval & ECC_ERRCNT_UECNT_MASK) >> ECC_ERRCNT_UECNT_SHIFT;
if (!p->ce_cnt)
goto ue_err;
regval = readl(base + ECC_STAT_OFST);
if (!regval)
return 1;
p->ce_cnt = (regval & ECC_STAT_CECNT_MASK) >> ECC_STAT_CECNT_SHIFT;
p->ue_cnt = (regval & ECC_STAT_UECNT_MASK) >> ECC_STAT_UECNT_SHIFT;
if (!p->ce_cnt)
goto ue_err;
p->ceinfo.bitpos = (regval & ECC_STAT_BITNUM_MASK);
regval = readl(base + ECC_CEADDR0_OFST);
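The new ECC_ERRCNT_* definitions read both counters from a single 32-bit register: correctable errors in the low half, uncorrectable errors in the high half. A standalone sketch of that unpacking, reusing the masks from the hunk with a made-up register value:

#include <stdint.h>
#include <stdio.h>

#define ECC_ERRCNT_UECNT_MASK	0xFFFF0000
#define ECC_ERRCNT_UECNT_SHIFT	16
#define ECC_ERRCNT_CECNT_MASK	0xFFFF

int main(void)
{
	uint32_t regval = 0x00030001;	/* example: 3 UE, 1 CE */
	uint32_t ce_cnt = regval & ECC_ERRCNT_CECNT_MASK;
	uint32_t ue_cnt = (regval & ECC_ERRCNT_UECNT_MASK) >> ECC_ERRCNT_UECNT_SHIFT;

	printf("ce=%u ue=%u\n", ce_cnt, ue_cnt);
	return 0;
}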

View File

@ -955,8 +955,7 @@ static int cs_dsp_create_control(struct cs_dsp *dsp,
ctl->alg_region = *alg_region;
if (subname && dsp->fw_ver >= 2) {
ctl->subname_len = subname_len;
ctl->subname = kmemdup(subname,
strlen(subname) + 1, GFP_KERNEL);
ctl->subname = kasprintf(GFP_KERNEL, "%.*s", subname_len, subname);
if (!ctl->subname) {
ret = -ENOMEM;
goto err_ctl;
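The kasprintf(GFP_KERNEL, "%.*s", subname_len, subname) form bounds the copy to subname_len bytes instead of relying on strlen() of the source buffer. A minimal userspace analogue of the bounded duplication (kasprintf is kernel-only, so this sketch uses malloc() plus snprintf()):

#include <stdio.h>
#include <stdlib.h>

/* Duplicate at most 'len' bytes of 'src' into a fresh NUL-terminated string. */
static char *dup_bounded(const char *src, int len)
{
	char *s = malloc(len + 1);

	if (!s)
		return NULL;
	snprintf(s, len + 1, "%.*s", len, src);
	return s;
}

int main(void)
{
	/* Source is not NUL-terminated after the 6 bytes we care about. */
	const char raw[] = { 'V', 'O', 'L', 'U', 'M', 'E', 'X', 'X' };
	char *subname = dup_bounded(raw, 6);

	if (subname) {
		printf("%s\n", subname);	/* prints "VOLUME" */
		free(subname);
	}
	return 0;
}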

View File

@ -1601,8 +1601,6 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
gpiochip_set_irq_hooks(gc);
acpi_gpiochip_request_interrupts(gc);
/*
* Using barrier() here to prevent compiler from reordering
* gc->irq.initialized before initialization of above
@ -1612,6 +1610,8 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
gc->irq.initialized = true;
acpi_gpiochip_request_interrupts(gc);
return 0;
}

View File

@ -128,6 +128,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
goto free_chunk;
}
mutex_lock(&p->ctx->lock);
/* skip guilty context job */
if (atomic_read(&p->ctx->guilty) == 1) {
ret = -ECANCELED;
@ -709,6 +711,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
dma_fence_put(parser->fence);
if (parser->ctx) {
mutex_unlock(&parser->ctx->lock);
amdgpu_ctx_put(parser->ctx);
}
if (parser->bo_list)
@ -1157,6 +1160,9 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
{
int i, r;
/* TODO: Investigate why we still need the context lock */
mutex_unlock(&p->ctx->lock);
for (i = 0; i < p->nchunks; ++i) {
struct amdgpu_cs_chunk *chunk;
@ -1167,32 +1173,34 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
r = amdgpu_cs_process_fence_dep(p, chunk);
if (r)
return r;
goto out;
break;
case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
if (r)
return r;
goto out;
break;
case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
if (r)
return r;
goto out;
break;
case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
if (r)
return r;
goto out;
break;
case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
if (r)
return r;
goto out;
break;
}
}
return 0;
out:
mutex_lock(&p->ctx->lock);
return r;
}
static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
@ -1368,6 +1376,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
goto out;
r = amdgpu_cs_submit(&parser, cs);
out:
amdgpu_cs_parser_fini(&parser, r, reserved_buffers);

View File

@ -237,6 +237,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
kref_init(&ctx->refcount);
spin_lock_init(&ctx->ring_lock);
mutex_init(&ctx->lock);
ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
ctx->reset_counter_query = ctx->reset_counter;
@ -357,6 +358,7 @@ static void amdgpu_ctx_fini(struct kref *ref)
drm_dev_exit(idx);
}
mutex_destroy(&ctx->lock);
kfree(ctx);
}

View File

@ -49,6 +49,7 @@ struct amdgpu_ctx {
bool preamble_presented;
int32_t init_priority;
int32_t override_priority;
struct mutex lock;
atomic_t guilty;
unsigned long ras_counter_ce;
unsigned long ras_counter_ue;

View File

@ -214,29 +214,6 @@ int drm_of_encoder_active_endpoint(struct device_node *node,
}
EXPORT_SYMBOL_GPL(drm_of_encoder_active_endpoint);
static int find_panel_or_bridge(struct device_node *node,
struct drm_panel **panel,
struct drm_bridge **bridge)
{
if (panel) {
*panel = of_drm_find_panel(node);
if (!IS_ERR(*panel))
return 0;
/* Clear the panel pointer in case of error. */
*panel = NULL;
}
/* No panel found yet, check for a bridge next. */
if (bridge) {
*bridge = of_drm_find_bridge(node);
if (*bridge)
return 0;
}
return -EPROBE_DEFER;
}
/**
* drm_of_find_panel_or_bridge - return connected panel or bridge device
* @np: device tree node containing encoder output ports
@ -259,44 +236,49 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
struct drm_panel **panel,
struct drm_bridge **bridge)
{
struct device_node *node;
int ret;
int ret = -EPROBE_DEFER;
struct device_node *remote;
if (!panel && !bridge)
return -EINVAL;
if (panel)
*panel = NULL;
if (bridge)
*bridge = NULL;
/* Check for a graph on the device node first. */
if (of_graph_is_present(np)) {
node = of_graph_get_remote_node(np, port, endpoint);
if (node) {
ret = find_panel_or_bridge(node, panel, bridge);
of_node_put(node);
/*
* of_graph_get_remote_node() produces a noisy error message if port
* node isn't found and the absence of the port is a legit case here,
* so at first we silently check whether graph presents in the
* device-tree node.
*/
if (!of_graph_is_present(np))
return -ENODEV;
if (!ret)
return 0;
remote = of_graph_get_remote_node(np, port, endpoint);
if (!remote)
return -ENODEV;
if (panel) {
*panel = of_drm_find_panel(remote);
if (!IS_ERR(*panel))
ret = 0;
else
*panel = NULL;
}
/* No panel found yet, check for a bridge next. */
if (bridge) {
if (ret) {
*bridge = of_drm_find_bridge(remote);
if (*bridge)
ret = 0;
} else {
*bridge = NULL;
}
}
/* Otherwise check for any child node other than port/ports. */
for_each_available_child_of_node(np, node) {
if (of_node_name_eq(node, "port") ||
of_node_name_eq(node, "ports"))
continue;
ret = find_panel_or_bridge(node, panel, bridge);
of_node_put(node);
/* Stop at the first found occurrence. */
if (!ret)
return 0;
}
return -EPROBE_DEFER;
of_node_put(remote);
return ret;
}
EXPORT_SYMBOL_GPL(drm_of_find_panel_or_bridge);

View File

@ -4383,13 +4383,20 @@ intel_dp_update_420(struct intel_dp *intel_dp)
static void
intel_dp_set_edid(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
struct edid *edid;
bool vrr_capable;
intel_dp_unset_edid(intel_dp);
edid = intel_dp_get_edid(intel_dp);
connector->detect_edid = edid;
vrr_capable = intel_vrr_is_capable(&connector->base);
drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] VRR capable: %s\n",
connector->base.base.id, connector->base.name, str_yes_no(vrr_capable));
drm_connector_set_vrr_capable_property(&connector->base, vrr_capable);
intel_dp_update_dfp(intel_dp, edid);
intel_dp_update_420(intel_dp);
@ -4422,6 +4429,9 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
intel_dp->dfp.ycbcr_444_to_420 = false;
connector->base.ycbcr_420_allowed = false;
drm_connector_set_vrr_capable_property(&connector->base,
false);
}
static int
@ -4572,14 +4582,9 @@ static int intel_dp_get_modes(struct drm_connector *connector)
int num_modes = 0;
edid = intel_connector->detect_edid;
if (edid) {
if (edid)
num_modes = intel_connector_update_modes(connector, edid);
if (intel_vrr_is_capable(connector))
drm_connector_set_vrr_capable_property(connector,
true);
}
/* Also add fixed mode, which may or may not be present in EDID */
if (intel_dp_is_edp(intel_attached_dp(intel_connector)) &&
intel_connector->panel.fixed_mode) {

View File

@ -887,6 +887,20 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
/* Wa_16011303918:adl-p */
if (crtc_state->vrr.enable &&
IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 not enabled, not compatible with HW stepping + VRR\n");
return false;
}
if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
return false;
}
if (HAS_PSR2_SEL_FETCH(dev_priv)) {
if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
!HAS_PSR_HW_TRACKING(dev_priv)) {
@ -900,12 +914,12 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
if (!crtc_state->enable_psr2_sel_fetch &&
IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
drm_dbg_kms(&dev_priv->drm, "PSR2 HW tracking is not supported this Display stepping\n");
return false;
goto unsupported;
}
if (!psr2_granularity_check(intel_dp, crtc_state)) {
drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
return false;
goto unsupported;
}
if (!crtc_state->enable_psr2_sel_fetch &&
@ -914,25 +928,15 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
"PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
crtc_hdisplay, crtc_vdisplay,
psr_max_h, psr_max_v);
return false;
}
if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
return false;
}
/* Wa_16011303918:adl-p */
if (crtc_state->vrr.enable &&
IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 not enabled, not compatible with HW stepping + VRR\n");
return false;
goto unsupported;
}
tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
return true;
unsupported:
crtc_state->enable_psr2_sel_fetch = false;
return false;
}
void intel_psr_compute_config(struct intel_dp *intel_dp,

View File

@ -274,7 +274,7 @@ bool msm_use_mmu(struct drm_device *dev)
struct msm_drm_private *priv = dev->dev_private;
/* a2xx comes with its own MMU */
return priv->is_a2xx || device_iommu_mapped(dev->dev);
return priv->is_a2xx || iommu_present(&platform_bus_type);
}
static int msm_init_vram(struct drm_device *dev)

View File

@ -229,7 +229,7 @@ static void rpi_touchscreen_i2c_write(struct rpi_touchscreen *ts,
ret = i2c_smbus_write_byte_data(ts->i2c, reg, val);
if (ret)
dev_err(&ts->dsi->dev, "I2C write failed: %d\n", ret);
dev_err(&ts->i2c->dev, "I2C write failed: %d\n", ret);
}
static int rpi_touchscreen_write(struct rpi_touchscreen *ts, u16 reg, u32 val)
@ -265,7 +265,7 @@ static int rpi_touchscreen_noop(struct drm_panel *panel)
return 0;
}
static int rpi_touchscreen_enable(struct drm_panel *panel)
static int rpi_touchscreen_prepare(struct drm_panel *panel)
{
struct rpi_touchscreen *ts = panel_to_ts(panel);
int i;
@ -295,6 +295,13 @@ static int rpi_touchscreen_enable(struct drm_panel *panel)
rpi_touchscreen_write(ts, DSI_STARTDSI, 0x01);
msleep(100);
return 0;
}
static int rpi_touchscreen_enable(struct drm_panel *panel)
{
struct rpi_touchscreen *ts = panel_to_ts(panel);
/* Turn on the backlight. */
rpi_touchscreen_i2c_write(ts, REG_PWM, 255);
@ -349,7 +356,7 @@ static int rpi_touchscreen_get_modes(struct drm_panel *panel,
static const struct drm_panel_funcs rpi_touchscreen_funcs = {
.disable = rpi_touchscreen_disable,
.unprepare = rpi_touchscreen_noop,
.prepare = rpi_touchscreen_noop,
.prepare = rpi_touchscreen_prepare,
.enable = rpi_touchscreen_enable,
.get_modes = rpi_touchscreen_get_modes,
};

View File

@ -96,7 +96,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
struct dma_fence *f;
int r = 0;
dma_resv_for_each_fence(&cursor, resv, shared, f) {
dma_resv_for_each_fence(&cursor, resv, !shared, f) {
fence = to_radeon_fence(f);
if (fence && fence->rdev == rdev)
radeon_sync_fence(sync, fence);

View File

@ -2,6 +2,9 @@
config DRM_VC4
tristate "Broadcom VC4 Graphics"
depends on ARCH_BCM || ARCH_BCM2835 || COMPILE_TEST
# Make sure not 'y' when RASPBERRYPI_FIRMWARE is 'm'. This can only
# happen when COMPILE_TEST=y, hence the added !RASPBERRYPI_FIRMWARE.
depends on RASPBERRYPI_FIRMWARE || (COMPILE_TEST && !RASPBERRYPI_FIRMWARE)
depends on DRM
depends on SND && SND_SOC
depends on COMMON_CLK

View File

@ -846,7 +846,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
unsigned long phy_clock;
int ret;
ret = pm_runtime_get_sync(dev);
ret = pm_runtime_resume_and_get(dev);
if (ret) {
DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->variant->port);
return;

View File

@ -46,6 +46,21 @@ vmw_buffer_object(struct ttm_buffer_object *bo)
return container_of(bo, struct vmw_buffer_object, base);
}
/**
* bo_is_vmw - check if the buffer object is a &vmw_buffer_object
* @bo: ttm buffer object to be checked
*
* Uses destroy function associated with the object to determine if this is
* a &vmw_buffer_object.
*
* Returns:
* true if the object is of &vmw_buffer_object type, false if not.
*/
static bool bo_is_vmw(struct ttm_buffer_object *bo)
{
return bo->destroy == &vmw_bo_bo_free ||
bo->destroy == &vmw_gem_destroy;
}
/**
* vmw_bo_pin_in_placement - Validate a buffer to placement.
@ -615,8 +630,9 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
vmw_bo_unreference(&vbo);
if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
ret != -EBUSY)) {
if (unlikely(ret != 0)) {
if (ret == -ERESTARTSYS || ret == -EBUSY)
return -EBUSY;
DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
(unsigned int) arg->handle);
return ret;
@ -798,7 +814,7 @@ int vmw_dumb_create(struct drm_file *file_priv,
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
/* Is @bo embedded in a struct vmw_buffer_object? */
if (vmw_bo_is_vmw_bo(bo))
if (!bo_is_vmw(bo))
return;
/* Kill any cached kernel maps before swapout */
@ -822,7 +838,7 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
struct vmw_buffer_object *vbo;
/* Make sure @bo is embedded in a struct vmw_buffer_object? */
if (vmw_bo_is_vmw_bo(bo))
if (!bo_is_vmw(bo))
return;
vbo = container_of(bo, struct vmw_buffer_object, base);
@ -843,22 +859,3 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
vmw_resource_unbind_list(vbo);
}
/**
* vmw_bo_is_vmw_bo - check if the buffer object is a &vmw_buffer_object
* @bo: buffer object to be checked
*
* Uses destroy function associated with the object to determine if this is
* a &vmw_buffer_object.
*
* Returns:
* true if the object is of &vmw_buffer_object type, false if not.
*/
bool vmw_bo_is_vmw_bo(struct ttm_buffer_object *bo)
{
if (bo->destroy == &vmw_bo_bo_free ||
bo->destroy == &vmw_gem_destroy)
return true;
return false;
}

View File

@ -998,13 +998,10 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
goto out_no_fman;
}
drm_vma_offset_manager_init(&dev_priv->vma_manager,
DRM_FILE_PAGE_OFFSET_START,
DRM_FILE_PAGE_OFFSET_SIZE);
ret = ttm_device_init(&dev_priv->bdev, &vmw_bo_driver,
dev_priv->drm.dev,
dev_priv->drm.anon_inode->i_mapping,
&dev_priv->vma_manager,
dev_priv->drm.vma_offset_manager,
dev_priv->map_mode == vmw_dma_alloc_coherent,
false);
if (unlikely(ret != 0)) {
@ -1174,7 +1171,6 @@ static void vmw_driver_unload(struct drm_device *dev)
vmw_devcaps_destroy(dev_priv);
vmw_vram_manager_fini(dev_priv);
ttm_device_fini(&dev_priv->bdev);
drm_vma_offset_manager_destroy(&dev_priv->vma_manager);
vmw_release_device_late(dev_priv);
vmw_fence_manager_takedown(dev_priv->fman);
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
@ -1398,7 +1394,7 @@ vmw_get_unmapped_area(struct file *file, unsigned long uaddr,
struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
return drm_get_unmapped_area(file, uaddr, len, pgoff, flags,
&dev_priv->vma_manager);
dev_priv->drm.vma_offset_manager);
}
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,

View File

@ -683,6 +683,9 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
container_of(base, struct vmw_user_surface, prime.base);
struct vmw_resource *res = &user_srf->srf.res;
if (base->shareable && res && res->backup)
drm_gem_object_put(&res->backup->base.base);
*p_base = NULL;
vmw_resource_unreference(&res);
}
@ -857,6 +860,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
vmw_bo_reference(res->backup);
drm_gem_object_get(&res->backup->base.base);
}
tmp = vmw_resource_reference(&srf->res);
@ -1513,7 +1517,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
&res->backup);
if (ret == 0)
vmw_bo_reference(res->backup);
}
if (unlikely(ret != 0)) {
@ -1561,6 +1564,8 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
rep->buffer_size = res->backup->base.base.size;
rep->buffer_handle = backup_handle;
if (user_srf->prime.base.shareable)
drm_gem_object_get(&res->backup->base.base);
} else {
rep->buffer_map_handle = 0;
rep->buffer_size = 0;

View File

@ -69,7 +69,12 @@ static unsigned int preferred_states_mask;
static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
static unsigned long auto_demotion_disable_flags;
static bool disable_promotion_to_c1e;
static enum {
C1E_PROMOTION_PRESERVE,
C1E_PROMOTION_ENABLE,
C1E_PROMOTION_DISABLE
} c1e_promotion = C1E_PROMOTION_PRESERVE;
struct idle_cpu {
struct cpuidle_state *state_table;
@ -1398,8 +1403,6 @@ static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { }
static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; }
#endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */
static void c1e_promotion_enable(void);
/**
* ivt_idle_state_table_update - Tune the idle states table for Ivy Town.
*
@ -1578,17 +1581,14 @@ static void __init spr_idle_state_table_update(void)
unsigned long long msr;
/* Check if user prefers C1E over C1. */
if (preferred_states_mask & BIT(2)) {
if (preferred_states_mask & BIT(1))
/* Both can't be enabled, stick to the defaults. */
return;
if ((preferred_states_mask & BIT(2)) &&
!(preferred_states_mask & BIT(1))) {
/* Disable C1 and enable C1E. */
spr_cstates[0].flags |= CPUIDLE_FLAG_UNUSABLE;
spr_cstates[1].flags &= ~CPUIDLE_FLAG_UNUSABLE;
/* Enable C1E using the "C1E promotion" bit. */
c1e_promotion_enable();
disable_promotion_to_c1e = false;
c1e_promotion = C1E_PROMOTION_ENABLE;
}
/*
@ -1754,7 +1754,9 @@ static int intel_idle_cpu_init(unsigned int cpu)
if (auto_demotion_disable_flags)
auto_demotion_disable();
if (disable_promotion_to_c1e)
if (c1e_promotion == C1E_PROMOTION_ENABLE)
c1e_promotion_enable();
else if (c1e_promotion == C1E_PROMOTION_DISABLE)
c1e_promotion_disable();
return 0;
@ -1833,7 +1835,8 @@ static int __init intel_idle_init(void)
if (icpu) {
cpuidle_state_table = icpu->state_table;
auto_demotion_disable_flags = icpu->auto_demotion_disable_flags;
disable_promotion_to_c1e = icpu->disable_promotion_to_c1e;
if (icpu->disable_promotion_to_c1e)
c1e_promotion = C1E_PROMOTION_DISABLE;
if (icpu->use_acpi || force_use_acpi)
intel_idle_acpi_cst_extract();
} else if (!intel_idle_acpi_cst_extract()) {

View File

@ -61,6 +61,14 @@ static irqreturn_t cypress_sf_irq_handler(int irq, void *devid)
return IRQ_HANDLED;
}
static void cypress_sf_disable_regulators(void *arg)
{
struct cypress_sf_data *touchkey = arg;
regulator_bulk_disable(ARRAY_SIZE(touchkey->regulators),
touchkey->regulators);
}
static int cypress_sf_probe(struct i2c_client *client)
{
struct cypress_sf_data *touchkey;
@ -121,6 +129,12 @@ static int cypress_sf_probe(struct i2c_client *client)
return error;
}
error = devm_add_action_or_reset(&client->dev,
cypress_sf_disable_regulators,
touchkey);
if (error)
return error;
touchkey->input_dev = devm_input_allocate_device(&client->dev);
if (!touchkey->input_dev) {
dev_err(&client->dev, "Failed to allocate input device\n");

View File

@ -393,7 +393,7 @@ static int omap4_keypad_probe(struct platform_device *pdev)
* revision register.
*/
error = pm_runtime_get_sync(dev);
if (error) {
if (error < 0) {
dev_err(dev, "pm_runtime_get_sync() failed\n");
pm_runtime_put_noidle(dev);
return error;

View File

@ -771,12 +771,12 @@ static void journal_write_unlocked(struct closure *cl)
bio_reset(bio, ca->bdev, REQ_OP_WRITE |
REQ_SYNC | REQ_META | REQ_PREFLUSH | REQ_FUA);
bch_bio_map(bio, w->data);
bio->bi_iter.bi_sector = PTR_OFFSET(k, i);
bio->bi_iter.bi_size = sectors << 9;
bio->bi_end_io = journal_write_endio;
bio->bi_private = w;
bch_bio_map(bio, w->data);
trace_bcache_journal_write(bio, w->data->keys);
bio_list_add(&list, bio);

View File

@ -685,7 +685,7 @@ static void do_bio_hook(struct search *s,
{
struct bio *bio = &s->bio.bio;
bio_init_clone(bio->bi_bdev, bio, orig_bio, GFP_NOIO);
bio_init_clone(orig_bio->bi_bdev, bio, orig_bio, GFP_NOIO);
/*
* bi_end_io can be set separately somewhere else, e.g. the
* variants in,

View File

@ -43,6 +43,7 @@
struct mtk_ecc_caps {
u32 err_mask;
u32 err_shift;
const u8 *ecc_strength;
const u32 *ecc_regs;
u8 num_ecc_strength;
@ -76,7 +77,7 @@ static const u8 ecc_strength_mt2712[] = {
};
static const u8 ecc_strength_mt7622[] = {
4, 6, 8, 10, 12, 14, 16
4, 6, 8, 10, 12
};
enum mtk_ecc_regs {
@ -221,7 +222,7 @@ void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats,
for (i = 0; i < sectors; i++) {
offset = (i >> 2) << 2;
err = readl(ecc->regs + ECC_DECENUM0 + offset);
err = err >> ((i % 4) * 8);
err = err >> ((i % 4) * ecc->caps->err_shift);
err &= ecc->caps->err_mask;
if (err == ecc->caps->err_mask) {
/* uncorrectable errors */
@ -449,6 +450,7 @@ EXPORT_SYMBOL(mtk_ecc_get_parity_bits);
static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = {
.err_mask = 0x3f,
.err_shift = 8,
.ecc_strength = ecc_strength_mt2701,
.ecc_regs = mt2701_ecc_regs,
.num_ecc_strength = 20,
@ -459,6 +461,7 @@ static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = {
static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = {
.err_mask = 0x7f,
.err_shift = 8,
.ecc_strength = ecc_strength_mt2712,
.ecc_regs = mt2712_ecc_regs,
.num_ecc_strength = 23,
@ -468,10 +471,11 @@ static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = {
};
static const struct mtk_ecc_caps mtk_ecc_caps_mt7622 = {
.err_mask = 0x3f,
.err_mask = 0x1f,
.err_shift = 5,
.ecc_strength = ecc_strength_mt7622,
.ecc_regs = mt7622_ecc_regs,
.num_ecc_strength = 7,
.num_ecc_strength = 5,
.ecc_mode_shift = 4,
.parity_bits = 13,
.pg_irq_sel = 0,

View File

@ -2651,10 +2651,23 @@ static int qcom_nand_attach_chip(struct nand_chip *chip)
ecc->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
/* Free the initially allocated BAM transaction for reading the ONFI params */
if (nandc->props->is_bam)
free_bam_transaction(nandc);
nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
cwperpage);
/* Now allocate the BAM transaction based on updated max_cwperpage */
if (nandc->props->is_bam) {
nandc->bam_txn = alloc_bam_transaction(nandc);
if (!nandc->bam_txn) {
dev_err(nandc->dev,
"failed to allocate bam transaction\n");
return -ENOMEM;
}
}
/*
* DATA_UD_BYTES varies based on whether the read/write command protects
* spare data with ECC too. We protect spare data by default, so we set
@ -2955,17 +2968,6 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
if (ret)
return ret;
if (nandc->props->is_bam) {
free_bam_transaction(nandc);
nandc->bam_txn = alloc_bam_transaction(nandc);
if (!nandc->bam_txn) {
dev_err(nandc->dev,
"failed to allocate bam transaction\n");
nand_cleanup(chip);
return -ENOMEM;
}
}
ret = mtd_device_parse_register(mtd, probes, NULL, NULL, 0);
if (ret)
nand_cleanup(chip);

View File

@ -384,7 +384,8 @@ static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
dma_addr_t dma_addr;
dma_cookie_t cookie;
uint32_t reg;
int ret;
int ret = 0;
unsigned long time_left;
if (dir == DMA_FROM_DEVICE) {
chan = flctl->chan_fifo0_rx;
@ -425,13 +426,14 @@ static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
goto out;
}
ret =
time_left =
wait_for_completion_timeout(&flctl->dma_complete,
msecs_to_jiffies(3000));
if (ret <= 0) {
if (time_left == 0) {
dmaengine_terminate_all(chan);
dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n");
ret = -ETIMEDOUT;
}
out:
@ -441,7 +443,7 @@ out:
dma_unmap_single(chan->device->dev, dma_addr, len, dir);
/* ret > 0 is success */
/* ret == 0 is success */
return ret;
}
@ -465,7 +467,7 @@ static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
/* initiate DMA transfer */
if (flctl->chan_fifo0_rx && rlen >= 32 &&
flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_FROM_DEVICE) > 0)
!flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_FROM_DEVICE))
goto convert; /* DMA success */
/* do polling transfer */
@ -524,7 +526,7 @@ static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
/* initiate DMA transfer */
if (flctl->chan_fifo0_tx && rlen >= 32 &&
flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_TO_DEVICE) > 0)
!flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_TO_DEVICE))
return; /* DMA success */
/* do polling transfer */

View File

@ -1681,9 +1681,6 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
break;
case PHY_INTERFACE_MODE_RMII:
miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
/* Configure the RMII clock as output: */
miicfg |= GSWIP_MII_CFG_RMII_CLK;
break;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:

View File

@ -40,8 +40,9 @@ int mv88e6xxx_port_hidden_wait(struct mv88e6xxx_chip *chip)
{
int bit = __bf_shf(MV88E6XXX_PORT_RESERVED_1A_BUSY);
return mv88e6xxx_wait_bit(chip, MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT,
MV88E6XXX_PORT_RESERVED_1A, bit, 0);
return mv88e6xxx_port_wait_bit(chip,
MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT,
MV88E6XXX_PORT_RESERVED_1A, bit, 0);
}
int mv88e6xxx_port_hidden_read(struct mv88e6xxx_chip *chip, int block, int port,

View File

@ -14153,10 +14153,6 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
/* Stop Tx */
bnx2x_tx_disable(bp);
/* Delete all NAPI objects */
bnx2x_del_all_napi(bp);
if (CNIC_LOADED(bp))
bnx2x_del_all_napi_cnic(bp);
netdev_reset_tc(bp->dev);
del_timer_sync(&bp->timer);
@ -14261,6 +14257,11 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
bnx2x_drain_tx_queues(bp);
bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
bnx2x_netif_stop(bp, 1);
bnx2x_del_all_napi(bp);
if (CNIC_LOADED(bp))
bnx2x_del_all_napi_cnic(bp);
bnx2x_free_irq(bp);
/* Report UNLOAD_DONE to MCP */

View File

@ -2035,6 +2035,11 @@ static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
return skb;
}
static void bcmgenet_hide_tsb(struct sk_buff *skb)
{
__skb_pull(skb, sizeof(struct status_64));
}
static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
@ -2141,6 +2146,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
}
GENET_CB(skb)->last_cb = tx_cb_ptr;
bcmgenet_hide_tsb(skb);
skb_tx_timestamp(skb);
/* Decrement total BD count and advance our write pointer */

View File

@ -297,10 +297,6 @@ int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data)
if (tc < 0 || tc >= priv->num_tx_rings)
return -EINVAL;
/* Do not support TXSTART and TX CSUM offload simutaniously */
if (ndev->features & NETIF_F_CSUM_MASK)
return -EBUSY;
/* TSD and Qbv are mutually exclusive in hardware */
if (enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE)
return -EBUSY;

View File

@ -3731,7 +3731,7 @@ static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
ARRAY_SIZE(out_val));
if (ret) {
dev_dbg(&fep->pdev->dev, "no stop mode property\n");
return ret;
goto out;
}
fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np);

View File

@ -1065,19 +1065,23 @@ int hns_mac_init(struct dsaf_device *dsaf_dev)
device_for_each_child_node(dsaf_dev->dev, child) {
ret = fwnode_property_read_u32(child, "reg", &port_id);
if (ret) {
fwnode_handle_put(child);
dev_err(dsaf_dev->dev,
"get reg fail, ret=%d!\n", ret);
return ret;
}
if (port_id >= max_port_num) {
fwnode_handle_put(child);
dev_err(dsaf_dev->dev,
"reg(%u) out of range!\n", port_id);
return -EINVAL;
}
mac_cb = devm_kzalloc(dsaf_dev->dev, sizeof(*mac_cb),
GFP_KERNEL);
if (!mac_cb)
if (!mac_cb) {
fwnode_handle_put(child);
return -ENOMEM;
}
mac_cb->fw_port = child;
mac_cb->mac_id = (u8)port_id;
dsaf_dev->mac_cb[port_id] = mac_cb;
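The added fwnode_handle_put() calls balance the reference the child iterator holds when the loop is left early. A toy userspace sketch of that leak class, with a simple get/put counter standing in for fwnode reference counting (this is only the shape of the fix, not the kernel macro's exact semantics):

#include <stdio.h>

/* Toy refcounted node standing in for a fwnode handle. */
struct node {
	const char *name;
	int refs;
};

static void node_get(struct node *n) { n->refs++; }
static void node_put(struct node *n) { n->refs--; }

/* The iterator holds a reference on the current child, so every
 * early return must drop it explicitly before leaving the loop. */
static int scan_children(struct node *children, int count)
{
	for (int i = 0; i < count; i++) {
		struct node *child = &children[i];

		node_get(child);		/* taken by the iterator */

		if (child->name == NULL) {	/* error path */
			node_put(child);	/* balance before returning */
			return -1;
		}

		node_put(child);		/* dropped before the next iteration */
	}
	return 0;
}

int main(void)
{
	struct node kids[] = { { "port0", 0 }, { NULL, 0 }, { "port2", 0 } };
	int ret = scan_children(kids, 3);

	printf("ret=%d refs=%d,%d,%d\n", ret, kids[0].refs, kids[1].refs, kids[2].refs);
	return 0;
}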

View File

@ -75,7 +75,7 @@ int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
ret = hclge_comm_cmd_send(hw, &desc, 1);
if (ret) {
dev_err(&hw->cmq.csq.pdev->dev,
"failed to get tqp stat, ret = %d, tx = %u.\n",
"failed to get tqp stat, ret = %d, rx = %u.\n",
ret, i);
return ret;
}
@ -89,7 +89,7 @@ int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
ret = hclge_comm_cmd_send(hw, &desc, 1);
if (ret) {
dev_err(&hw->cmq.csq.pdev->dev,
"failed to get tqp stat, ret = %d, rx = %u.\n",
"failed to get tqp stat, ret = %d, tx = %u.\n",
ret, i);
return ret;
}

Some files were not shown because too many files have changed in this diff.