Linux 6.10-rc2

-----BEGIN PGP SIGNATURE-----
 
 iQFSBAABCAA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAmZc9egeHHRvcnZhbGRz
 QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiGoLsH/0xo1TNZwNRE9Qux
 gFbJDDhxMtvWnJCSqUuhpypd7SoVVEVKiXW942gWTl97PlCEH0ov3p+0UbxnmG13
 kKJT1C/gct95L03OAfGquIjBSWh4/55o6Vz1zYTGvTFpWZo7G3ZvCDY8o9kN5/L3
 mnpC+GfzZ9ckg+2TfwlbGBQUtILHV3IipCbfDFPSrT8mS0IT67uvBlND3nI++woj
 J1znGqg1PQ6yFnFCfj4RYCiyv/jEAT0ZTyokO4rH+iQVufc3y02mokhMuqmSoE6T
 5bbHToLZIaa/QjRamN/+ltwyrhv8WlX4rJOkMibJY6w8gpukt/k6gL2Pguk4y2pf
 0FPbbC0=
 =0AGs
 -----END PGP SIGNATURE-----
gpgsig -----BEGIN PGP SIGNATURE-----
 
 iQEzBAABCgAdFiEEreZoqmdXGLWf4p/qJNaLcl1Uh9AFAmZdsCUACgkQJNaLcl1U
 h9AUKwf/RYzbaI4T3Ucd47/yhV2J3Ln/Q1dve+Eh/SdVILr0lz0opZyjERp/7F+f
 QTSBrSnr3WDj+haeG7kdf89Z6dE+CaNkvvi+g+dKwAPlv+dwRT2Z38/I8336O0So
 +oo/EdysCAgMXjC8lV29pju8X9VbIPRSuTakbYpBBQD2jmdKVA4MtUnQRR/odkZ7
 GVK+VydDJU1+gkmtuRBO0EA8yM7yorb1P0M3gwPLvsRO3SFLVul8UDGm/zuEJm5M
 ArfXUGcPkUKUFpjbOl9mdDhkhPtYFwB3GiItIEASDAcCgibdHZY3F8+dJhesiIio
 vdZVQ54G6T/h4i2V5jmNaxtVv0n1xw==
 =ChA8
 -----END PGP SIGNATURE-----

regmap: Merge up fixes

We need these to get the i.MX8 boards working in CI again.
Mark Brown 2024-06-03 12:59:25 +01:00
commit 0ae7477599
328 changed files with 3317 additions and 1843 deletions


@ -337,10 +337,11 @@ Kalyan Thota <quic_kalyant@quicinc.com> <kalyan_t@codeaurora.org>
Karthikeyan Periyasamy <quic_periyasa@quicinc.com> <periyasa@codeaurora.org>
Kathiravan T <quic_kathirav@quicinc.com> <kathirav@codeaurora.org>
Kay Sievers <kay.sievers@vrfy.org>
Kees Cook <keescook@chromium.org> <kees.cook@canonical.com>
Kees Cook <keescook@chromium.org> <keescook@google.com>
Kees Cook <keescook@chromium.org> <kees@outflux.net>
Kees Cook <keescook@chromium.org> <kees@ubuntu.com>
Kees Cook <kees@kernel.org> <kees.cook@canonical.com>
Kees Cook <kees@kernel.org> <keescook@chromium.org>
Kees Cook <kees@kernel.org> <keescook@google.com>
Kees Cook <kees@kernel.org> <kees@outflux.net>
Kees Cook <kees@kernel.org> <kees@ubuntu.com>
Keith Busch <kbusch@kernel.org> <keith.busch@intel.com>
Keith Busch <kbusch@kernel.org> <keith.busch@linux.intel.com>
Kenneth W Chen <kenneth.w.chen@intel.com>


@ -1921,6 +1921,28 @@
Format:
<bus_id>,<clkrate>
i2c_touchscreen_props= [HW,ACPI,X86]
Set device-properties for ACPI-enumerated I2C-attached
touchscreen, to e.g. fix coordinates of upside-down
mounted touchscreens. If you need this option please
submit a drivers/platform/x86/touchscreen_dmi.c patch
adding a DMI quirk for this.
Format:
<ACPI_HW_ID>:<prop_name>=<val>[:prop_name=val][:...]
Where <val> is one of:
Omit "=<val>" entirely Set a boolean device-property
Unsigned number Set a u32 device-property
Anything else Set a string device-property
Examples (split over multiple lines):
i2c_touchscreen_props=GDIX1001:touchscreen-inverted-x:
touchscreen-inverted-y
i2c_touchscreen_props=MSSL1680:touchscreen-size-x=1920:
touchscreen-size-y=1080:touchscreen-inverted-y:
firmware-name=gsl1680-vendor-model.fw:silead,home-button
i8042.debug [HW] Toggle i8042 debug mode
i8042.unmask_kbd_data
[HW] Enable printing of interrupt data from the KBD port


@ -65,4 +65,6 @@ the extension, or may have deliberately removed it from the listing.
Misaligned accesses
-------------------
Misaligned accesses are supported in userspace, but they may perform poorly.
Misaligned scalar accesses are supported in userspace, but they may perform
poorly. Misaligned vector accesses are only supported if the Zicclsm extension
is supported.


@ -192,7 +192,7 @@ alignment larger than PAGE_SIZE.
Dynamic swiotlb
---------------
When CONFIG_DYNAMIC_SWIOTLB is enabled, swiotlb can do on-demand expansion of
When CONFIG_SWIOTLB_DYNAMIC is enabled, swiotlb can do on-demand expansion of
the amount of memory available for allocation as bounce buffers. If a bounce
buffer request fails due to lack of available space, an asynchronous background
task is kicked off to allocate memory from general system memory and turn it


@ -24,6 +24,7 @@ properties:
managers:
type: object
additionalProperties: false
description:
List of the PD69208T4/PD69204T4/PD69208M PSE managers. Each manager
have 4 or 8 physical ports according to the chip version. No need to
@ -47,8 +48,9 @@ properties:
- "#size-cells"
patternProperties:
"^manager@0[0-9a-b]$":
"^manager@[0-9a-b]$":
type: object
additionalProperties: false
description:
PD69208T4/PD69204T4/PD69208M PSE manager exposing 4 or 8 physical
ports.
@ -69,9 +71,14 @@ properties:
patternProperties:
'^port@[0-7]$':
type: object
additionalProperties: false
properties:
reg:
maxItems: 1
required:
- reg
additionalProperties: false
required:
- reg


@ -29,13 +29,31 @@ properties:
of the ports conversion matrix that establishes relationship between
the logical ports and the physical channels.
type: object
additionalProperties: false
properties:
"#address-cells":
const: 1
"#size-cells":
const: 0
patternProperties:
'^channel@[0-7]$':
type: object
additionalProperties: false
properties:
reg:
maxItems: 1
required:
- reg
required:
- "#address-cells"
- "#size-cells"
unevaluatedProperties: false
required:


@ -349,6 +349,10 @@ attribute-sets:
Number of packets dropped due to transient lack of resources, such as
buffer space, host descriptors etc.
type: uint
-
name: rx-csum-complete
doc: Number of packets that were marked as CHECKSUM_COMPLETE.
type: uint
-
name: rx-csum-unnecessary
doc: Number of packets that were marked as CHECKSUM_UNNECESSARY.


@ -227,7 +227,7 @@ preferably including links to previous postings, for example::
The amount of mooing will depend on packet rate so should match
the diurnal cycle quite well.
Signed-of-by: Joe Defarmer <joe@barn.org>
Signed-off-by: Joe Defarmer <joe@barn.org>
---
v3:
- add a note about time-of-day mooing fluctuation to the commit message


@ -3854,6 +3854,7 @@ BPF JIT for ARM64
M: Daniel Borkmann <daniel@iogearbox.net>
M: Alexei Starovoitov <ast@kernel.org>
M: Puranjay Mohan <puranjay@kernel.org>
R: Xu Kuohai <xukuohai@huaweicloud.com>
L: bpf@vger.kernel.org
S: Supported
F: arch/arm64/net/
@ -5187,7 +5188,6 @@ F: Documentation/devicetree/bindings/media/i2c/chrontel,ch7322.yaml
F: drivers/media/cec/i2c/ch7322.c
CIRRUS LOGIC AUDIO CODEC DRIVERS
M: James Schulman <james.schulman@cirrus.com>
M: David Rhodes <david.rhodes@cirrus.com>
M: Richard Fitzgerald <rf@opensource.cirrus.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
@ -21316,7 +21316,7 @@ F: arch/riscv/boot/dts/starfive/
STARFIVE DWMAC GLUE LAYER
M: Emil Renner Berthing <kernel@esmil.dk>
M: Samin Guo <samin.guo@starfivetech.com>
M: Minda Chen <minda.chen@starfivetech.com>
S: Maintained
F: Documentation/devicetree/bindings/net/starfive,jh7110-dwmac.yaml
F: drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c


@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 10
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc2
NAME = Baby Opossum Posse
# *DOCUMENTATION*


@ -39,7 +39,7 @@
/************** Functions that the back-end must provide **************/
/* Extension for 32-bit operations. */
inline u8 zext(u8 *buf, u8 rd);
u8 zext(u8 *buf, u8 rd);
/***** Moves *****/
u8 mov_r32(u8 *buf, u8 rd, u8 rs, u8 sign_ext);
u8 mov_r32_i32(u8 *buf, u8 reg, s32 imm);


@ -62,7 +62,7 @@ enum {
* If/when we decide to add ARCv2 instructions that do use register pairs,
* the mapping, hopefully, doesn't need to be revisited.
*/
const u8 bpf2arc[][2] = {
static const u8 bpf2arc[][2] = {
/* Return value from in-kernel function, and exit value from eBPF */
[BPF_REG_0] = {ARC_R_8, ARC_R_9},
/* Arguments from eBPF program to in-kernel function */
@ -1302,7 +1302,7 @@ static u8 arc_b(u8 *buf, s32 offset)
/************* Packers (Deal with BPF_REGs) **************/
inline u8 zext(u8 *buf, u8 rd)
u8 zext(u8 *buf, u8 rd)
{
if (rd != BPF_REG_FP)
return arc_movi_r(buf, REG_HI(rd), 0);
@ -2235,6 +2235,7 @@ u8 gen_swap(u8 *buf, u8 rd, u8 size, u8 endian, bool force, bool do_zext)
break;
default:
/* The caller must have handled this. */
break;
}
} else {
/*
@ -2253,6 +2254,7 @@ u8 gen_swap(u8 *buf, u8 rd, u8 size, u8 endian, bool force, bool do_zext)
break;
default:
/* The caller must have handled this. */
break;
}
}
@ -2517,7 +2519,7 @@ u8 arc_epilogue(u8 *buf, u32 usage, u16 frame_size)
#define JCC64_NR_OF_JMPS 3 /* Number of jumps in jcc64 template. */
#define JCC64_INSNS_TO_END 3 /* Number of insn. inclusive the 2nd jmp to end. */
#define JCC64_SKIP_JMP 1 /* Index of the "skip" jump to "end". */
const struct {
static const struct {
/*
* "jit_off" is common between all "jmp[]" and is coupled with
* "cond" of each "jmp[]" instance. e.g.:
@ -2883,7 +2885,7 @@ u8 gen_jmp_64(u8 *buf, u8 rd, u8 rs, u8 cond, u32 curr_off, u32 targ_off)
* The "ARC_CC_SET" becomes "CC_unequal" because of the "tst"
* instruction that precedes the conditional branch.
*/
const u8 arcv2_32_jmps[ARC_CC_LAST] = {
static const u8 arcv2_32_jmps[ARC_CC_LAST] = {
[ARC_CC_UGT] = CC_great_u,
[ARC_CC_UGE] = CC_great_eq_u,
[ARC_CC_ULT] = CC_less_u,


@ -159,7 +159,7 @@ static void jit_dump(const struct jit_context *ctx)
/* Initialise the context so there's no garbage. */
static int jit_ctx_init(struct jit_context *ctx, struct bpf_prog *prog)
{
memset(ctx, 0, sizeof(ctx));
memset(ctx, 0, sizeof(*ctx));
ctx->orig_prog = prog;
@ -167,7 +167,7 @@ static int jit_ctx_init(struct jit_context *ctx, struct bpf_prog *prog)
ctx->prog = bpf_jit_blind_constants(prog);
if (IS_ERR(ctx->prog))
return PTR_ERR(ctx->prog);
ctx->blinded = (ctx->prog == ctx->orig_prog ? false : true);
ctx->blinded = (ctx->prog != ctx->orig_prog);
/* If the verifier doesn't zero-extend, then we have to do it. */
ctx->do_zext = !ctx->prog->aux->verifier_zext;
@ -1182,12 +1182,12 @@ static int jit_prepare(struct jit_context *ctx)
}
/*
* All the "handle_*()" functions have been called before by the
* "jit_prepare()". If there was an error, we would know by now.
* Therefore, no extra error checking at this point, other than
* a sanity check at the end that expects the calculated length
* (jit.len) to be equal to the length of generated instructions
* (jit.index).
* jit_compile() is the real compilation phase. jit_prepare() is
* invoked before jit_compile() as a dry-run to make sure everything
* will go OK and allocate the necessary memory.
*
* In the end, jit_compile() checks if it has produced the same number
* of instructions as jit_prepare() would.
*/
static int jit_compile(struct jit_context *ctx)
{
@ -1407,9 +1407,9 @@ static struct bpf_prog *do_extra_pass(struct bpf_prog *prog)
/*
* This function may be invoked twice for the same stream of BPF
* instructions. The "extra pass" happens, when there are "call"s
* involved that their addresses are not known during the first
* invocation.
* instructions. The "extra pass" happens, when there are
* (re)locations involved that their addresses are not known
* during the first run.
*/
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
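
A note on the memset() fix near the top of this file's changes: it corrects a classic C pitfall where sizeof(ctx) yields the size of the pointer (4 or 8 bytes) rather than of the structure it points to, so most of the JIT context was left uninitialised. A minimal stand-alone illustration, with made-up field names rather than the real jit_context layout:

    #include <string.h>

    struct jit_context {
        int len;     /* made-up fields, for illustration only */
        int index;
    };

    static void ctx_init(struct jit_context *ctx)
    {
        /*
         * Buggy form: sizeof(ctx) is the size of the pointer itself,
         * so only the first 4 or 8 bytes get cleared:
         *
         *     memset(ctx, 0, sizeof(ctx));
         */
        memset(ctx, 0, sizeof(*ctx));   /* correct: size of the whole struct */
    }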


@ -137,7 +137,7 @@ config PPC
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_HUGEPD if HUGETLB_PAGE
select ARCH_HAS_KCOV
select ARCH_HAS_KERNEL_FPU_SUPPORT if PPC_FPU
select ARCH_HAS_KERNEL_FPU_SUPPORT if PPC64 && PPC_FPU
select ARCH_HAS_MEMBARRIER_CALLBACKS
select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_MEMREMAP_COMPAT_ALIGN if PPC_64S_HASH_MMU


@ -92,9 +92,25 @@ __pu_failed: \
: label)
#endif
#ifdef CONFIG_CC_IS_CLANG
#define DS_FORM_CONSTRAINT "Z<>"
#else
#define DS_FORM_CONSTRAINT "YZ<>"
#endif
#ifdef __powerpc64__
#ifdef CONFIG_PPC_KERNEL_PREFIXED
#define __put_user_asm2_goto(x, ptr, label) \
__put_user_asm_goto(x, ptr, label, "std")
#else
#define __put_user_asm2_goto(x, addr, label) \
asm goto ("1: std%U1%X1 %0,%1 # put_user\n" \
EX_TABLE(1b, %l2) \
: \
: "r" (x), DS_FORM_CONSTRAINT (*addr) \
: \
: label)
#endif // CONFIG_PPC_KERNEL_PREFIXED
#else /* __powerpc64__ */
#define __put_user_asm2_goto(x, addr, label) \
asm goto( \
@ -165,8 +181,19 @@ do { \
#endif
#ifdef __powerpc64__
#ifdef CONFIG_PPC_KERNEL_PREFIXED
#define __get_user_asm2_goto(x, addr, label) \
__get_user_asm_goto(x, addr, label, "ld")
#else
#define __get_user_asm2_goto(x, addr, label) \
asm_goto_output( \
"1: ld%U1%X1 %0, %1 # get_user\n" \
EX_TABLE(1b, %l2) \
: "=r" (x) \
: DS_FORM_CONSTRAINT (*addr) \
: \
: label)
#endif // CONFIG_PPC_KERNEL_PREFIXED
#else /* __powerpc64__ */
#define __get_user_asm2_goto(x, addr, label) \
asm_goto_output( \


@ -900,6 +900,15 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
/* Get offset into TMP_REG */
EMIT(PPC_RAW_LI(tmp_reg, off));
/*
* Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
* before and after the operation.
*
* This is a requirement in the Linux Kernel Memory Model.
* See __cmpxchg_u32() in asm/cmpxchg.h as an example.
*/
if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
EMIT(PPC_RAW_SYNC());
tmp_idx = ctx->idx * 4;
/* load value from memory into r0 */
EMIT(PPC_RAW_LWARX(_R0, tmp_reg, dst_reg, 0));
@ -953,6 +962,9 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
/* For the BPF_FETCH variant, get old data into src_reg */
if (imm & BPF_FETCH) {
/* Emit 'sync' to enforce full ordering */
if (IS_ENABLED(CONFIG_SMP))
EMIT(PPC_RAW_SYNC());
EMIT(PPC_RAW_MR(ret_reg, ax_reg));
if (!fp->aux->verifier_zext)
EMIT(PPC_RAW_LI(ret_reg - 1, 0)); /* higher 32-bit */
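
To make the new comment concrete, here is a hedged sketch of the instruction stream the 32-bit JIT now emits for an atomic with BPF_FETCH when CONFIG_SMP=y; labels, registers and the middle of the loop are schematic, not the exact generated code:

    /*
     *     sync                   # full barrier before the LL/SC loop
     * 0:  lwarx   r0, tmp, dst   # load-reserve the old value
     *     ...                    # save the old value, compute the new one
     *     stwcx.  new, tmp, dst  # store-conditional
     *     bne-    0b             # reservation lost: retry
     *     sync                   # full barrier after the loop
     *     mr      ret, old       # BPF_FETCH: hand the old value back
     */

This mirrors __cmpxchg_u32() in arch/powerpc/include/asm/cmpxchg.h, which the comment cites as the reference for the Linux Kernel Memory Model requirement.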


@ -846,6 +846,15 @@ emit_clear:
/* Get offset into TMP_REG_1 */
EMIT(PPC_RAW_LI(tmp1_reg, off));
/*
* Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
* before and after the operation.
*
* This is a requirement in the Linux Kernel Memory Model.
* See __cmpxchg_u64() in asm/cmpxchg.h as an example.
*/
if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
EMIT(PPC_RAW_SYNC());
tmp_idx = ctx->idx * 4;
/* load value from memory into TMP_REG_2 */
if (size == BPF_DW)
@ -908,6 +917,9 @@ emit_clear:
PPC_BCC_SHORT(COND_NE, tmp_idx);
if (imm & BPF_FETCH) {
/* Emit 'sync' to enforce full ordering */
if (IS_ENABLED(CONFIG_SMP))
EMIT(PPC_RAW_SYNC());
EMIT(PPC_RAW_MR(ret_reg, _R0));
/*
* Skip unnecessary zero-extension for 32-bit cmpxchg.


@ -371,8 +371,8 @@ static int read_dt_lpar_name(struct seq_file *m)
static void read_lpar_name(struct seq_file *m)
{
if (read_rtas_lpar_name(m) && read_dt_lpar_name(m))
pr_err_once("Error can't get the LPAR name");
if (read_rtas_lpar_name(m))
read_dt_lpar_name(m);
}
#define SPLPAR_MAXLENGTH 1026*(sizeof(char))


@ -106,7 +106,7 @@ config RISCV
select HAS_IOPORT if MMU
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_HUGE_VMALLOC if HAVE_ARCH_HUGE_VMAP
select HAVE_ARCH_HUGE_VMAP if MMU && 64BIT && !XIP_KERNEL
select HAVE_ARCH_HUGE_VMAP if MMU && 64BIT
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE if !XIP_KERNEL
select HAVE_ARCH_KASAN if MMU && 64BIT


@ -10,7 +10,7 @@
#include <asm/fence.h>
#define __arch_xchg_masked(prepend, append, r, p, n) \
#define __arch_xchg_masked(sc_sfx, prepend, append, r, p, n) \
({ \
u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \
ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \
@ -25,7 +25,7 @@
"0: lr.w %0, %2\n" \
" and %1, %0, %z4\n" \
" or %1, %1, %z3\n" \
" sc.w %1, %1, %2\n" \
" sc.w" sc_sfx " %1, %1, %2\n" \
" bnez %1, 0b\n" \
append \
: "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \
@ -46,7 +46,8 @@
: "memory"); \
})
#define _arch_xchg(ptr, new, sfx, prepend, append) \
#define _arch_xchg(ptr, new, sc_sfx, swap_sfx, prepend, \
sc_append, swap_append) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(*(__ptr)) __new = (new); \
@ -55,15 +56,15 @@
switch (sizeof(*__ptr)) { \
case 1: \
case 2: \
__arch_xchg_masked(prepend, append, \
__arch_xchg_masked(sc_sfx, prepend, sc_append, \
__ret, __ptr, __new); \
break; \
case 4: \
__arch_xchg(".w" sfx, prepend, append, \
__arch_xchg(".w" swap_sfx, prepend, swap_append, \
__ret, __ptr, __new); \
break; \
case 8: \
__arch_xchg(".d" sfx, prepend, append, \
__arch_xchg(".d" swap_sfx, prepend, swap_append, \
__ret, __ptr, __new); \
break; \
default: \
@ -73,16 +74,17 @@
})
#define arch_xchg_relaxed(ptr, x) \
_arch_xchg(ptr, x, "", "", "")
_arch_xchg(ptr, x, "", "", "", "", "")
#define arch_xchg_acquire(ptr, x) \
_arch_xchg(ptr, x, "", "", RISCV_ACQUIRE_BARRIER)
_arch_xchg(ptr, x, "", "", "", \
RISCV_ACQUIRE_BARRIER, RISCV_ACQUIRE_BARRIER)
#define arch_xchg_release(ptr, x) \
_arch_xchg(ptr, x, "", RISCV_RELEASE_BARRIER, "")
_arch_xchg(ptr, x, "", "", RISCV_RELEASE_BARRIER, "", "")
#define arch_xchg(ptr, x) \
_arch_xchg(ptr, x, ".aqrl", "", "")
_arch_xchg(ptr, x, ".rl", ".aqrl", "", RISCV_FULL_BARRIER, "")
#define xchg32(ptr, x) \
({ \
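
Put differently, with the extra suffix parameters above a fully ordered arch_xchg() now has two shapes. A hedged sketch (operands schematic; RISCV_FULL_BARRIER is assumed to expand to a full "fence rw, rw"):

    /*
     * u8/u16: masked LR/SC path, sc_sfx = ".rl" plus a trailing full fence
     *
     *     0: lr.w     old, (word)       # load-reserve the containing word
     *        and      tmp, old, ~mask   # clear the target byte/halfword
     *        or       tmp, tmp, newval  # insert the new value
     *        sc.w.rl  tmp, tmp, (word)
     *        bnez     tmp, 0b
     *        fence    rw, rw
     *
     * u32/u64: single AMO, swap_sfx = ".aqrl", no extra fence
     *
     *        amoswap.w.aqrl  old, new, (ptr)
     */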


@ -72,7 +72,7 @@ static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle)
/* Make sure tidle is updated */
smp_mb();
bdata->task_ptr = tidle;
bdata->stack_ptr = task_stack_page(tidle) + THREAD_SIZE;
bdata->stack_ptr = task_pt_regs(tidle);
/* Make sure boot data is updated */
smp_mb();
hsm_data = __pa(bdata);


@ -34,8 +34,7 @@ static void cpu_update_secondary_bootdata(unsigned int cpuid,
/* Make sure tidle is updated */
smp_mb();
WRITE_ONCE(__cpu_spinwait_stack_pointer[hartid],
task_stack_page(tidle) + THREAD_SIZE);
WRITE_ONCE(__cpu_spinwait_stack_pointer[hartid], task_pt_regs(tidle));
WRITE_ONCE(__cpu_spinwait_task_pointer[hartid], tidle);
}


@ -114,6 +114,7 @@
#include "../perf_event.h"
#include "../probe.h"
MODULE_DESCRIPTION("Support for Intel cstate performance events");
MODULE_LICENSE("GPL");
#define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format) \


@ -34,6 +34,7 @@ static struct event_constraint uncore_constraint_fixed =
struct event_constraint uncore_constraint_empty =
EVENT_CONSTRAINT(0, 0, 0);
MODULE_DESCRIPTION("Support for Intel uncore performance events");
MODULE_LICENSE("GPL");
int uncore_pcibus_to_dieid(struct pci_bus *bus)


@ -64,6 +64,7 @@
#include "perf_event.h"
#include "probe.h"
MODULE_DESCRIPTION("Support Intel/AMD RAPL energy consumption counters");
MODULE_LICENSE("GPL");
/*


@ -345,6 +345,7 @@ static DECLARE_WORK(disable_freq_invariance_work,
disable_freq_invariance_workfn);
DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(arch_freq_scale);
static void scale_freq_tick(u64 acnt, u64 mcnt)
{


@ -1075,6 +1075,10 @@ void get_cpu_address_sizes(struct cpuinfo_x86 *c)
c->x86_virt_bits = (eax >> 8) & 0xff;
c->x86_phys_bits = eax & 0xff;
/* Provide a sane default if not enumerated: */
if (!c->x86_clflush_size)
c->x86_clflush_size = 32;
}
c->x86_cache_bits = c->x86_phys_bits;
@ -1585,6 +1589,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
if (have_cpuid_p()) {
cpu_detect(c);
get_cpu_vendor(c);
intel_unlock_cpuid_leafs(c);
get_cpu_cap(c);
setup_force_cpu_cap(X86_FEATURE_CPUID);
get_cpu_address_sizes(c);
@ -1744,7 +1749,7 @@ static void generic_identify(struct cpuinfo_x86 *c)
cpu_detect(c);
get_cpu_vendor(c);
intel_unlock_cpuid_leafs(c);
get_cpu_cap(c);
get_cpu_address_sizes(c);


@ -61,9 +61,11 @@ extern __ro_after_init enum tsx_ctrl_states tsx_ctrl_state;
extern void __init tsx_init(void);
void tsx_ap_init(void);
void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c);
#else
static inline void tsx_init(void) { }
static inline void tsx_ap_init(void) { }
static inline void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c) { }
#endif /* CONFIG_CPU_SUP_INTEL */
extern void init_spectral_chicken(struct cpuinfo_x86 *c);


@ -269,19 +269,26 @@ detect_keyid_bits:
c->x86_phys_bits -= keyid_bits;
}
void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c)
{
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return;
if (c->x86 < 6 || (c->x86 == 6 && c->x86_model < 0xd))
return;
/*
* The BIOS can have limited CPUID to leaf 2, which breaks feature
* enumeration. Unlock it and update the maximum leaf info.
*/
if (msr_clear_bit(MSR_IA32_MISC_ENABLE, MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0)
c->cpuid_level = cpuid_eax(0);
}
static void early_init_intel(struct cpuinfo_x86 *c)
{
u64 misc_enable;
/* Unmask CPUID levels if masked: */
if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) {
c->cpuid_level = cpuid_eax(0);
get_cpu_cap(c);
}
}
if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
(c->x86 == 0x6 && c->x86_model >= 0x0e))
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);


@ -84,9 +84,9 @@ static bool parse_8000_001e(struct topo_scan *tscan, bool has_topoext)
/*
* If leaf 0xb is available, then the domain shifts are set
* already and nothing to do here.
* already and nothing to do here. Only valid for family >= 0x17.
*/
if (!has_topoext) {
if (!has_topoext && tscan->c->x86 >= 0x17) {
/*
* Leaf 0x80000008 set the CORE domain shift already.
* Update the SMT domain, but do not propagate it.


@ -104,6 +104,7 @@ static int blk_validate_zoned_limits(struct queue_limits *lim)
static int blk_validate_limits(struct queue_limits *lim)
{
unsigned int max_hw_sectors;
unsigned int logical_block_sectors;
/*
* Unless otherwise specified, default to 512 byte logical blocks and a
@ -134,8 +135,11 @@ static int blk_validate_limits(struct queue_limits *lim)
lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
if (WARN_ON_ONCE(lim->max_hw_sectors < PAGE_SECTORS))
return -EINVAL;
logical_block_sectors = lim->logical_block_size >> SECTOR_SHIFT;
if (WARN_ON_ONCE(logical_block_sectors > lim->max_hw_sectors))
return -EINVAL;
lim->max_hw_sectors = round_down(lim->max_hw_sectors,
lim->logical_block_size >> SECTOR_SHIFT);
logical_block_sectors);
/*
* The actual max_sectors value is a complex beast and also takes the
@ -153,7 +157,7 @@ static int blk_validate_limits(struct queue_limits *lim)
lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
}
lim->max_sectors = round_down(lim->max_sectors,
lim->logical_block_size >> SECTOR_SHIFT);
logical_block_sectors);
/*
* Random default for the maximum number of segments. Driver should not
@ -611,6 +615,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
unsigned int top, bottom, alignment, ret = 0;
t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
t->max_user_sectors = min_not_zero(t->max_user_sectors,
b->max_user_sectors);
t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
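
A quick worked example of what the new logical_block_sectors check above guards against (numbers invented for illustration): with a 4096-byte logical block size, logical_block_sectors is 4096 >> 9 = 8, and a max_hw_sectors value smaller than that would be rounded down to 0 by the round_down() just below; the added WARN_ON_ONCE() now refuses such a combination with -EINVAL up front.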


@ -64,7 +64,6 @@ struct blk_stat_callback {
struct blk_queue_stats *blk_alloc_queue_stats(void);
void blk_free_queue_stats(struct blk_queue_stats *);
bool blk_stats_alloc_enable(struct request_queue *q);
void blk_stat_add(struct request *rq, u64 now);


@ -1399,32 +1399,32 @@ static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
bps_dft = U64_MAX;
iops_dft = UINT_MAX;
if (tg->bps_conf[READ] == bps_dft &&
tg->bps_conf[WRITE] == bps_dft &&
tg->iops_conf[READ] == iops_dft &&
tg->iops_conf[WRITE] == iops_dft)
if (tg->bps[READ] == bps_dft &&
tg->bps[WRITE] == bps_dft &&
tg->iops[READ] == iops_dft &&
tg->iops[WRITE] == iops_dft)
return 0;
seq_printf(sf, "%s", dname);
if (tg->bps_conf[READ] == U64_MAX)
if (tg->bps[READ] == U64_MAX)
seq_printf(sf, " rbps=max");
else
seq_printf(sf, " rbps=%llu", tg->bps_conf[READ]);
seq_printf(sf, " rbps=%llu", tg->bps[READ]);
if (tg->bps_conf[WRITE] == U64_MAX)
if (tg->bps[WRITE] == U64_MAX)
seq_printf(sf, " wbps=max");
else
seq_printf(sf, " wbps=%llu", tg->bps_conf[WRITE]);
seq_printf(sf, " wbps=%llu", tg->bps[WRITE]);
if (tg->iops_conf[READ] == UINT_MAX)
if (tg->iops[READ] == UINT_MAX)
seq_printf(sf, " riops=max");
else
seq_printf(sf, " riops=%u", tg->iops_conf[READ]);
seq_printf(sf, " riops=%u", tg->iops[READ]);
if (tg->iops_conf[WRITE] == UINT_MAX)
if (tg->iops[WRITE] == UINT_MAX)
seq_printf(sf, " wiops=max");
else
seq_printf(sf, " wiops=%u", tg->iops_conf[WRITE]);
seq_printf(sf, " wiops=%u", tg->iops[WRITE]);
seq_printf(sf, "\n");
return 0;


@ -95,15 +95,11 @@ struct throtl_grp {
bool has_rules_bps[2];
bool has_rules_iops[2];
/* internally used bytes per second rate limits */
/* bytes per second rate limits */
uint64_t bps[2];
/* user configured bps limits */
uint64_t bps_conf[2];
/* internally used IOPS limits */
/* IOPS limits */
unsigned int iops[2];
/* user configured IOPS limits */
unsigned int iops_conf[2];
/* Number of bytes dispatched in current slice */
uint64_t bytes_disp[2];


@ -450,6 +450,25 @@ static inline bool disk_zone_is_conv(struct gendisk *disk, sector_t sector)
return test_bit(disk_zone_no(disk, sector), disk->conv_zones_bitmap);
}
static bool disk_zone_is_last(struct gendisk *disk, struct blk_zone *zone)
{
return zone->start + zone->len >= get_capacity(disk);
}
static bool disk_zone_is_full(struct gendisk *disk,
unsigned int zno, unsigned int offset_in_zone)
{
if (zno < disk->nr_zones - 1)
return offset_in_zone >= disk->zone_capacity;
return offset_in_zone >= disk->last_zone_capacity;
}
static bool disk_zone_wplug_is_full(struct gendisk *disk,
struct blk_zone_wplug *zwplug)
{
return disk_zone_is_full(disk, zwplug->zone_no, zwplug->wp_offset);
}
static bool disk_insert_zone_wplug(struct gendisk *disk,
struct blk_zone_wplug *zwplug)
{
@ -543,7 +562,7 @@ static inline bool disk_should_remove_zone_wplug(struct gendisk *disk,
return false;
/* We can remove zone write plugs for zones that are empty or full. */
return !zwplug->wp_offset || zwplug->wp_offset >= disk->zone_capacity;
return !zwplug->wp_offset || disk_zone_wplug_is_full(disk, zwplug);
}
static void disk_remove_zone_wplug(struct gendisk *disk,
@ -664,13 +683,12 @@ static void disk_zone_wplug_abort(struct blk_zone_wplug *zwplug)
static void disk_zone_wplug_abort_unaligned(struct gendisk *disk,
struct blk_zone_wplug *zwplug)
{
unsigned int zone_capacity = disk->zone_capacity;
unsigned int wp_offset = zwplug->wp_offset;
struct bio_list bl = BIO_EMPTY_LIST;
struct bio *bio;
while ((bio = bio_list_pop(&zwplug->bio_list))) {
if (wp_offset >= zone_capacity ||
if (disk_zone_is_full(disk, zwplug->zone_no, wp_offset) ||
(bio_op(bio) != REQ_OP_ZONE_APPEND &&
bio_offset_from_zone_start(bio) != wp_offset)) {
blk_zone_wplug_bio_io_error(zwplug, bio);
@ -909,7 +927,6 @@ void blk_zone_write_plug_init_request(struct request *req)
sector_t req_back_sector = blk_rq_pos(req) + blk_rq_sectors(req);
struct request_queue *q = req->q;
struct gendisk *disk = q->disk;
unsigned int zone_capacity = disk->zone_capacity;
struct blk_zone_wplug *zwplug =
disk_get_zone_wplug(disk, blk_rq_pos(req));
unsigned long flags;
@ -933,7 +950,7 @@ void blk_zone_write_plug_init_request(struct request *req)
* into the back of the request.
*/
spin_lock_irqsave(&zwplug->lock, flags);
while (zwplug->wp_offset < zone_capacity) {
while (!disk_zone_wplug_is_full(disk, zwplug)) {
bio = bio_list_peek(&zwplug->bio_list);
if (!bio)
break;
@ -979,7 +996,7 @@ static bool blk_zone_wplug_prepare_bio(struct blk_zone_wplug *zwplug,
* We know such BIO will fail, and that would potentially overflow our
* write pointer offset beyond the end of the zone.
*/
if (zwplug->wp_offset >= disk->zone_capacity)
if (disk_zone_wplug_is_full(disk, zwplug))
goto err;
if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
@ -1556,6 +1573,7 @@ void disk_free_zone_resources(struct gendisk *disk)
kfree(disk->conv_zones_bitmap);
disk->conv_zones_bitmap = NULL;
disk->zone_capacity = 0;
disk->last_zone_capacity = 0;
disk->nr_zones = 0;
}
@ -1600,6 +1618,7 @@ struct blk_revalidate_zone_args {
unsigned long *conv_zones_bitmap;
unsigned int nr_zones;
unsigned int zone_capacity;
unsigned int last_zone_capacity;
sector_t sector;
};
@ -1617,6 +1636,7 @@ static int disk_update_zone_resources(struct gendisk *disk,
disk->nr_zones = args->nr_zones;
disk->zone_capacity = args->zone_capacity;
disk->last_zone_capacity = args->last_zone_capacity;
swap(disk->conv_zones_bitmap, args->conv_zones_bitmap);
if (disk->conv_zones_bitmap)
nr_conv_zones = bitmap_weight(disk->conv_zones_bitmap,
@ -1668,6 +1688,9 @@ static int blk_revalidate_conv_zone(struct blk_zone *zone, unsigned int idx,
return -ENODEV;
}
if (disk_zone_is_last(disk, zone))
args->last_zone_capacity = zone->capacity;
if (!disk_need_zone_resources(disk))
return 0;
@ -1693,11 +1716,14 @@ static int blk_revalidate_seq_zone(struct blk_zone *zone, unsigned int idx,
/*
* Remember the capacity of the first sequential zone and check
* if it is constant for all zones.
* if it is constant for all zones, ignoring the last zone as it can be
* smaller.
*/
if (!args->zone_capacity)
args->zone_capacity = zone->capacity;
if (zone->capacity != args->zone_capacity) {
if (disk_zone_is_last(disk, zone)) {
args->last_zone_capacity = zone->capacity;
} else if (zone->capacity != args->zone_capacity) {
pr_warn("%s: Invalid variable zone capacity\n",
disk->disk_name);
return -ENODEV;
@ -1732,7 +1758,6 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
{
struct blk_revalidate_zone_args *args = data;
struct gendisk *disk = args->disk;
sector_t capacity = get_capacity(disk);
sector_t zone_sectors = disk->queue->limits.chunk_sectors;
int ret;
@ -1743,7 +1768,7 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
return -ENODEV;
}
if (zone->start >= capacity || !zone->len) {
if (zone->start >= get_capacity(disk) || !zone->len) {
pr_warn("%s: Invalid zone start %llu, length %llu\n",
disk->disk_name, zone->start, zone->len);
return -ENODEV;
@ -1753,7 +1778,7 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
* All zones must have the same size, with the exception on an eventual
* smaller last zone.
*/
if (zone->start + zone->len < capacity) {
if (!disk_zone_is_last(disk, zone)) {
if (zone->len != zone_sectors) {
pr_warn("%s: Invalid zoned device with non constant zone size\n",
disk->disk_name);


@ -429,7 +429,6 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x02d7), board_ahci_pcs_quirk }, /* Comet Lake PCH RAID */
/* Elkhart Lake IDs 0x4b60 & 0x4b62 https://sata-io.org/product/8803 not tested yet */
{ PCI_VDEVICE(INTEL, 0x4b63), board_ahci_pcs_quirk }, /* Elkhart Lake AHCI */
{ PCI_VDEVICE(INTEL, 0x7ae2), board_ahci_pcs_quirk }, /* Alder Lake-P AHCI */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,


@ -4136,8 +4136,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "PIONEER BD-RW BDR-207M", NULL, ATA_HORKAGE_NOLPM },
{ "PIONEER BD-RW BDR-205", NULL, ATA_HORKAGE_NOLPM },
/* Crucial BX100 SSD 500GB has broken LPM support */
/* Crucial devices with broken LPM support */
{ "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM },
{ "CT240BX500SSD1", NULL, ATA_HORKAGE_NOLPM },
/* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
{ "Crucial_CT512MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
@ -4155,6 +4156,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
ATA_HORKAGE_ZERO_AFTER_TRIM |
ATA_HORKAGE_NOLPM },
/* AMD Radeon devices with broken LPM support */
{ "R3SL240G", NULL, ATA_HORKAGE_NOLPM },
/* Apacer models with LPM issues */
{ "Apacer AS340*", NULL, ATA_HORKAGE_NOLPM },
/* These specific Samsung models/firmware-revs do not handle LPM well */
{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM },
{ "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM },


@ -350,7 +350,8 @@ static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
if (quirks->max_write_len &&
(bus->max_raw_write == 0 || bus->max_raw_write > quirks->max_write_len))
max_write = quirks->max_write_len;
max_write = quirks->max_write_len -
(config->reg_bits + config->pad_bits) / BITS_PER_BYTE;
if (max_read || max_write) {
ret_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
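
For illustration only (hypothetical device, not taken from the patch): with 8-bit registers, no pad bits and an adapter quirk of max_write_len = 32, the advertised raw-write limit becomes 32 - (8 + 0) / 8 = 31 bytes of payload, presumably so that the register-address byte regmap prepends to each I2C write still fits under the adapter's overall message-length limit.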


@ -494,6 +494,7 @@ static ssize_t nullb_device_power_store(struct config_item *item,
set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
dev->power = newp;
ret = count;
} else if (dev->power && !newp) {
if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
dev->power = newp;


@ -74,6 +74,17 @@ int null_init_zoned_dev(struct nullb_device *dev,
return -EINVAL;
}
/*
* If a smaller zone capacity was requested, do not allow a smaller last
* zone at the same time as such zone configuration does not correspond
* to any real zoned device.
*/
if (dev->zone_capacity != dev->zone_size &&
dev->size & (dev->zone_size - 1)) {
pr_err("A smaller last zone is not allowed with zone capacity smaller than zone size.\n");
return -EINVAL;
}
zone_capacity_sects = mb_to_sects(dev->zone_capacity);
dev_capacity_sects = mb_to_sects(dev->size);
dev->zone_size_sects = mb_to_sects(dev->zone_size);
@ -108,7 +119,7 @@ int null_init_zoned_dev(struct nullb_device *dev,
if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) {
dev->zone_max_open = dev->zone_max_active;
pr_info("changed the maximum number of open zones to %u\n",
dev->nr_zones);
dev->zone_max_open);
} else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) {
dev->zone_max_open = 0;
pr_info("zone_max_open limit disabled, limit >= zone count\n");
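
As a made-up example of the configuration the new check above rejects: size=10 (MB) with zone_size=4 leaves a 2 MB runt last zone (10 & (4 - 1) = 2, i.e. non-zero), and if zone_capacity is also set below 4 the setup is refused, since, as the comment notes, no real zoned device combines a reduced zone capacity with a smaller last zone.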


@ -64,19 +64,6 @@ static size_t rng_buffer_size(void)
return RNG_BUFFER_SIZE;
}
static void add_early_randomness(struct hwrng *rng)
{
int bytes_read;
mutex_lock(&reading_mutex);
bytes_read = rng_get_data(rng, rng_fillbuf, 32, 0);
mutex_unlock(&reading_mutex);
if (bytes_read > 0) {
size_t entropy = bytes_read * 8 * rng->quality / 1024;
add_hwgenerator_randomness(rng_fillbuf, bytes_read, entropy, false);
}
}
static inline void cleanup_rng(struct kref *kref)
{
struct hwrng *rng = container_of(kref, struct hwrng, ref);
@ -340,13 +327,12 @@ static ssize_t rng_current_store(struct device *dev,
const char *buf, size_t len)
{
int err;
struct hwrng *rng, *old_rng, *new_rng;
struct hwrng *rng, *new_rng;
err = mutex_lock_interruptible(&rng_mutex);
if (err)
return -ERESTARTSYS;
old_rng = current_rng;
if (sysfs_streq(buf, "")) {
err = enable_best_rng();
} else {
@ -362,11 +348,8 @@ static ssize_t rng_current_store(struct device *dev,
new_rng = get_current_rng_nolock();
mutex_unlock(&rng_mutex);
if (new_rng) {
if (new_rng != old_rng)
add_early_randomness(new_rng);
if (new_rng)
put_rng(new_rng);
}
return err ? : len;
}
@ -544,7 +527,6 @@ int hwrng_register(struct hwrng *rng)
{
int err = -EINVAL;
struct hwrng *tmp;
bool is_new_current = false;
if (!rng->name || (!rng->data_read && !rng->read))
goto out;
@ -573,25 +555,8 @@ int hwrng_register(struct hwrng *rng)
err = set_current_rng(rng);
if (err)
goto out_unlock;
/* to use current_rng in add_early_randomness() we need
* to take a ref
*/
is_new_current = true;
kref_get(&rng->ref);
}
mutex_unlock(&rng_mutex);
if (is_new_current || !rng->init) {
/*
* Use a new device's input to add some randomness to
* the system. If this rng device isn't going to be
* used right away, its init function hasn't been
* called yet by set_current_rng(); so only use the
* randomness from devices that don't need an init callback
*/
add_early_randomness(rng);
}
if (is_new_current)
put_rng(rng);
return 0;
out_unlock:
mutex_unlock(&rng_mutex);
@ -602,12 +567,11 @@ EXPORT_SYMBOL_GPL(hwrng_register);
void hwrng_unregister(struct hwrng *rng)
{
struct hwrng *old_rng, *new_rng;
struct hwrng *new_rng;
int err;
mutex_lock(&rng_mutex);
old_rng = current_rng;
list_del(&rng->list);
complete_all(&rng->dying);
if (current_rng == rng) {
@ -626,11 +590,8 @@ void hwrng_unregister(struct hwrng *rng)
} else
mutex_unlock(&rng_mutex);
if (new_rng) {
if (old_rng != new_rng)
add_early_randomness(new_rng);
if (new_rng)
put_rng(new_rng);
}
wait_for_completion(&rng->cleanup_done);
}


@ -29,7 +29,7 @@ if TCG_TPM
config TCG_TPM2_HMAC
bool "Use HMAC and encrypted transactions on the TPM bus"
default y
default X86_64
select CRYPTO_ECDH
select CRYPTO_LIB_AESCFB
select CRYPTO_LIB_SHA256


@ -223,30 +223,4 @@ u32 tpm_buf_read_u32(struct tpm_buf *buf, off_t *offset)
}
EXPORT_SYMBOL_GPL(tpm_buf_read_u32);
static u16 tpm_buf_tag(struct tpm_buf *buf)
{
struct tpm_header *head = (struct tpm_header *)buf->data;
return be16_to_cpu(head->tag);
}
/**
* tpm_buf_parameters - return the TPM response parameters area of the tpm_buf
* @buf: tpm_buf to use
*
* Where the parameters are located depends on the tag of a TPM
* command (it's immediately after the header for TPM_ST_NO_SESSIONS
* or 4 bytes after for TPM_ST_SESSIONS). Evaluate this and return a
* pointer to the first byte of the parameters area.
*
* @return: pointer to parameters area
*/
u8 *tpm_buf_parameters(struct tpm_buf *buf)
{
int offset = TPM_HEADER_SIZE;
if (tpm_buf_tag(buf) == TPM2_ST_SESSIONS)
offset += 4;
return &buf->data[offset];
}


@ -281,6 +281,7 @@ struct tpm2_get_random_out {
int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
{
struct tpm2_get_random_out *out;
struct tpm_header *head;
struct tpm_buf buf;
u32 recd;
u32 num_bytes = max;
@ -288,6 +289,7 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
int total = 0;
int retries = 5;
u8 *dest_ptr = dest;
off_t offset;
if (!num_bytes || max > TPM_MAX_RNG_DATA)
return -EINVAL;
@ -320,7 +322,13 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
goto out;
}
out = (struct tpm2_get_random_out *)tpm_buf_parameters(&buf);
head = (struct tpm_header *)buf.data;
offset = TPM_HEADER_SIZE;
/* Skip the parameter size field: */
if (be16_to_cpu(head->tag) == TPM2_ST_SESSIONS)
offset += 4;
out = (struct tpm2_get_random_out *)&buf.data[offset];
recd = min_t(u32, be16_to_cpu(out->size), num_bytes);
if (tpm_buf_length(&buf) <
TPM_HEADER_SIZE +
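
Spelled out, the offset computation above encodes the same rule the removed tpm_buf_parameters() helper documented: the response header (struct tpm_header: 2-byte tag, 4-byte length, 4-byte return code) is TPM_HEADER_SIZE = 10 bytes, and a TPM2_ST_SESSIONS response carries a 4-byte parameter-size field right after it, so the random-data payload starts at offset 10 + 4 = 14; for TPM_ST_NO_SESSIONS it starts directly at offset 10.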


@ -80,6 +80,9 @@
/* maximum number of names the TPM must remember for authorization */
#define AUTH_MAX_NAMES 3
#define AES_KEY_BYTES AES_KEYSIZE_128
#define AES_KEY_BITS (AES_KEY_BYTES*8)
static int tpm2_create_primary(struct tpm_chip *chip, u32 hierarchy,
u32 *handle, u8 *name);
@ -954,6 +957,20 @@ int tpm2_start_auth_session(struct tpm_chip *chip)
}
EXPORT_SYMBOL(tpm2_start_auth_session);
/*
* A mask containing the object attributes for the kernel held null primary key
* used in HMAC encryption. For more information on specific attributes look up
* to "8.3 TPMA_OBJECT (Object Attributes)".
*/
#define TPM2_OA_NULL_KEY ( \
TPM2_OA_NO_DA | \
TPM2_OA_FIXED_TPM | \
TPM2_OA_FIXED_PARENT | \
TPM2_OA_SENSITIVE_DATA_ORIGIN | \
TPM2_OA_USER_WITH_AUTH | \
TPM2_OA_DECRYPT | \
TPM2_OA_RESTRICTED)
/**
* tpm2_parse_create_primary() - parse the data returned from TPM_CC_CREATE_PRIMARY
*
@ -1018,7 +1035,7 @@ static int tpm2_parse_create_primary(struct tpm_chip *chip, struct tpm_buf *buf,
val = tpm_buf_read_u32(buf, &offset_t);
/* object properties */
if (val != TPM2_OA_TMPL)
if (val != TPM2_OA_NULL_KEY)
return -EINVAL;
/* auth policy (empty) */
@ -1178,7 +1195,7 @@ static int tpm2_create_primary(struct tpm_chip *chip, u32 hierarchy,
tpm_buf_append_u16(&template, TPM_ALG_SHA256);
/* object properties */
tpm_buf_append_u32(&template, TPM2_OA_TMPL);
tpm_buf_append_u32(&template, TPM2_OA_NULL_KEY);
/* sauth policy (empty) */
tpm_buf_append_u16(&template, 0);


@ -37,6 +37,7 @@
#include "tpm_tis_spi.h"
#define MAX_SPI_FRAMESIZE 64
#define SPI_HDRSIZE 4
/*
* TCG SPI flow control is documented in section 6.4 of the spec[1]. In short,
@ -247,7 +248,7 @@ static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy,
int irq, const struct tpm_tis_phy_ops *phy_ops)
{
phy->iobuf = devm_kmalloc(&spi->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL);
phy->iobuf = devm_kmalloc(&spi->dev, SPI_HDRSIZE + MAX_SPI_FRAMESIZE, GFP_KERNEL);
if (!phy->iobuf)
return -ENOMEM;


@ -540,6 +540,12 @@ static int race_signal_callback(void *arg)
t[i].before = pass;
t[i].task = kthread_run(thread_signal_callback, &t[i],
"dma-fence:%d", i);
if (IS_ERR(t[i].task)) {
ret = PTR_ERR(t[i].task);
while (--i >= 0)
kthread_stop_put(t[i].task);
return ret;
}
get_task_struct(t[i].task);
}


@ -110,12 +110,12 @@ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
seq_printf(s, "%s: %d\n", obj->name, obj->value);
spin_lock_irq(&obj->lock);
spin_lock(&obj->lock); /* Caller already disabled IRQ. */
list_for_each(pos, &obj->pt_list) {
struct sync_pt *pt = container_of(pos, struct sync_pt, link);
sync_print_fence(s, &pt->base, false);
}
spin_unlock_irq(&obj->lock);
spin_unlock(&obj->lock);
}
static void sync_print_sync_file(struct seq_file *s,


@ -579,4 +579,5 @@ static struct kunit_suite packet_serdes_test_suite = {
};
kunit_test_suite(packet_serdes_test_suite);
MODULE_DESCRIPTION("FireWire packet serialization/deserialization unit test suite");
MODULE_LICENSE("GPL");


@ -86,4 +86,5 @@ static struct kunit_suite structure_layout_test_suite = {
};
kunit_test_suite(structure_layout_test_suite);
MODULE_DESCRIPTION("FireWire UAPI unit test suite");
MODULE_LICENSE("GPL");


@ -196,7 +196,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
return -EINVAL;
vram_size = KFD_XCP_MEMORY_SIZE(adev, xcp_id);
if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
if (adev->flags & AMD_IS_APU) {
system_mem_needed = size;
ttm_mem_needed = size;
}
@ -233,7 +233,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
if (adev && xcp_id >= 0) {
adev->kfd.vram_used[xcp_id] += vram_needed;
adev->kfd.vram_used_aligned[xcp_id] +=
(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) ?
(adev->flags & AMD_IS_APU) ?
vram_needed :
ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
}
@ -261,7 +261,7 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
if (adev) {
adev->kfd.vram_used[xcp_id] -= size;
if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
if (adev->flags & AMD_IS_APU) {
adev->kfd.vram_used_aligned[xcp_id] -= size;
kfd_mem_limit.system_mem_used -= size;
kfd_mem_limit.ttm_mem_used -= size;
@ -890,7 +890,7 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
* if peer device has large BAR. In contrast, access over xGMI is
* allowed for both small and large BAR configurations of peer device
*/
if ((adev != bo_adev && !(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU)) &&
if ((adev != bo_adev && !(adev->flags & AMD_IS_APU)) &&
((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) ||
(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) ||
(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
@ -1658,7 +1658,7 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
- atomic64_read(&adev->vram_pin_size)
- reserved_for_pt;
if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
if (adev->flags & AMD_IS_APU) {
system_mem_available = no_system_mem_limit ?
kfd_mem_limit.max_system_mem_limit :
kfd_mem_limit.max_system_mem_limit -
@ -1706,7 +1706,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
if (adev->flags & AMD_IS_APU) {
domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_flags = 0;
@ -1953,7 +1953,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
if (size) {
if (!is_imported &&
(mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM ||
((adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) &&
((adev->flags & AMD_IS_APU) &&
mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT)))
*size = bo_size;
else
@ -2376,7 +2376,7 @@ static int import_obj_create(struct amdgpu_device *adev,
(*mem)->bo = bo;
(*mem)->va = va;
(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) &&
!(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) ?
!(adev->flags & AMD_IS_APU) ?
AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
(*mem)->mapped_to_gpu_memory = 0;


@ -5944,13 +5944,18 @@ static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
*speed = PCI_SPEED_UNKNOWN;
*width = PCIE_LNK_WIDTH_UNKNOWN;
while ((parent = pci_upstream_bridge(parent))) {
/* skip upstream/downstream switches internal to dGPU*/
if (parent->vendor == PCI_VENDOR_ID_ATI)
continue;
*speed = pcie_get_speed_cap(parent);
*width = pcie_get_width_cap(parent);
break;
if (amdgpu_device_pcie_dynamic_switching_supported(adev)) {
while ((parent = pci_upstream_bridge(parent))) {
/* skip upstream/downstream switches internal to dGPU*/
if (parent->vendor == PCI_VENDOR_ID_ATI)
continue;
*speed = pcie_get_speed_cap(parent);
*width = pcie_get_width_cap(parent);
break;
}
} else {
/* use the current speeds rather than max if switching is not supported */
pcie_bandwidth_available(adev->pdev, NULL, speed, width);
}
}


@ -46,7 +46,7 @@ struct amdgpu_iv_entry;
#define AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(x) AMDGPU_GET_REG_FIELD(x, 7, 7)
#define AMDGPU_RAS_GPU_ERR_SOCKET_ID(x) AMDGPU_GET_REG_FIELD(x, 10, 8)
#define AMDGPU_RAS_GPU_ERR_AID_ID(x) AMDGPU_GET_REG_FIELD(x, 12, 11)
#define AMDGPU_RAS_GPU_ERR_HBM_ID(x) AMDGPU_GET_REG_FIELD(x, 13, 13)
#define AMDGPU_RAS_GPU_ERR_HBM_ID(x) AMDGPU_GET_REG_FIELD(x, 14, 13)
#define AMDGPU_RAS_GPU_ERR_BOOT_STATUS(x) AMDGPU_GET_REG_FIELD(x, 31, 31)
#define AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT 1000


@ -706,11 +706,15 @@ int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
struct amdgpu_vm_bo_base *entry)
{
struct amdgpu_vm_bo_base *parent = amdgpu_vm_pt_parent(entry);
struct amdgpu_bo *bo = parent->bo, *pbo;
struct amdgpu_bo *bo, *pbo;
struct amdgpu_vm *vm = params->vm;
uint64_t pde, pt, flags;
unsigned int level;
if (WARN_ON(!parent))
return -EINVAL;
bo = parent->bo;
for (level = 0, pbo = bo->parent; pbo; ++level)
pbo = pbo->parent;


@ -422,7 +422,7 @@ __aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr)
if (adev->gmc.num_mem_partitions == num_xcc / 2)
return (adev->flags & AMD_IS_APU) ? AMDGPU_TPX_PARTITION_MODE :
AMDGPU_QPX_PARTITION_MODE;
AMDGPU_CPX_PARTITION_MODE;
if (adev->gmc.num_mem_partitions == 2 && !(adev->flags & AMD_IS_APU))
return AMDGPU_DPX_PARTITION_MODE;


@ -408,15 +408,8 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
f2g = &gfx_v11_kfd2kgd;
break;
case IP_VERSION(11, 0, 3):
if ((adev->pdev->device == 0x7460 &&
adev->pdev->revision == 0x00) ||
(adev->pdev->device == 0x7461 &&
adev->pdev->revision == 0x00))
/* Note: Compiler version is 11.0.5 while HW version is 11.0.3 */
gfx_target_version = 110005;
else
/* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
gfx_target_version = 110001;
/* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
gfx_target_version = 110001;
f2g = &gfx_v11_kfd2kgd;
break;
case IP_VERSION(11, 5, 0):


@ -1023,7 +1023,7 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 1))
return -EINVAL;
if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU)
if (adev->flags & AMD_IS_APU)
return 0;
pgmap = &kfddev->pgmap;


@ -2619,8 +2619,7 @@ svm_range_best_restore_location(struct svm_range *prange,
return -1;
}
if (node->adev->gmc.is_app_apu ||
node->adev->flags & AMD_IS_APU)
if (node->adev->flags & AMD_IS_APU)
return 0;
if (prange->preferred_loc == gpuid ||
@ -3338,8 +3337,7 @@ svm_range_best_prefetch_location(struct svm_range *prange)
goto out;
}
if (bo_node->adev->gmc.is_app_apu ||
bo_node->adev->flags & AMD_IS_APU) {
if (bo_node->adev->flags & AMD_IS_APU) {
best_loc = 0;
goto out;
}


@ -201,7 +201,6 @@ void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_s
* is initialized to not 0 when page migration register device memory.
*/
#define KFD_IS_SVM_API_SUPPORTED(adev) ((adev)->kfd.pgmap.type != 0 ||\
(adev)->gmc.is_app_apu ||\
((adev)->flags & AMD_IS_APU))
void svm_range_bo_unref_async(struct svm_range_bo *svm_bo);


@ -613,6 +613,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
&connector->base,
dev->mode_config.tile_property,
0);
connector->colorspace_property = master->base.colorspace_property;
if (connector->colorspace_property)
drm_connector_attach_colorspace_property(connector);
drm_connector_set_path_property(connector, pathprop);


@ -3583,7 +3583,7 @@ struct atom_gpio_voltage_object_v4
uint8_t phase_delay_us; // phase delay in unit of micro second
uint8_t reserved;
uint32_t gpio_mask_val; // GPIO Mask value
struct atom_voltage_gpio_map_lut voltage_gpio_lut[1];
struct atom_voltage_gpio_map_lut voltage_gpio_lut[] __counted_by(gpio_entry_num);
};
struct atom_svid2_voltage_object_v4


@ -1562,7 +1562,6 @@ static int smu_v14_0_2_i2c_control_init(struct smu_context *smu)
smu_i2c->port = i;
mutex_init(&smu_i2c->mutex);
control->owner = THIS_MODULE;
control->class = I2C_CLASS_SPD;
control->dev.parent = &adev->pdev->dev;
control->algo = &smu_v14_0_2_i2c_algo;
snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);


@ -239,7 +239,7 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
if (size < chunk_size)
return -EINVAL;
if (chunk_size < PAGE_SIZE)
if (chunk_size < SZ_4K)
return -EINVAL;
if (!is_power_of_2(chunk_size))


@ -233,6 +233,8 @@ int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
dma_resv_assert_held(shmem->base.resv);
drm_WARN_ON(shmem->base.dev, shmem->base.import_attach);
ret = drm_gem_shmem_get_pages(shmem);
return ret;
@ -611,6 +613,9 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
return ret;
}
if (is_cow_mapping(vma->vm_flags))
return -EINVAL;
dma_resv_lock(shmem->base.resv, NULL);
ret = drm_gem_shmem_get_pages(shmem);
dma_resv_unlock(shmem->base.resv);


@ -1252,17 +1252,6 @@ static const struct component_ops i915_audio_component_bind_ops = {
static void i915_audio_component_init(struct drm_i915_private *i915)
{
u32 aud_freq, aud_freq_init;
int ret;
ret = component_add_typed(i915->drm.dev,
&i915_audio_component_bind_ops,
I915_COMPONENT_AUDIO);
if (ret < 0) {
drm_err(&i915->drm,
"failed to add audio component (%d)\n", ret);
/* continue with reduced functionality */
return;
}
if (DISPLAY_VER(i915) >= 9) {
aud_freq_init = intel_de_read(i915, AUD_FREQ_CNTRL);
@ -1285,6 +1274,21 @@ static void i915_audio_component_init(struct drm_i915_private *i915)
/* init with current cdclk */
intel_audio_cdclk_change_post(i915);
}
static void i915_audio_component_register(struct drm_i915_private *i915)
{
int ret;
ret = component_add_typed(i915->drm.dev,
&i915_audio_component_bind_ops,
I915_COMPONENT_AUDIO);
if (ret < 0) {
drm_err(&i915->drm,
"failed to add audio component (%d)\n", ret);
/* continue with reduced functionality */
return;
}
i915->display.audio.component_registered = true;
}
@ -1317,6 +1321,12 @@ void intel_audio_init(struct drm_i915_private *i915)
i915_audio_component_init(i915);
}
void intel_audio_register(struct drm_i915_private *i915)
{
if (!i915->display.audio.lpe.platdev)
i915_audio_component_register(i915);
}
/**
* intel_audio_deinit() - deinitialize the audio driver
* @i915: the i915 drm device private data


@ -28,6 +28,7 @@ void intel_audio_codec_get_config(struct intel_encoder *encoder,
void intel_audio_cdclk_change_pre(struct drm_i915_private *dev_priv);
void intel_audio_cdclk_change_post(struct drm_i915_private *dev_priv);
void intel_audio_init(struct drm_i915_private *dev_priv);
void intel_audio_register(struct drm_i915_private *i915);
void intel_audio_deinit(struct drm_i915_private *dev_priv);
void intel_audio_sdp_split_update(const struct intel_crtc_state *crtc_state);


@ -540,6 +540,8 @@ void intel_display_driver_register(struct drm_i915_private *i915)
intel_display_driver_enable_user_access(i915);
intel_audio_register(i915);
intel_display_debugfs_register(i915);
/*


@ -255,6 +255,7 @@ struct i915_execbuffer {
struct intel_context *context; /* logical state for the request */
struct i915_gem_context *gem_context; /** caller's context */
intel_wakeref_t wakeref;
intel_wakeref_t wakeref_gt0;
/** our requests to build */
struct i915_request *requests[MAX_ENGINE_INSTANCE + 1];
@ -2685,6 +2686,7 @@ static int
eb_select_engine(struct i915_execbuffer *eb)
{
struct intel_context *ce, *child;
struct intel_gt *gt;
unsigned int idx;
int err;
@ -2708,10 +2710,17 @@ eb_select_engine(struct i915_execbuffer *eb)
}
}
eb->num_batches = ce->parallel.number_children + 1;
gt = ce->engine->gt;
for_each_child(ce, child)
intel_context_get(child);
eb->wakeref = intel_gt_pm_get(ce->engine->gt);
/*
* Keep GT0 active on MTL so that i915_vma_parked() doesn't
* free VMAs while execbuf ioctl is validating VMAs.
*/
if (gt->info.id)
eb->wakeref_gt0 = intel_gt_pm_get(to_gt(gt->i915));
if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
err = intel_context_alloc_state(ce);
@ -2750,6 +2759,9 @@ eb_select_engine(struct i915_execbuffer *eb)
return err;
err:
if (gt->info.id)
intel_gt_pm_put(to_gt(gt->i915), eb->wakeref_gt0);
intel_gt_pm_put(ce->engine->gt, eb->wakeref);
for_each_child(ce, child)
intel_context_put(child);
@ -2763,6 +2775,12 @@ eb_put_engine(struct i915_execbuffer *eb)
struct intel_context *child;
i915_vm_put(eb->context->vm);
/*
* This works in conjunction with eb_select_engine() to prevent
* i915_vma_parked() from interfering while execbuf validates vmas.
*/
if (eb->gt->info.id)
intel_gt_pm_put(to_gt(eb->gt->i915), eb->wakeref_gt0);
intel_gt_pm_put(eb->context->engine->gt, eb->wakeref);
for_each_child(eb->context, child)
intel_context_put(child);


@ -284,7 +284,9 @@ bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj);
static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
/* TODO: make DPT shrinkable when it has no bound vmas */
return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE) &&
!obj->is_dpt;
}
static inline bool


@ -196,7 +196,7 @@ static int verify_access(struct drm_i915_private *i915,
if (err)
goto out_file;
mode = intel_gt_coherent_map_type(to_gt(i915), native_obj, true);
mode = intel_gt_coherent_map_type(to_gt(i915), native_obj, false);
vaddr = i915_gem_object_pin_map_unlocked(native_obj, mode);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);


@ -263,8 +263,13 @@ static void signal_irq_work(struct irq_work *work)
i915_request_put(rq);
}
/* Lazy irq enabling after HW submission */
if (!READ_ONCE(b->irq_armed) && !list_empty(&b->signalers))
intel_breadcrumbs_arm_irq(b);
/* And confirm that we still want irqs enabled before we yield */
if (READ_ONCE(b->irq_armed) && !atomic_read(&b->active))
intel_breadcrumbs_disarm_irq(b);
}
struct intel_breadcrumbs *
@ -315,13 +320,7 @@ void __intel_breadcrumbs_park(struct intel_breadcrumbs *b)
return;
/* Kick the work once more to drain the signalers, and disarm the irq */
irq_work_sync(&b->irq_work);
while (READ_ONCE(b->irq_armed) && !atomic_read(&b->active)) {
local_irq_disable();
signal_irq_work(&b->irq_work);
local_irq_enable();
cond_resched();
}
irq_work_queue(&b->irq_work);
}
void intel_breadcrumbs_free(struct kref *kref)
@ -404,7 +403,7 @@ static void insert_breadcrumb(struct i915_request *rq)
* the request as it may have completed and raised the interrupt as
* we were attaching it into the lists.
*/
if (!b->irq_armed || __i915_request_is_complete(rq))
if (!READ_ONCE(b->irq_armed) || __i915_request_is_complete(rq))
irq_work_queue(&b->irq_work);
}


@ -885,6 +885,12 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
if (IS_DG2(gt->i915)) {
u8 first_ccs = __ffs(CCS_MASK(gt));
/*
* Store the number of active cslices before
* changing the CCS engine configuration
*/
gt->ccs.cslices = CCS_MASK(gt);
/* Mask off all the CCS engine */
info->engine_mask &= ~GENMASK(CCS3, CCS0);
/* Put back in the first CCS engine */


@ -19,7 +19,7 @@ unsigned int intel_gt_apply_ccs_mode(struct intel_gt *gt)
/* Build the value for the fixed CCS load balancing */
for (cslice = 0; cslice < I915_MAX_CCS; cslice++) {
if (CCS_MASK(gt) & BIT(cslice))
if (gt->ccs.cslices & BIT(cslice))
/*
* If available, assign the cslice
* to the first available engine...


@ -207,6 +207,14 @@ struct intel_gt {
[MAX_ENGINE_INSTANCE + 1];
enum intel_submission_method submission_method;
struct {
/*
* Mask of the non fused CCS slices
* to be used for the load balancing
*/
intel_engine_mask_t cslices;
} ccs;
/*
* Default address space (either GGTT or ppGTT depending on arch).
*


@ -29,9 +29,9 @@
*/
#define GUC_KLV_LEN_MIN 1u
#define GUC_KLV_0_KEY (0xffff << 16)
#define GUC_KLV_0_LEN (0xffff << 0)
#define GUC_KLV_n_VALUE (0xffffffff << 0)
#define GUC_KLV_0_KEY (0xffffu << 16)
#define GUC_KLV_0_LEN (0xffffu << 0)
#define GUC_KLV_n_VALUE (0xffffffffu << 0)
/**
* DOC: GuC Self Config KLVs
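
The GuC KLV hunk above adds a "u" suffix to the mask literals. The diff does not say why, but a plausible reason is that 0xffff << 16 shifts a signed int into the sign bit, which is undefined behaviour in C and in practice produces a negative value that sign-extends when widened to 64 bits. A minimal userspace sketch of the difference (standard C, not i915 code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* 0xffff has type int, so shifting it left by 16 overflows the
	 * signed range; typical compilers yield a negative value that
	 * sign-extends when stored in a wider unsigned type. */
	uint64_t from_signed   = (uint64_t)(0xffff << 16);
	/* With the u suffix the shift happens on unsigned int and the
	 * widened value keeps only the intended 32 bits. */
	uint64_t from_unsigned = (uint64_t)(0xffffu << 16);

	printf("0xffff  << 16 widened: 0x%016llx\n",
	       (unsigned long long)from_signed);
	printf("0xffffu << 16 widened: 0x%016llx\n",
	       (unsigned long long)from_unsigned);
	return 0;
}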


@ -185,7 +185,7 @@ static int lima_gem_pin(struct drm_gem_object *obj)
if (bo->heap_size)
return -EINVAL;
return drm_gem_shmem_pin(&bo->base);
return drm_gem_shmem_pin_locked(&bo->base);
}
static int lima_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
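
This lima hunk, and the matching panfrost hunk further down, switch the .pin callback from drm_gem_shmem_pin() to drm_gem_shmem_pin_locked(). The usual reason for picking a _locked variant is that the callback already runs with the relevant lock held (presumably the object's dma-resv here), so the unlocked wrapper would try to take it a second time. A generic, self-contained sketch of that idiom using a plain pthread mutex rather than the DRM primitives:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;
static int pin_count;

/* _locked variant: the caller must already hold obj_lock. */
static void object_pin_locked(void)
{
	pin_count++;
}

/* Unlocked wrapper: takes the lock itself, so calling it from a path
 * that already holds obj_lock would deadlock (the kernel analogue is
 * usually caught by lockdep). */
static void object_pin(void)
{
	pthread_mutex_lock(&obj_lock);
	object_pin_locked();
	pthread_mutex_unlock(&obj_lock);
}

int main(void)
{
	/* A callback invoked with the lock already held must use the
	 * _locked variant... */
	pthread_mutex_lock(&obj_lock);
	object_pin_locked();
	pthread_mutex_unlock(&obj_lock);

	/* ...while ordinary callers use the wrapper. */
	object_pin();

	printf("pin_count = %d\n", pin_count);
	return 0;
}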


@ -538,7 +538,7 @@ class Parser(object):
self.variants.add(reg.domain)
def do_validate(self, schemafile):
if self.validate == False:
if not self.validate:
return
try:
@ -948,7 +948,8 @@ def main():
parser = argparse.ArgumentParser()
parser.add_argument('--rnn', type=str, required=True)
parser.add_argument('--xml', type=str, required=True)
parser.add_argument('--validate', action=argparse.BooleanOptionalAction)
parser.add_argument('--validate', default=False, action='store_true')
parser.add_argument('--no-validate', dest='validate', action='store_false')
subparsers = parser.add_subparsers()
subparsers.required = True


@ -142,11 +142,16 @@ nvif_object_mthd(struct nvif_object *object, u32 mthd, void *data, u32 size)
struct nvif_ioctl_v0 ioctl;
struct nvif_ioctl_mthd_v0 mthd;
} *args;
u32 args_size;
u8 stack[128];
int ret;
if (sizeof(*args) + size > sizeof(stack)) {
if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL)))
if (check_add_overflow(sizeof(*args), size, &args_size))
return -ENOMEM;
if (args_size > sizeof(stack)) {
args = kmalloc(args_size, GFP_KERNEL);
if (!args)
return -ENOMEM;
} else {
args = (void *)stack;
@ -157,7 +162,7 @@ nvif_object_mthd(struct nvif_object *object, u32 mthd, void *data, u32 size)
args->mthd.method = mthd;
memcpy(args->mthd.data, data, size);
ret = nvif_object_ioctl(object, args, sizeof(*args) + size, NULL);
ret = nvif_object_ioctl(object, args, args_size, NULL);
memcpy(data, args->mthd.data, size);
if (args != (void *)stack)
kfree(args);
@ -276,7 +281,15 @@ nvif_object_ctor(struct nvif_object *parent, const char *name, u32 handle,
object->map.size = 0;
if (parent) {
if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL))) {
u32 args_size;
if (check_add_overflow(sizeof(*args), size, &args_size)) {
nvif_object_dtor(object);
return -ENOMEM;
}
args = kmalloc(args_size, GFP_KERNEL);
if (!args) {
nvif_object_dtor(object);
return -ENOMEM;
}
@ -293,8 +306,7 @@ nvif_object_ctor(struct nvif_object *parent, const char *name, u32 handle,
args->new.oclass = oclass;
memcpy(args->new.data, data, size);
ret = nvif_object_ioctl(parent, args, sizeof(*args) + size,
&object->priv);
ret = nvif_object_ioctl(parent, args, args_size, &object->priv);
memcpy(data, args->new.data, size);
kfree(args);
if (ret == 0)
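
The nvif_object.c changes above replace the open-coded sizeof(*args) + size with a checked addition, so an oversized request cannot wrap the 32-bit total and end up with an undersized allocation. The kernel's check_add_overflow() is built on the compiler overflow builtins; a standalone sketch of the same pattern using __builtin_add_overflow() directly (a GCC/Clang builtin, with made-up structure names):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct args_hdr {
	uint32_t ioctl;
	uint32_t mthd;
	/* payload follows */
};

/* Allocate header + payload, or return NULL if the total would not fit
 * in a u32 (mirroring the -ENOMEM path added in the diff). */
static void *alloc_args(uint32_t payload_size, uint32_t *total_out)
{
	uint32_t total;

	if (__builtin_add_overflow(sizeof(struct args_hdr), payload_size, &total))
		return NULL;

	*total_out = total;
	return malloc(total);
}

int main(void)
{
	uint32_t total = 0;
	void *args;

	if (!alloc_args(UINT32_MAX - 2, &total))
		printf("oversized request rejected instead of wrapping\n");

	args = alloc_args(128, &total);
	printf("normal request: %u bytes\n", total);
	free(args);
	return 0;
}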


@ -340,6 +340,8 @@ config DRM_PANEL_LG_SW43408
depends on OF
depends on DRM_MIPI_DSI
depends on BACKLIGHT_CLASS_DEVICE
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
help
Say Y here if you want to enable support for LG sw43408 panel.
The panel has a 1080x2160@60Hz resolution and uses 24 bit RGB per


@ -182,7 +182,7 @@ static int sw43408_backlight_update_status(struct backlight_device *bl)
return mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
}
const struct backlight_ops sw43408_backlight_ops = {
static const struct backlight_ops sw43408_backlight_ops = {
.update_status = sw43408_backlight_update_status,
};


@ -282,15 +282,15 @@ static const struct drm_display_mode et028013dma_mode = {
static const struct drm_display_mode jt240mhqs_hwt_ek_e3_mode = {
.clock = 6000,
.hdisplay = 240,
.hsync_start = 240 + 28,
.hsync_end = 240 + 28 + 10,
.htotal = 240 + 28 + 10 + 10,
.hsync_start = 240 + 38,
.hsync_end = 240 + 38 + 10,
.htotal = 240 + 38 + 10 + 10,
.vdisplay = 280,
.vsync_start = 280 + 8,
.vsync_end = 280 + 8 + 4,
.vtotal = 280 + 8 + 4 + 4,
.width_mm = 43,
.height_mm = 37,
.vsync_start = 280 + 48,
.vsync_end = 280 + 48 + 4,
.vtotal = 280 + 48 + 4 + 4,
.width_mm = 37,
.height_mm = 43,
.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
};
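
A quick way to sanity-check the corrected jt240mhqs_hwt_ek_e3 timings above is the usual vertical refresh formula for a display mode, refresh = clock_in_kHz * 1000 / (htotal * vtotal): the new porch values still land near 60 Hz, and swapping width_mm/height_mm to 37 x 43 matches the portrait aspect of the 240x280 pixel array. A tiny standalone calculation (the struct is a local stand-in, not the kernel's drm_display_mode):

#include <stdio.h>

struct mode {
	int clock;	/* kHz */
	int htotal;
	int vtotal;
};

int main(void)
{
	/* Corrected values: htotal = 240 + 38 + 10 + 10,
	 * vtotal = 280 + 48 + 4 + 4. */
	struct mode m = { .clock = 6000, .htotal = 298, .vtotal = 336 };
	double refresh = (double)m.clock * 1000.0 / (m.htotal * m.vtotal);

	printf("refresh = %.2f Hz\n", refresh);	/* about 59.9 Hz */
	return 0;
}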


@ -192,7 +192,7 @@ static int panfrost_gem_pin(struct drm_gem_object *obj)
if (bo->is_heap)
return -EINVAL;
return drm_gem_shmem_pin(&bo->base);
return drm_gem_shmem_pin_locked(&bo->base);
}
static enum drm_gem_object_status panfrost_gem_status(struct drm_gem_object *obj)


@ -505,8 +505,8 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
* Eventually we will have a fully 50% fragmented mm.
*/
mm_size = PAGE_SIZE << max_order;
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
mm_size = SZ_4K << max_order;
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
"buddy_init failed\n");
KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
@ -520,7 +520,7 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
}
for (order = top; order--;) {
size = get_size(order, PAGE_SIZE);
size = get_size(order, mm.chunk_size);
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start,
mm_size, size, size,
&tmp, flags),
@ -534,7 +534,7 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
}
/* There should be one final page for this sub-allocation */
size = get_size(0, PAGE_SIZE);
size = get_size(0, mm.chunk_size);
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc hit -ENOMEM for hole\n");
@ -544,7 +544,7 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
list_move_tail(&block->link, &holes);
size = get_size(top, PAGE_SIZE);
size = get_size(top, mm.chunk_size);
KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
@ -555,7 +555,7 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
/* Nothing larger than blocks of chunk_size now available */
for (order = 1; order <= max_order; order++) {
size = get_size(order, PAGE_SIZE);
size = get_size(order, mm.chunk_size);
KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc unexpectedly succeeded at order %d, it should be full!",
@ -584,14 +584,14 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
* page left.
*/
mm_size = PAGE_SIZE << max_order;
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
mm_size = SZ_4K << max_order;
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
"buddy_init failed\n");
KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
for (order = 0; order < max_order; order++) {
size = get_size(order, PAGE_SIZE);
size = get_size(order, mm.chunk_size);
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc hit -ENOMEM with order=%d\n",
@ -604,7 +604,7 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
}
/* And now the last remaining block available */
size = get_size(0, PAGE_SIZE);
size = get_size(0, mm.chunk_size);
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc hit -ENOMEM on final alloc\n");
@ -616,7 +616,7 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
/* Should be completely full! */
for (order = max_order; order--;) {
size = get_size(order, PAGE_SIZE);
size = get_size(order, mm.chunk_size);
KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc unexpectedly succeeded, it should be full!");
@ -632,7 +632,7 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
list_del(&block->link);
drm_buddy_free_block(&mm, block);
size = get_size(order, PAGE_SIZE);
size = get_size(order, mm.chunk_size);
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc hit -ENOMEM with order=%d\n",
@ -647,7 +647,7 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
}
/* To confirm, now the whole mm should be available */
size = get_size(max_order, PAGE_SIZE);
size = get_size(max_order, mm.chunk_size);
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
@ -678,15 +678,15 @@ static void drm_test_buddy_alloc_optimistic(struct kunit *test)
* try to allocate them all.
*/
mm_size = PAGE_SIZE * ((1 << (max_order + 1)) - 1);
mm_size = SZ_4K * ((1 << (max_order + 1)) - 1);
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
"buddy_init failed\n");
KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
for (order = 0; order <= max_order; order++) {
size = get_size(order, PAGE_SIZE);
size = get_size(order, mm.chunk_size);
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc hit -ENOMEM with order=%d\n",
@ -699,7 +699,7 @@ static void drm_test_buddy_alloc_optimistic(struct kunit *test)
}
/* Should be completely full! */
size = get_size(0, PAGE_SIZE);
size = get_size(0, mm.chunk_size);
KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc unexpectedly succeeded, it should be full!");
@ -716,7 +716,7 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)
LIST_HEAD(allocated);
struct drm_buddy mm;
KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, size, PAGE_SIZE));
KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, size, SZ_4K));
KUNIT_EXPECT_EQ_MSG(test, mm.max_order, DRM_BUDDY_MAX_ORDER,
"mm.max_order(%d) != %d\n", mm.max_order,
@ -724,7 +724,7 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)
size = mm.chunk_size << mm.max_order;
KUNIT_EXPECT_FALSE(test, drm_buddy_alloc_blocks(&mm, start, size, size,
PAGE_SIZE, &allocated, flags));
mm.chunk_size, &allocated, flags));
block = list_first_entry_or_null(&allocated, struct drm_buddy_block, link);
KUNIT_EXPECT_TRUE(test, block);
@ -734,10 +734,10 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)
drm_buddy_block_order(block), mm.max_order);
KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_size(&mm, block),
BIT_ULL(mm.max_order) * PAGE_SIZE,
BIT_ULL(mm.max_order) * mm.chunk_size,
"block size(%llu) != %llu\n",
drm_buddy_block_size(&mm, block),
BIT_ULL(mm.max_order) * PAGE_SIZE);
BIT_ULL(mm.max_order) * mm.chunk_size);
drm_buddy_free_list(&mm, &allocated, 0);
drm_buddy_fini(&mm);
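
The kunit hunks above stop deriving every test size from PAGE_SIZE and instead fix the buddy chunk at SZ_4K (and use mm.chunk_size from then on), presumably so the test geometry is identical on kernels built with 16K or 64K pages. A toy illustration of the difference, with get_size() assumed to be the usual chunk-shifted-by-order helper:

#include <stdio.h>

/* Assumed helper: size of a buddy allocation of the given order. */
static unsigned long long get_size(unsigned int order, unsigned long long chunk)
{
	return chunk << order;
}

int main(void)
{
	const unsigned int max_order = 16;
	const unsigned long long chunk_4k = 4096;	/* SZ_4K */
	const unsigned long long page_64k = 65536;	/* e.g. arm64 with 64K pages */

	printf("fixed SZ_4K chunk: mm_size = %llu, top order = %llu\n",
	       chunk_4k << max_order, get_size(max_order, chunk_4k));
	printf("PAGE_SIZE derived: mm_size = %llu, top order = %llu\n",
	       page_64k << max_order, get_size(max_order, page_64k));
	return 0;
}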


@ -1240,6 +1240,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
return 0;
err_entity:
mutex_unlock(&guc->submission_state.lock);
xe_sched_entity_fini(&ge->entity);
err_sched:
xe_sched_fini(&ge->sched);
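
The one-line xe fix above adds xe_sched_entity_fini() to the err_entity label, so the entity set up just before the failing step is now unwound on the error path. The underlying pattern is the usual goto-based cleanup where each label undoes exactly the steps that had succeeded; a generic sketch with made-up names (not Xe functions):

#include <stdio.h>
#include <stdlib.h>

struct ctx {
	void *sched;
	void *entity;
};

static int ctx_setup(struct ctx *c, int fail_last_step)
{
	int err = -1;

	c->sched = malloc(32);			/* step 1 */
	if (!c->sched)
		return err;

	c->entity = malloc(32);			/* step 2 */
	if (!c->entity)
		goto err_sched;

	if (fail_last_step)			/* step 3 fails */
		goto err_entity;

	return 0;

err_entity:
	free(c->entity);	/* undo step 2: analogous to the added fini call */
	c->entity = NULL;
err_sched:
	free(c->sched);		/* undo step 1 */
	c->sched = NULL;
	return err;
}

int main(void)
{
	struct ctx c = { 0 };

	printf("setup returned %d; both earlier steps were unwound, nothing leaked\n",
	       ctx_setup(&c, 1));
	return 0;
}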


@ -34,7 +34,6 @@
#include "xe_sync.h"
#include "xe_trace.h"
#include "xe_vm.h"
#include "xe_wa.h"
/**
* struct xe_migrate - migrate context.
@ -300,10 +299,6 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
}
/*
* Due to workaround 16017236439, odd instance hardware copy engines are
* faster than even instance ones.
* This function returns the mask involving all fast copy engines and the
* reserved copy engine to be used as logical mask for migrate engine.
* Including the reserved copy engine is required to avoid deadlocks due to
* migrate jobs servicing the faults gets stuck behind the job that faulted.
*/
@ -317,8 +312,7 @@ static u32 xe_migrate_usm_logical_mask(struct xe_gt *gt)
if (hwe->class != XE_ENGINE_CLASS_COPY)
continue;
if (!XE_WA(gt, 16017236439) ||
xe_gt_is_usm_hwe(gt, hwe) || hwe->instance & 1)
if (xe_gt_is_usm_hwe(gt, hwe))
logical_mask |= BIT(hwe->logical_instance);
}
@ -369,6 +363,10 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
if (!hwe || !logical_mask)
return ERR_PTR(-EINVAL);
/*
* XXX: Currently only reserving 1 (likely slow) BCS instance on
* PVC, may want to revisit if performance is needed.
*/
m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
EXEC_QUEUE_FLAG_KERNEL |
EXEC_QUEUE_FLAG_PERMANENT |


@ -191,7 +191,7 @@ int xe_pcode_request(struct xe_gt *gt, u32 mbox, u32 request,
drm_WARN_ON_ONCE(&gt_to_xe(gt)->drm, timeout_base_ms > 1);
preempt_disable();
ret = pcode_try_request(gt, mbox, request, reply_mask, reply, &status,
true, timeout_base_ms * 1000, true);
true, 50 * 1000, true);
preempt_enable();
out:


@ -1545,6 +1545,14 @@ static const struct dmi_system_id i8k_whitelist_fan_control[] __initconst = {
},
.driver_data = (void *)&i8k_fan_control_data[I8K_FAN_30A3_31A3],
},
{
.ident = "Dell G15 5511",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Dell G15 5511"),
},
.driver_data = (void *)&i8k_fan_control_data[I8K_FAN_30A3_31A3],
},
{ }
};


@ -429,7 +429,7 @@ static const struct m10bmc_sdata n6000bmc_curr_tbl[] = {
};
static const struct m10bmc_sdata n6000bmc_power_tbl[] = {
{ 0x724, 0x0, 0x0, 0x0, 0x0, 1, "Board Power" },
{ 0x724, 0x0, 0x0, 0x0, 0x0, 1000, "Board Power" },
};
static const struct hwmon_channel_info * const n6000bmc_hinfo[] = {
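
The n6000bmc power table change above bumps the scale factor from 1 to 1000. hwmon exposes power values through sysfs in microwatts, so a multiplier of 1000 is what you would expect if the BMC register holds milliwatts (an assumption here; the hunk itself does not state the register unit). A trivial worked example with a hypothetical register reading:

#include <stdio.h>

int main(void)
{
	/* Hypothetical board-power register reading, assumed milliwatts. */
	unsigned int reg = 80000;	/* 80 W */

	printf("multiplier 1:    %u uW (0.08 W, clearly wrong)\n", reg * 1);
	printf("multiplier 1000: %u uW (80 W)\n", reg * 1000);
	return 0;
}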


@ -876,9 +876,11 @@ static int ltc2992_parse_dt(struct ltc2992_state *st)
ret = fwnode_property_read_u32(child, "shunt-resistor-micro-ohms", &val);
if (!ret) {
if (!val)
if (!val) {
fwnode_handle_put(child);
return dev_err_probe(&st->client->dev, -EINVAL,
"shunt resistor value cannot be zero\n");
}
st->r_sense_uohm[addr] = val;
}
}
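
The ltc2992 hunk above adds fwnode_handle_put() before the early return: the child-node iterator holds a reference on the current child, and bailing out of the loop without dropping it leaks that reference. The same shape applies to any refcounted iteration; a self-contained toy version in plain C (not the fwnode API):

#include <stdio.h>

struct child {
	int refs;
	int value;
};

static struct child *child_get(struct child *c)
{
	c->refs++;
	return c;
}

static void child_put(struct child *c)
{
	c->refs--;
}

/* Scan children and reject a zero value.  The early-return path must
 * drop the reference taken for the current child, just like the added
 * fwnode_handle_put() in the diff. */
static int scan(struct child *children, int n)
{
	for (int i = 0; i < n; i++) {
		struct child *c = child_get(&children[i]);

		if (!c->value) {
			child_put(c);	/* without this, refs stays elevated */
			return -1;
		}
		child_put(c);
	}
	return 0;
}

int main(void)
{
	struct child kids[] = { { 0, 5 }, { 0, 0 }, { 0, 7 } };

	printf("scan returned %d\n", scan(kids, 3));
	printf("refcounts after scan: %d %d %d (all zero, nothing leaked)\n",
	       kids[0].refs, kids[1].refs, kids[2].refs);
	return 0;
}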


@ -238,7 +238,7 @@ static int shtc1_probe(struct i2c_client *client)
if (np) {
data->setup.blocking_io = of_property_read_bool(np, "sensirion,blocking-io");
data->setup.high_precision = !of_property_read_bool(np, "sensicon,low-precision");
data->setup.high_precision = !of_property_read_bool(np, "sensirion,low-precision");
} else {
if (client->dev.platform_data)
data->setup = *(struct shtc1_platform_data *)dev->platform_data;


@ -850,7 +850,6 @@ static int xlnx_mbox_init_sgi(struct platform_device *pdev,
return ret;
}
irq_to_desc(pdata->virq_sgi);
irq_set_status_flags(pdata->virq_sgi, IRQ_PER_CPU);
/* Setup function for the CPU hot-plug cases */


@ -129,12 +129,9 @@ static inline bool can_inc_bucket_gen(struct bucket *b)
bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)
{
BUG_ON(!ca->set->gc_mark_valid);
return (!GC_MARK(b) ||
GC_MARK(b) == GC_MARK_RECLAIMABLE) &&
!atomic_read(&b->pin) &&
can_inc_bucket_gen(b);
return (ca->set->gc_mark_valid || b->reclaimable_in_gc) &&
((!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE) &&
!atomic_read(&b->pin) && can_inc_bucket_gen(b));
}
void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
@ -148,6 +145,7 @@ void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
bch_inc_gen(ca, b);
b->prio = INITIAL_PRIO;
atomic_inc(&b->pin);
b->reclaimable_in_gc = 0;
}
static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
@ -352,8 +350,7 @@ static int bch_allocator_thread(void *arg)
*/
retry_invalidate:
allocator_wait(ca, ca->set->gc_mark_valid &&
!ca->invalidate_needs_gc);
allocator_wait(ca, !ca->invalidate_needs_gc);
invalidate_buckets(ca);
/*
@ -501,8 +498,8 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
ca = c->cache;
b = bch_bucket_alloc(ca, reserve, wait);
if (b == -1)
goto err;
if (b < 0)
return -1;
k->ptr[0] = MAKE_PTR(ca->buckets[b].gen,
bucket_to_sector(c, b),
@ -511,10 +508,6 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
SET_KEY_PTRS(k, 1);
return 0;
err:
bch_bucket_free(c, k);
bkey_put(c, k);
return -1;
}
int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,


@ -200,6 +200,7 @@ struct bucket {
uint8_t gen;
uint8_t last_gc; /* Most out of date gen in the btree */
uint16_t gc_mark; /* Bitfield used by GC. See below for field */
uint16_t reclaimable_in_gc:1;
};
/*


@ -1741,18 +1741,20 @@ static void btree_gc_start(struct cache_set *c)
mutex_lock(&c->bucket_lock);
c->gc_mark_valid = 0;
c->gc_done = ZERO_KEY;
ca = c->cache;
for_each_bucket(b, ca) {
b->last_gc = b->gen;
if (bch_can_invalidate_bucket(ca, b))
b->reclaimable_in_gc = 1;
if (!atomic_read(&b->pin)) {
SET_GC_MARK(b, 0);
SET_GC_SECTORS_USED(b, 0);
}
}
c->gc_mark_valid = 0;
mutex_unlock(&c->bucket_lock);
}
@ -1809,6 +1811,9 @@ static void bch_btree_gc_finish(struct cache_set *c)
for_each_bucket(b, ca) {
c->need_gc = max(c->need_gc, bucket_gc_gen(b));
if (b->reclaimable_in_gc)
b->reclaimable_in_gc = 0;
if (atomic_read(&b->pin))
continue;


@ -369,10 +369,24 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
struct io *i;
if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
(bio_op(bio) == REQ_OP_DISCARD))
goto skip;
if (c->gc_stats.in_use > CUTOFF_CACHE_ADD) {
/*
* If cached buckets are all clean now, 'true' will be
* returned and all requests will bypass the cache device.
* Then c->sectors_to_gc has no chance to be negative, and
* gc thread won't wake up and caching won't work forever.
* Here call force_wake_up_gc() to avoid such aftermath.
*/
if (BDEV_STATE(&dc->sb) == BDEV_STATE_CLEAN &&
c->gc_mark_valid)
force_wake_up_gc(c);
goto skip;
}
if (mode == CACHE_MODE_NONE ||
(mode == CACHE_MODE_WRITEAROUND &&
op_is_write(bio_op(bio))))


@ -1981,10 +1981,6 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
if (!dm_table_supports_secure_erase(t))
limits->max_secure_erase_sectors = 0;
r = queue_limits_set(q, limits);
if (r)
return r;
if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
wc = true;
if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
@ -2036,15 +2032,16 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
* For a zoned target, setup the zones related queue attributes
* and resources necessary for zone append emulation if necessary.
*/
if (blk_queue_is_zoned(q)) {
r = dm_set_zones_restrictions(t, q);
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && limits->zoned) {
r = dm_set_zones_restrictions(t, q, limits);
if (r)
return r;
if (blk_queue_is_zoned(q) &&
!static_key_enabled(&zoned_enabled.key))
static_branch_enable(&zoned_enabled);
}
r = queue_limits_set(q, limits);
if (r)
return r;
dm_update_crypto_profile(q, t);
/*


@ -160,37 +160,6 @@ static int dm_check_zoned_cb(struct blk_zone *zone, unsigned int idx,
return 0;
}
static int dm_check_zoned(struct mapped_device *md, struct dm_table *t)
{
struct gendisk *disk = md->disk;
unsigned int nr_conv_zones = 0;
int ret;
/* Count conventional zones */
md->zone_revalidate_map = t;
ret = dm_blk_report_zones(disk, 0, UINT_MAX,
dm_check_zoned_cb, &nr_conv_zones);
md->zone_revalidate_map = NULL;
if (ret < 0) {
DMERR("Check zoned failed %d", ret);
return ret;
}
/*
* If we only have conventional zones, expose the mapped device as
* a regular device.
*/
if (nr_conv_zones >= ret) {
disk->queue->limits.max_open_zones = 0;
disk->queue->limits.max_active_zones = 0;
disk->queue->limits.zoned = false;
clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
disk->nr_zones = 0;
}
return 0;
}
/*
* Revalidate the zones of a mapped device to initialize resource necessary
* for zone append emulation. Note that we cannot simply use the block layer
@ -251,9 +220,12 @@ static bool dm_table_supports_zone_append(struct dm_table *t)
return true;
}
int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q)
int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *lim)
{
struct mapped_device *md = t->md;
struct gendisk *disk = md->disk;
unsigned int nr_conv_zones = 0;
int ret;
/*
@ -265,21 +237,37 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q)
clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
} else {
set_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
blk_queue_max_zone_append_sectors(q, 0);
lim->max_zone_append_sectors = 0;
}
if (!get_capacity(md->disk))
return 0;
/*
* Check that the mapped device will indeed be zoned, that is, that it
* has sequential write required zones.
* Count conventional zones to check that the mapped device will indeed
* have sequential write required zones.
*/
ret = dm_check_zoned(md, t);
if (ret)
md->zone_revalidate_map = t;
ret = dm_blk_report_zones(disk, 0, UINT_MAX,
dm_check_zoned_cb, &nr_conv_zones);
md->zone_revalidate_map = NULL;
if (ret < 0) {
DMERR("Check zoned failed %d", ret);
return ret;
if (!blk_queue_is_zoned(q))
}
/*
* If we only have conventional zones, expose the mapped device as
* a regular device.
*/
if (nr_conv_zones >= ret) {
lim->max_open_zones = 0;
lim->max_active_zones = 0;
lim->zoned = false;
clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
disk->nr_zones = 0;
return 0;
}
if (!md->disk->nr_zones) {
DMINFO("%s using %s zone append",
@ -287,7 +275,13 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q)
queue_emulates_zone_append(q) ? "emulated" : "native");
}
return dm_revalidate_zones(md, t);
ret = dm_revalidate_zones(md, t);
if (ret < 0)
return ret;
if (!static_key_enabled(&zoned_enabled.key))
static_branch_enable(&zoned_enabled);
return 0;
}
/*


@ -101,7 +101,8 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
/*
* Zoned targets related functions.
*/
int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q);
int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *lim);
void dm_zone_endio(struct dm_io *io, struct bio *clone);
#ifdef CONFIG_BLK_DEV_ZONED
int dm_blk_report_zones(struct gendisk *disk, sector_t sector,


@ -3142,7 +3142,7 @@ phy_interface_t ksz_get_xmii(struct ksz_device *dev, int port, bool gbit)
else
interface = PHY_INTERFACE_MODE_MII;
} else if (val == bitval[P_RMII_SEL]) {
interface = PHY_INTERFACE_MODE_RGMII;
interface = PHY_INTERFACE_MODE_RMII;
} else {
interface = PHY_INTERFACE_MODE_RGMII;
if (data8 & P_RGMII_ID_EG_ENABLE)


@ -312,7 +312,6 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
struct ena_com_io_sq *io_sq)
{
size_t size;
int dev_node = 0;
memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
@ -325,12 +324,9 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
size = io_sq->desc_entry_size * io_sq->q_depth;
if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
dev_node = dev_to_node(ena_dev->dmadev);
set_dev_node(ena_dev->dmadev, ctx->numa_node);
io_sq->desc_addr.virt_addr =
dma_alloc_coherent(ena_dev->dmadev, size, &io_sq->desc_addr.phys_addr,
GFP_KERNEL);
set_dev_node(ena_dev->dmadev, dev_node);
if (!io_sq->desc_addr.virt_addr) {
io_sq->desc_addr.virt_addr =
dma_alloc_coherent(ena_dev->dmadev, size,
@ -354,10 +350,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
size = (size_t)io_sq->bounce_buf_ctrl.buffer_size *
io_sq->bounce_buf_ctrl.buffers_num;
dev_node = dev_to_node(ena_dev->dmadev);
set_dev_node(ena_dev->dmadev, ctx->numa_node);
io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
set_dev_node(ena_dev->dmadev, dev_node);
if (!io_sq->bounce_buf_ctrl.base_buffer)
io_sq->bounce_buf_ctrl.base_buffer =
devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
@ -397,7 +390,6 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
struct ena_com_io_cq *io_cq)
{
size_t size;
int prev_node = 0;
memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
@ -409,11 +401,8 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
prev_node = dev_to_node(ena_dev->dmadev);
set_dev_node(ena_dev->dmadev, ctx->numa_node);
io_cq->cdesc_addr.virt_addr =
dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
set_dev_node(ena_dev->dmadev, prev_node);
if (!io_cq->cdesc_addr.virt_addr) {
io_cq->cdesc_addr.virt_addr =
dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr,

Some files were not shown because too many files have changed in this diff.